我一直试图将深度流与我的Kinect的RGB流对齐很长一段时间。我已经阅读了几篇关于它的文章,但我必须忽略一些关键点,因为我无法让它工作......
这是我设法做的图像,我圈出了一些容易发现的错位
我已经尽量精简代码，但它仍然是一大段代码，请耐心看完。下面的代码片段是每当深度帧和RGB帧都准备好时，Kinect SDK 所调用的内容。
正如您所看到的,我一直在尝试
ColorImagePoint colorpoint = _Sensor.CoordinateMapper.MapDepthPointToColorPoint(DepthImageFormat.Resolution640x480Fps30, depthpoint, ColorImageFormat.RgbResolution640x480Fps30);
我更喜欢使用CoordinateMapper.MapDepthFrameToColorFrame(因为这应该可以解决问题)但我无法让它工作..我可能没有正确地做到这一点..
我正在使用Microsofts Kinect SDK 1.6
private void EventAllFramesReady(Object Sender, AllFramesReadyEventArgs e)
{
    // Renders the latest RGB frame plus a depth overlay that is re-projected into
    // RGB space via the CoordinateMapper, so the two images line up on screen.
    Bitmap image = null;
    Bitmap depth = null;
    using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
    using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
    {
        // BUG FIX: either Open*Frame() call can return null when the app falls
        // behind the sensor; the original dereferenced them unconditionally.
        if (colorFrame == null || depthFrame == null)
        {
            return;
        }

        // ---- color frame ----
        image = new Bitmap(colorFrame.Width, colorFrame.Height);
        byte[] colorPixels = new byte[colorFrame.PixelDataLength];
        colorFrame.CopyPixelDataTo(colorPixels);
        // Lock the bitmap and write raw BGRA bytes (way faster than SetPixel()).
        BitmapData imageBitmapData = image.LockBits(new Rectangle(0, 0, image.Width, image.Height),
                                                    ImageLockMode.WriteOnly,
                                                    image.PixelFormat);
        IntPtr IptrImage = imageBitmapData.Scan0;
        byte[] PixelsImage = new byte[image.Width * image.Height * 4];

        // ---- depth frame ----
        depth = new Bitmap(depthFrame.Width, depthFrame.Height);
        DepthImagePixel[] depthData = new DepthImagePixel[depthFrame.PixelDataLength];
        depthFrame.CopyDepthImagePixelDataTo(depthData);
        BitmapData depthBitmapData = depth.LockBits(new Rectangle(0, 0, depth.Width, depth.Height),
                                                    ImageLockMode.WriteOnly,
                                                    depth.PixelFormat);
        IntPtr IptrDepth = depthBitmapData.Scan0;
        byte[] PixelsDepth = new byte[depth.Width * depth.Height * 4];

        DepthImagePoint depthpoint = new DepthImagePoint();
        // BUG FIX: start at 0 (the original started at 1 and skipped row/column 0),
        // and iterate over the DEPTH frame, since depth pixels are what we map.
        for (int x = 0; x < depthFrame.Width; x++)
        {
            for (int y = 0; y < depthFrame.Height; y++)
            {
                short depthdistanceRAW = depthData[(y * depth.Width) + x].Depth;

                // Convert the raw millimetre distance into a display color
                // (pink marks "no valid reading / out of range").
                System.Drawing.Color color = System.Drawing.Color.Pink;
                if (depthdistanceRAW > 0 && depthdistanceRAW <= 4000)
                {
                    int depthdistance = (int)((depthdistanceRAW / 4090f) * 255f);
                    color = System.Drawing.Color.FromArgb((int)(depthdistance / 2f), depthdistance, (int)(depthdistance * 0.7f));
                }

                depthpoint.X = x;
                depthpoint.Y = y;
                depthpoint.Depth = depthdistanceRAW;
                ColorImagePoint colorpoint = _Sensor.CoordinateMapper.MapDepthPointToColorPoint(DepthImageFormat.Resolution640x480Fps30, depthpoint, ColorImageFormat.RgbResolution640x480Fps30);

                // BUG FIX (this was the misalignment): the depth color must be
                // written at the *mapped* color-space coordinates. The original
                // computed adjustedposition but then wrote at the raw index i,
                // so the mapping result was silently discarded.
                if (colorpoint.X >= 0 && colorpoint.X < depth.Width &&
                    colorpoint.Y >= 0 && colorpoint.Y < depth.Height)
                {
                    int adjustedposition = ((colorpoint.Y * depth.Width) + colorpoint.X) * 4;
                    PixelsDepth[adjustedposition] = color.B;
                    PixelsDepth[adjustedposition + 1] = color.G;
                    PixelsDepth[adjustedposition + 2] = color.R;
                    PixelsDepth[adjustedposition + 3] = DepthTransparency;
                }
            }
        }

        // Copy the RGB frame as-is (Bgr32 layout: B, G, R, unused) and force
        // the alpha byte to fully opaque.
        for (int i = 0; i < PixelsImage.Length; i += 4)
        {
            PixelsImage[i] = colorPixels[i];
            PixelsImage[i + 1] = colorPixels[i + 1];
            PixelsImage[i + 2] = colorPixels[i + 2];
            PixelsImage[i + 3] = 255;
        }

        Marshal.Copy(PixelsImage, 0, IptrImage, PixelsImage.Length);
        image.UnlockBits(imageBitmapData);
        Marshal.Copy(PixelsDepth, 0, IptrDepth, PixelsDepth.Length);
        depth.UnlockBits(depthBitmapData);
    }
    _kf.UpdateImage(image); // update the RGB picture in the form
    _kf.UpdateDepth(depth); // update the Depth picture in the form
}
答案 0（得分：0）
我在这里的一个类似问题（similar question here）中发布过答案。
Coordinate Mapping Basics-WPF C# Sample显示了如何使用此方法。您可以在下面找到代码的一些重要部分:
// --- Excerpt from the SDK "Coordinate Mapping Basics" sample: fields + one-time setup ---
// Intermediate storage for the depth data received from the sensor
private DepthImagePixel[] depthPixels;
// Intermediate storage for the color data received from the camera
private byte[] colorPixels;
// Intermediate storage for the depth-to-color mapping: one ColorImagePoint per
// depth pixel, filled by CoordinateMapper.MapDepthFrameToColorFrame below
private ColorImagePoint[] colorCoordinates;
// Inverse scaling factor between color and depth resolutions
// (640 / 320 = 2 for the two formats chosen below)
private int colorToDepthDivisor;
// Format we will use for the depth stream
private const DepthImageFormat DepthFormat = DepthImageFormat.Resolution320x240Fps30;
// Format we will use for the color stream
private const ColorImageFormat ColorFormat = ColorImageFormat.RgbResolution640x480Fps30;
//...
// Initialization (runs once, before any frame events): size the mapping buffer
// to one entry per depth pixel, cache the stream dimensions, and subscribe.
this.colorCoordinates = new ColorImagePoint[this.sensor.DepthStream.FramePixelDataLength];
this.depthWidth = this.sensor.DepthStream.FrameWidth;
this.depthHeight = this.sensor.DepthStream.FrameHeight;
int colorWidth = this.sensor.ColorStream.FrameWidth;
int colorHeight = this.sensor.ColorStream.FrameHeight;
this.colorToDepthDivisor = colorWidth / this.depthWidth;
this.sensor.AllFramesReady += this.SensorAllFramesReady;
//...
// Per-frame handler from the SDK sample: copies both frames out, then maps the
// whole depth frame into color space in a single call. NOTE(review): the sample
// is abridged here ("// ..." marks elided code), so it will not compile as-is.
private void SensorAllFramesReady(object sender, AllFramesReadyEventArgs e)
{
    // in the middle of shutting down, so nothing to do
    if (null == this.sensor)
    {
        return;
    }
    bool depthReceived = false;
    bool colorReceived = false; // set below but not read in this excerpt (used in elided code?)
    using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
    {
        // Frame can be null if we are late; only flag success when data was copied.
        if (null != depthFrame)
        {
            // Copy the pixel data from the image to a temporary array
            depthFrame.CopyDepthImagePixelDataTo(this.depthPixels);
            depthReceived = true;
        }
    }
    using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
    {
        if (null != colorFrame)
        {
            // Copy the pixel data from the image to a temporary array
            colorFrame.CopyPixelDataTo(this.colorPixels);
            colorReceived = true;
        }
    }
    if (true == depthReceived)
    {
        // Maps EVERY depth pixel to color space in one call; the result lands in
        // this.colorCoordinates (one ColorImagePoint per depth pixel).
        this.sensor.CoordinateMapper.MapDepthFrameToColorFrame(
            DepthFormat,
            this.depthPixels,
            ColorFormat,
            this.colorCoordinates);
        // ... (loop over all depth pixels elided; x and y come from that loop)
        int depthIndex = x + (y * this.depthWidth);
        DepthImagePixel depthPixel = this.depthPixels[depthIndex];
        // scale color coordinates to depth resolution
        // NOTE(review): colorImagePoint is defined in the elided loop — presumably
        // this.colorCoordinates[depthIndex]; verify against the full sample.
        int X = colorImagePoint.X / this.colorToDepthDivisor;
        int Y = colorImagePoint.Y / this.colorToDepthDivisor;
        // depthPixel is the depth for the (X,Y) pixel in the color frame
    }
}