I'm new to Kinect development, and new to Stack Overflow as well! Here's my situation:
I have the depth camera and can easily get the depth data. What I want to do is this: once I detect a person (player) in front of the camera, extract only the player's depth pixels and place them on a transparent background, so that the output is a still image of just the player's depth silhouette over a transparent background.
I'd like to ask whether this can be done. I've done some research and found a few things that might help, such as SkeletonToDepthImage() or the depth pixel data (which includes both the distance and the player index).
Answer (score: 1)
I assume you want to render the player from the depth data, showing only their silhouette. Is that right?
The Kinect for Windows Developer Toolkit ships several samples that do exactly this. The "Green Screen" sample shows how to pull out the depth data and map it onto the color stream so the player can be displayed over a background of your choosing. The "Basic Interactions" sample includes a silhouette example, which is precisely what I understand you to be asking for.
Have a look at the samples Microsoft provides to get a better feel for many of the Kinect's basic usage scenarios.
Based on the silhouette example in the Basic Interactions project, I wrote a silhouette control. The core of the control consists of the following two functions (i.e., the ones that actually produce the silhouette).
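If you want to experiment outside the toolkit samples, the usual pattern (assuming Kinect for Windows SDK 1.x; StartSensor is just a hypothetical name, and the handler names match the two functions shown further down) is to enable the depth and skeleton streams and subscribe to their frame-ready events. This is only a sketch of that wiring, not code from the samples:

// Sketch: Kinect SDK 1.x sensor setup. Note that the player index bits in the
// depth stream are only populated when the skeleton stream is enabled as well.
private KinectSensor sensor;

private void StartSensor()
{
    // pick the first connected sensor
    foreach (KinectSensor candidate in KinectSensor.KinectSensors)
    {
        if (candidate.Status == KinectStatus.Connected)
        {
            this.sensor = candidate;
            break;
        }
    }

    if (this.sensor == null)
    {
        return; // no Kinect attached
    }

    this.sensor.DepthStream.Enable(DepthImageFormat.Resolution640x480Fps30);
    this.sensor.SkeletonStream.Enable();

    this.sensor.SkeletonFrameReady += this.OnSkeletonFrameReady;
    this.sensor.DepthFrameReady += this.OnDepthFrameReady;

    this.sensor.Start();
}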
private void OnSkeletonFrameReady(object sender, SkeletonFrameReadyEventArgs e)
{
    using (SkeletonFrame skeletonFrame = e.OpenSkeletonFrame())
    {
        if (skeletonFrame != null && skeletonFrame.SkeletonArrayLength > 0)
        {
            if (_skeletons == null || _skeletons.Length != skeletonFrame.SkeletonArrayLength)
            {
                _skeletons = new Skeleton[skeletonFrame.SkeletonArrayLength];
            }

            skeletonFrame.CopySkeletonDataTo(_skeletons);

            // grab the tracked skeleton and set the playerIndex for use pulling
            // the depth data out for the silhouette.
            // TODO: this assumes only a single tracked skeleton; we want to find the
            // closest person out of the tracked skeletons (see the sketch below).
            this.playerIndex = -1;
            for (int i = 0; i < _skeletons.Length; i++)
            {
                if (_skeletons[i].TrackingState != SkeletonTrackingState.NotTracked)
                {
                    this.playerIndex = i + 1;
                }
            }
        }
    }
}
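The TODO in the handler above settles for whichever tracked skeleton the loop finds last. A minimal sketch of picking the closest person instead, assuming Position.Z (distance from the sensor in meters) is a good enough measure, could replace that loop:

// Sketch: choose the closest tracked skeleton rather than the last one found.
this.playerIndex = -1;
float closestZ = float.MaxValue;
for (int i = 0; i < _skeletons.Length; i++)
{
    if (_skeletons[i].TrackingState != SkeletonTrackingState.NotTracked
        && _skeletons[i].Position.Z < closestZ)
    {
        closestZ = _skeletons[i].Position.Z;
        this.playerIndex = i + 1;
    }
}

This keeps the same i + 1 indexing convention the original loop uses to match against the depth stream's player segmentation index.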
private void OnDepthFrameReady(object sender, DepthImageFrameReadyEventArgs e)
{
    using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
    {
        if (depthFrame != null)
        {
            // check if the format has changed and reallocate the buffers if so.
            bool haveNewFormat = this.lastImageFormat != depthFrame.Format;
            if (haveNewFormat)
            {
                this.pixelData = new short[depthFrame.PixelDataLength];
                this.depthFrame32 = new byte[depthFrame.Width * depthFrame.Height * Bgra32BytesPerPixel];
                this.convertedDepthBits = new byte[this.depthFrame32.Length];
            }

            depthFrame.CopyPixelDataTo(this.pixelData);

            // walk the 16-bit depth pixels; the low bits of each value hold the
            // player segmentation index, which tells us who the pixel belongs to.
            for (int i16 = 0, i32 = 0; i16 < pixelData.Length && i32 < depthFrame32.Length; i16++, i32 += 4)
            {
                int player = pixelData[i16] & DepthImageFrame.PlayerIndexBitmask;

                if (player == this.playerIndex)
                {
                    // pixel belongs to the tracked player: fill with the silhouette color
                    convertedDepthBits[i32 + RedIndex] = 0x44;
                    convertedDepthBits[i32 + GreenIndex] = 0x23;
                    convertedDepthBits[i32 + BlueIndex] = 0x59;
                    convertedDepthBits[i32 + 3] = 0x66;
                }
                else if (player > 0)
                {
                    // pixel belongs to some other detected person: fill with gray
                    convertedDepthBits[i32 + RedIndex] = 0xBC;
                    convertedDepthBits[i32 + GreenIndex] = 0xBE;
                    convertedDepthBits[i32 + BlueIndex] = 0xC0;
                    convertedDepthBits[i32 + 3] = 0x66;
                }
                else
                {
                    // no player here: leave the pixel fully transparent
                    convertedDepthBits[i32 + RedIndex] = 0x0;
                    convertedDepthBits[i32 + GreenIndex] = 0x0;
                    convertedDepthBits[i32 + BlueIndex] = 0x0;
                    convertedDepthBits[i32 + 3] = 0x0;
                }
            }

            if (silhouette == null || haveNewFormat)
            {
                silhouette = new WriteableBitmap(
                    depthFrame.Width,
                    depthFrame.Height,
                    96,
                    96,
                    PixelFormats.Bgra32,
                    null);

                SilhouetteImage.Source = silhouette;
            }

            silhouette.WritePixels(
                new Int32Rect(0, 0, depthFrame.Width, depthFrame.Height),
                convertedDepthBits,
                depthFrame.Width * Bgra32BytesPerPixel,
                0);

            Silhouette = silhouette;

            this.lastImageFormat = depthFrame.Format;
        }
    }
}
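For completeness, the two handlers rely on a few fields and constants that aren't declared in the snippet. The declarations below are my assumption of what they look like: the channel offsets follow the Bgra32 byte order (blue, green, red, alpha), and SilhouetteImage is assumed to be an Image element in the control's XAML.

// Assumed supporting fields and constants (not part of the original snippet).
private const int Bgra32BytesPerPixel = 4; // B, G, R, A
private const int BlueIndex = 0;
private const int GreenIndex = 1;
private const int RedIndex = 2;

private Skeleton[] _skeletons;
private int playerIndex = -1;
private short[] pixelData;
private byte[] depthFrame32;
private byte[] convertedDepthBits;
private DepthImageFormat lastImageFormat = DepthImageFormat.Undefined;
private WriteableBitmap silhouette;

// Exposes the rendered silhouette bitmap to consumers of the control.
public WriteableBitmap Silhouette { get; set; }

The WPF types come from System.Windows (Int32Rect), System.Windows.Media (PixelFormats), and System.Windows.Media.Imaging (WriteableBitmap), and the Kinect types from Microsoft.Kinect.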