我使用Kinect for Windows SDK提取骨架和彩色图像,然后在每帧的彩色图像上绘制骨架点。我在OpenCV中使用cvCircle()
函数在kinect彩色图像上绘制提取的骨架点(转换为IplImage
)。
当我挥手时,与挥动手相对应的骨架点正在偏离骨架点应该的实际位置。但是样本中没有发生这种情况:" Kinect Explorer-D2D"
我发布了我的代码。我的代码模仿SDK样本:" Skeleton Basics-D2D"。不同之处在于我使用控制台应用程序,而示例是Windows应用程序(我不熟悉Windows应用程序),我添加了kinect彩色图像流。
IplImage *frame = cvCreateImage(cvSize(640, 480), 8, 3);
HRESULT hr;
hr = NuiInitialize(NUI_INITIALIZE_FLAG_USES_SKELETON | NUI_INITIALIZE_FLAG_USES_COLOR);
if(hr != S_OK)
return hr;
HANDLE SkeletonEvent = CreateEventW(NULL, TRUE, FALSE, NULL);
hr = NuiSkeletonTrackingEnable(SkeletonEvent, 0);
if(hr != S_OK)
return hr;
HANDLE ColorEvent = CreateEventW(NULL, TRUE, FALSE, NULL);
HANDLE h2 = NULL;
hr = NuiImageStreamOpen(NUI_IMAGE_TYPE_COLOR, NUI_IMAGE_RESOLUTION_640x480, 0, 2, ColorEvent, &h2);
if(hr != S_OK)
return hr;
NUI_SKELETON_FRAME skeletonFrame = {0};
while(1){
WaitForSingleObject(SkeletonEvent, INFINITE);
//get skeleton data
hr = NuiSkeletonGetNextFrame(0, &skeletonFrame);
if( hr != S_OK )
continue;
NuiTransformSmooth(&skeletonFrame, NULL);
//get color image data
const NUI_IMAGE_FRAME * pImageFrame = NULL;
hr = NuiImageStreamGetNextFrame(h2, 0, &pImageFrame );
if( hr != S_OK )
continue;
INuiFrameTexture *pTexture = pImageFrame->pFrameTexture;
NUI_LOCKED_RECT LockedRect;
pTexture->LockRect( 0, &LockedRect, NULL, 0 );
//Convert the color image to IplImage frame
if(LockedRect.Pitch != 0)
ConvertDataToIplImage(LockedRect, frame);
//Draw skeleton points on frame
for(int i=0; i<6; i++){
NUI_SKELETON_TRACKING_STATE trackingState = skeletonFrame.SkeletonData[i].eTrackingState;
if (NUI_SKELETON_TRACKED == trackingState)
for(int j=0; j<20; j++)
cvCircle(frame,
SkeletonToScreen(skeletonFrame.SkeletonData[i].SkeletonPositions[j],640, 480,pImageFrame),
4, CV_RGB(255,0,0),2);
}
cvShowImage("Frame",frame);
NuiImageStreamReleaseFrame( h2, pImageFrame );
cvWaitKey(30);
}
函数SkeletonToScreen()
是:
CvPoint SkeletonToScreen(Vector4 skeletonPoint, int width, int height, const
NUI_IMAGE_FRAME *imgFrame)
{
LONG x, y;
USHORT depth;
LONG ColorX, ColorY;
NuiTransformSkeletonToDepthImage(skeletonPoint, &x, &y, &depth);
NuiImageGetColorPixelCoordinatesFromDepthPixelAtResolution(NUI_IMAGE_RESOLUTION_640x480,
NUI_IMAGE_RESOLUTION_320x240,
&imgFrame->ViewArea,
x,
y,
depth,
&ColorX,
&ColorY);
float screenPointX = static_cast<float>(ColorX * width) / 640;
float screenPointY = static_cast<float>(ColorY * height) / 480;
return cvPoint(screenPointX, screenPointY);
}
我的结果是http://www.dropbox.com/s/d0kudegrnecklel/my%20result.jpg。 Skeleton Basics-D2D结果为http://www.dropbox.com/s/y9rvvoxijv4rs28/the%20kinect%20explorer.jpg。
我们看到当手快速挥动时,Skeleton Basics-D2D很好地跟踪骨架点,但我的骨架点远离我的手和手腕。