I'm new to OpenCV (C++) and the Kinect. I'm trying to capture video images from the Kinect in C++. I've searched everywhere, but everything I found uses OpenNI or OpenKinect, and I don't want to use those libraries. How can I do this?
Thanks!
Answer 0 (score: 7)
You can use the Kinect for Windows SDK to grab the frames and then convert them to an OpenCV format. See this Visual Studio code sample (found in a thread on the Microsoft forums); unfortunately I don't have a Kinect at hand right now to test the code:
#include "stdafx.h"
#define COLOR_WIDTH 640
#define COLOR_HIGHT 480
#define DEPTH_WIDTH 320
#define DEPTH_HIGHT 240
#define SKELETON_WIDTH 640
#define SKELETON_HIGHT 480
#define CHANNEL 3
BYTE buf[DEPTH_WIDTH * DEPTH_HIGHT * CHANNEL];
int drawColor(HANDLE h, IplImage* color)
{
    const NUI_IMAGE_FRAME* pImageFrame = NULL;
    HRESULT hr = NuiImageStreamGetNextFrame(h, 0, &pImageFrame);
    if (FAILED(hr))
    {
        cout << "Get Image Frame Failed" << endl;
        return -1;
    }
    // Lock the frame and point the pre-allocated IplImage header at its
    // pixel buffer (no copy is made).
    NuiImageBuffer* pTexture = pImageFrame->pFrameTexture;
    KINECT_LOCKED_RECT LockedRect;
    pTexture->LockRect(0, &LockedRect, NULL, 0);
    if (LockedRect.Pitch != 0)
    {
        BYTE* pBuffer = (BYTE*)LockedRect.pBits;
        cvSetData(color, pBuffer, LockedRect.Pitch);
    }
    cvShowImage("color image", color);
    NuiImageStreamReleaseFrame(h, pImageFrame);
    return 0;
}
int drawDepth(HANDLE h, IplImage* depth)
{
    const NUI_IMAGE_FRAME* pImageFrame = NULL;
    HRESULT hr = NuiImageStreamGetNextFrame(h, 0, &pImageFrame);
    if (FAILED(hr))
    {
        cout << "Get Image Frame Failed" << endl;
        return -1;
    }
    NuiImageBuffer* pTexture = pImageFrame->pFrameTexture;
    KINECT_LOCKED_RECT LockedRect;
    pTexture->LockRect(0, &LockedRect, NULL, 0);
    if (LockedRect.Pitch != 0)
    {
        USHORT* pBuff = (USHORT*)LockedRect.pBits;
        for (int i = 0; i < DEPTH_WIDTH * DEPTH_HIGHT; i++)
        {
            // Each 16-bit value packs a 3-bit player index (low bits) and a
            // 13-bit depth value; map the depth to an 8-bit intensity.
            BYTE index = pBuff[i] & 0x07;
            USHORT realDepth = (pBuff[i] & 0xFFF8) >> 3;
            BYTE scale = 255 - (BYTE)(256 * realDepth / 0x0fff);
            buf[CHANNEL * i] = buf[CHANNEL * i + 1] = buf[CHANNEL * i + 2] = 0;
            // Color-code pixels by player index (0 = no player).
            switch (index)
            {
            case 0:
                buf[CHANNEL * i] = scale / 2;
                buf[CHANNEL * i + 1] = scale / 2;
                buf[CHANNEL * i + 2] = scale / 2;
                break;
            case 1:
                buf[CHANNEL * i] = scale;
                break;
            case 2:
                buf[CHANNEL * i + 1] = scale;
                break;
            case 3:
                buf[CHANNEL * i + 2] = scale;
                break;
            case 4:
                buf[CHANNEL * i] = scale;
                buf[CHANNEL * i + 1] = scale;
                break;
            case 5:
                buf[CHANNEL * i] = scale;
                buf[CHANNEL * i + 2] = scale;
                break;
            case 6:
                buf[CHANNEL * i + 1] = scale;
                buf[CHANNEL * i + 2] = scale;
                break;
            case 7:
                buf[CHANNEL * i] = 255 - scale / 2;
                buf[CHANNEL * i + 1] = 255 - scale / 2;
                buf[CHANNEL * i + 2] = 255 - scale / 2;
                break;
            }
        }
        cvSetData(depth, buf, DEPTH_WIDTH * CHANNEL);
    }
    NuiImageStreamReleaseFrame(h, pImageFrame);
    cvShowImage("depth image", depth);
    return 0;
}
int drawSkeleton(IplImage* skeleton)
{
    NUI_SKELETON_FRAME SkeletonFrame;
    CvPoint pt[NUI_SKELETON_POSITION_COUNT];
    HRESULT hr = NuiSkeletonGetNextFrame(0, &SkeletonFrame);
    bool bFoundSkeleton = false;
    for (int i = 0; i < NUI_SKELETON_COUNT; i++)
    {
        if (SkeletonFrame.SkeletonData[i].eTrackingState == NUI_SKELETON_TRACKED)
        {
            bFoundSkeleton = true;
        }
    }
    if (bFoundSkeleton)
    {
        // Smooth out jitter, then draw every tracked skeleton.
        NuiTransformSmooth(&SkeletonFrame, NULL);
        memset(skeleton->imageData, 0, skeleton->imageSize);
        for (int i = 0; i < NUI_SKELETON_COUNT; i++)
        {
            if (SkeletonFrame.SkeletonData[i].eTrackingState == NUI_SKELETON_TRACKED)
            {
                // Project each joint into depth-image coordinates and mark it.
                for (int j = 0; j < NUI_SKELETON_POSITION_COUNT; j++)
                {
                    float fx, fy;
                    NuiTransformSkeletonToDepthImageF(
                        SkeletonFrame.SkeletonData[i].SkeletonPositions[j],
                        &fx, &fy);
                    pt[j].x = (int)(fx * SKELETON_WIDTH + 0.5f);
                    pt[j].y = (int)(fy * SKELETON_HIGHT + 0.5f);
                    cvCircle(skeleton, pt[j], 5, CV_RGB(255, 0, 0), -1);
                }
                // Connect the joints: spine, right arm, left arm, then both legs.
                cvLine(skeleton, pt[NUI_SKELETON_POSITION_HEAD],
                       pt[NUI_SKELETON_POSITION_SHOULDER_CENTER], CV_RGB(0, 255, 0));
                cvLine(skeleton, pt[NUI_SKELETON_POSITION_SHOULDER_CENTER],
                       pt[NUI_SKELETON_POSITION_SPINE], CV_RGB(0, 255, 0));
                cvLine(skeleton, pt[NUI_SKELETON_POSITION_SPINE],
                       pt[NUI_SKELETON_POSITION_HIP_CENTER], CV_RGB(0, 255, 0));
                cvLine(skeleton, pt[NUI_SKELETON_POSITION_HAND_RIGHT],
                       pt[NUI_SKELETON_POSITION_WRIST_RIGHT], CV_RGB(0, 255, 0));
                cvLine(skeleton, pt[NUI_SKELETON_POSITION_WRIST_RIGHT],
                       pt[NUI_SKELETON_POSITION_ELBOW_RIGHT], CV_RGB(0, 255, 0));
                cvLine(skeleton, pt[NUI_SKELETON_POSITION_ELBOW_RIGHT],
                       pt[NUI_SKELETON_POSITION_SHOULDER_RIGHT], CV_RGB(0, 255, 0));
                cvLine(skeleton, pt[NUI_SKELETON_POSITION_SHOULDER_RIGHT],
                       pt[NUI_SKELETON_POSITION_SHOULDER_CENTER], CV_RGB(0, 255, 0));
                cvLine(skeleton, pt[NUI_SKELETON_POSITION_SHOULDER_CENTER],
                       pt[NUI_SKELETON_POSITION_SHOULDER_LEFT], CV_RGB(0, 255, 0));
                cvLine(skeleton, pt[NUI_SKELETON_POSITION_SHOULDER_LEFT],
                       pt[NUI_SKELETON_POSITION_ELBOW_LEFT], CV_RGB(0, 255, 0));
                cvLine(skeleton, pt[NUI_SKELETON_POSITION_ELBOW_LEFT],
                       pt[NUI_SKELETON_POSITION_WRIST_LEFT], CV_RGB(0, 255, 0));
                cvLine(skeleton, pt[NUI_SKELETON_POSITION_WRIST_LEFT],
                       pt[NUI_SKELETON_POSITION_HAND_LEFT], CV_RGB(0, 255, 0));
                cvLine(skeleton, pt[NUI_SKELETON_POSITION_HIP_CENTER],
                       pt[NUI_SKELETON_POSITION_HIP_RIGHT], CV_RGB(0, 255, 0));
                cvLine(skeleton, pt[NUI_SKELETON_POSITION_HIP_RIGHT],
                       pt[NUI_SKELETON_POSITION_KNEE_RIGHT], CV_RGB(0, 255, 0));
                cvLine(skeleton, pt[NUI_SKELETON_POSITION_KNEE_RIGHT],
                       pt[NUI_SKELETON_POSITION_ANKLE_RIGHT], CV_RGB(0, 255, 0));
                cvLine(skeleton, pt[NUI_SKELETON_POSITION_ANKLE_RIGHT],
                       pt[NUI_SKELETON_POSITION_FOOT_RIGHT], CV_RGB(0, 255, 0));
                cvLine(skeleton, pt[NUI_SKELETON_POSITION_HIP_CENTER],
                       pt[NUI_SKELETON_POSITION_HIP_LEFT], CV_RGB(0, 255, 0));
                cvLine(skeleton, pt[NUI_SKELETON_POSITION_HIP_LEFT],
                       pt[NUI_SKELETON_POSITION_KNEE_LEFT], CV_RGB(0, 255, 0));
                cvLine(skeleton, pt[NUI_SKELETON_POSITION_KNEE_LEFT],
                       pt[NUI_SKELETON_POSITION_ANKLE_LEFT], CV_RGB(0, 255, 0));
                cvLine(skeleton, pt[NUI_SKELETON_POSITION_ANKLE_LEFT],
                       pt[NUI_SKELETON_POSITION_FOOT_LEFT], CV_RGB(0, 255, 0));
            }
        }
    }
    cvShowImage("skeleton image", skeleton);
    return 0;
}
int main(int argc, char* argv[])
{
    // Headers only: the color and depth images borrow the Kinect/conversion
    // buffers via cvSetData; the skeleton image owns its own pixels.
    IplImage* color = cvCreateImageHeader(cvSize(COLOR_WIDTH, COLOR_HIGHT), IPL_DEPTH_8U, 4);
    IplImage* depth = cvCreateImageHeader(cvSize(DEPTH_WIDTH, DEPTH_HIGHT), IPL_DEPTH_8U, CHANNEL);
    IplImage* skeleton = cvCreateImage(cvSize(SKELETON_WIDTH, SKELETON_HIGHT), IPL_DEPTH_8U, CHANNEL);

    cvNamedWindow("color image", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("depth image", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("skeleton image", CV_WINDOW_AUTOSIZE);

    HRESULT hr = NuiInitialize(NUI_INITIALIZE_FLAG_USES_DEPTH_AND_PLAYER_INDEX
                               | NUI_INITIALIZE_FLAG_USES_COLOR
                               | NUI_INITIALIZE_FLAG_USES_SKELETON);
    if (hr != S_OK)
    {
        cout << "NuiInitialize failed" << endl;
        return hr;
    }

    // h1/h3/h5 are frame-ready events; h2/h4 receive the stream handles.
    HANDLE h1 = CreateEvent(NULL, TRUE, FALSE, NULL);
    HANDLE h2 = NULL;
    hr = NuiImageStreamOpen(NUI_IMAGE_TYPE_COLOR, NUI_IMAGE_RESOLUTION_640x480,
                            0, 2, h1, &h2);
    if (FAILED(hr))
    {
        cout << "Could not open image stream video" << endl;
        return hr;
    }
    HANDLE h3 = CreateEvent(NULL, TRUE, FALSE, NULL);
    HANDLE h4 = NULL;
    hr = NuiImageStreamOpen(NUI_IMAGE_TYPE_DEPTH_AND_PLAYER_INDEX,
                            NUI_IMAGE_RESOLUTION_320x240, 0, 2, h3, &h4);
    if (FAILED(hr))
    {
        cout << "Could not open depth stream video" << endl;
        return hr;
    }
    HANDLE h5 = CreateEvent(NULL, TRUE, FALSE, NULL);
    hr = NuiSkeletonTrackingEnable(h5, 0);
    if (FAILED(hr))
    {
        cout << "Could not open skeleton stream video" << endl;
        return hr;
    }

    while (1)
    {
        // Wait for each stream's frame-ready event, then draw it.
        WaitForSingleObject(h1, INFINITE);
        drawColor(h2, color);
        WaitForSingleObject(h3, INFINITE);
        drawDepth(h4, depth);
        WaitForSingleObject(h5, INFINITE);
        drawSkeleton(skeleton);

        // Exit on ESC or q/Q.
        int c = cvWaitKey(1);
        if (c == 27 || c == 'q' || c == 'Q')
            break;
    }

    cvReleaseImageHeader(&depth);
    cvReleaseImageHeader(&color);
    cvReleaseImage(&skeleton);
    cvDestroyWindow("depth image");
    cvDestroyWindow("color image");
    cvDestroyWindow("skeleton image");
    NuiShutdown();
    return 0;
}
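Note that cvCreateImageHeader plus cvSetData only wrap the Kinect buffer rather than copying it, so the IplImage data is valid only until the frame is released. If you are on OpenCV 2.x or later and prefer the C++ API, the same locked buffer can be wrapped in a cv::Mat header instead; a minimal sketch (using pBuffer and LockedRect from drawColor above):

// Sketch: wrap the locked Kinect color buffer in a cv::Mat header.
// No copy is made, so display/clone it before NuiImageStreamReleaseFrame().
cv::Mat colorMat(COLOR_HIGHT, COLOR_WIDTH, CV_8UC4, pBuffer, LockedRect.Pitch);
cv::imshow("color image", colorMat);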
Answer 1 (score: 3)
OpenCV does not provide any functionality for connecting to and processing data from the Kinect sensor, unless you treat the Kinect as a regular webcam. You will need to grab the data with one of the APIs and pass it to OpenCV. To get the data from the Kinect you can use:

- the official Kinect for Windows SDK
- OpenNI
- OpenKinect (libfreenect)
If your employer has issues with one of these APIs, there are options; but using OpenCV does not remove the need to use one of them.
A quick search on MSDN turns up multiple threads on the subject. The most straightforward method I've read about is to use cvSetData to import the data after converting the image:
<强> RGB 强>
IplImage* ovImage = NULL;
ovImage = cvCreateImage(cvSize(640, 480), 8, 4);
// pBuffer points at the locked Kinect color frame bits (obtained via the SDK)
cvSetData(ovImage, pBuffer, ovImage->widthStep);
<强>深度强>
ovImage = cvCreateImage(cvSize(640, 480), 8, 1);
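The depth stream is actually 16 bits per pixel, so for display you would typically wrap it as a 16-bit image first and then scale it down to 8 bits. A sketch of that conversion (pDepthBuffer is a hypothetical pointer to the locked depth frame bits):

IplImage* depth16 = cvCreateImageHeader(cvSize(640, 480), IPL_DEPTH_16U, 1);
cvSetData(depth16, pDepthBuffer, depth16->widthStep);
IplImage* depth8 = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 1);
cvConvertScale(depth16, depth8, 255.0 / 0x0FFF); // map 12-bit depth values to 0..255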
I also found the freenomad_vision project on GitHub, which provides libfreenect support with OpenCV and OpenGL. If you dislike using libfreenect, the code can easily serve as a reference, since all the incoming data is the same and would (presumably) be converted the same way.
Answer 2 (score: 1)
In case anyone gets redirected here while looking for a simpler way to visualize the Kinect depth stream, I was able to accomplish it in the following way for the Kinect v2.
Mat CDepthMap::getFrame()
{
    // hr, _depth_reader (IDepthFrameReader*), sDepthWidth/sDepthHeight and
    // SafeRelease are assumed to be members/helpers defined elsewhere.
    IDepthFrame* frame = nullptr;
    Mat depthImage;
    hr = _depth_reader->AcquireLatestFrame(&frame);
    if (SUCCEEDED(hr)) {
        const UINT imgSize = sDepthWidth * sDepthHeight; // 512*424
        // Heap storage: a stack array of this size (~425 KB) risks overflow.
        std::vector<UINT16> pixelData(imgSize);
        hr = frame->CopyFrameDataToArray(imgSize, pixelData.data());
        if (SUCCEEDED(hr)) {
            depthImage = Mat(sDepthHeight, sDepthWidth, CV_8U);
            for (UINT i = 0; i < imgSize; i++) {
                UINT16 depth = pixelData[i];
                // Keep only the low byte of the millimeter depth (crude but simple).
                depthImage.at<UINT8>(i) = LOWORD(depth);
            }
        }
        SafeRelease(frame);
    }
    return depthImage;
}
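Keeping only the low byte wraps around every 256 mm and produces banding. For a smoother image, you can instead wrap the 16-bit data in a Mat and rescale it in one call; a sketch assuming the pixelData vector above (4500 mm is roughly the Kinect v2 maximum depth range):

Mat depth16(sDepthHeight, sDepthWidth, CV_16U, pixelData.data());
Mat depth8;
depth16.convertTo(depth8, CV_8U, 255.0 / 4500.0); // map 0..4500 mm to 0..255
imshow("depth image", depth8);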