I am new to C++ and am trying to add OpenCV to Microsoft's Kinect samples. I was able to do this for the ColorBasics-D2D sample by modifying this function:
void CColorBasics::ProcessColor()
{
    HRESULT hr;
    NUI_IMAGE_FRAME imageFrame;

    // Attempt to get the color frame
    hr = m_pNuiSensor->NuiImageStreamGetNextFrame(m_pColorStreamHandle, 0, &imageFrame);
    if (FAILED(hr))
    {
        return;
    }

    INuiFrameTexture * pTexture = imageFrame.pFrameTexture;
    NUI_LOCKED_RECT LockedRect;

    // Lock the frame data so the Kinect knows not to modify it while we're reading it
    pTexture->LockRect(0, &LockedRect, NULL, 0);

    // Make sure we've received valid data
    if (LockedRect.Pitch != 0)
    {
        BYTE * pBuffer = (BYTE*) LockedRect.pBits;

        // Attach the locked Kinect buffer to the pre-allocated IplImage header
        cvSetData(img, (BYTE*) pBuffer, img->widthStep);

        Mat m(img);
        Mat hsv;
        vector<Mat> mv(3, Mat(cvSize(640, 480), CV_8UC1));
        cvtColor(m, hsv, CV_BGR2HSV);
        cvtColor(hsv, m, CV_HSV2BGR);

        IplImage iplimg(m);
        cvNamedWindow("rgb", 1);
        cvShowImage("rgb", &iplimg);

        // Draw the data with Direct2D
        m_pDrawColor->Draw(static_cast<BYTE *>(LockedRect.pBits), LockedRect.size);

        // If the user pressed the screenshot button, save a screenshot
        if (m_bSaveScreenshot)
        {
            WCHAR statusMessage[cStatusMessageMaxLen];

            // Retrieve the path to My Photos
            WCHAR screenshotPath[MAX_PATH];
            GetScreenshotFileName(screenshotPath, _countof(screenshotPath));

            // Write out the bitmap to disk
            hr = SaveBitmapToFile(static_cast<BYTE *>(LockedRect.pBits), cColorWidth, cColorHeight, 32, screenshotPath);

            if (SUCCEEDED(hr))
            {
                // Set the status bar to show where the screenshot was saved
                StringCchPrintf(statusMessage, cStatusMessageMaxLen, L"Screenshot saved to %s", screenshotPath);
            }
            else
            {
                StringCchPrintf(statusMessage, cStatusMessageMaxLen, L"Failed to write screenshot to %s", screenshotPath);
            }

            SetStatusMessage(statusMessage);

            // toggle off so we don't save a screenshot again next frame
            m_bSaveScreenshot = false;
        }
    }

    // We're done with the texture so unlock it
    pTexture->UnlockRect(0);

    // Release the frame
    m_pNuiSensor->NuiImageStreamReleaseFrame(m_pColorStreamHandle, &imageFrame);
}
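Neither snippet shows how the IplImage header `img` is created. A hypothetical initialization, assuming the default Kinect color stream (640x480, 32-bit BGRA) and that the header is allocated once during setup, might look like this:

IplImage* img = cvCreateImageHeader(cvSize(640, 480), IPL_DEPTH_8U, 4);
// Each frame, cvSetData(img, pBuffer, img->widthStep) then attaches the
// locked Kinect buffer to this header without copying the pixel data.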
This works fine. However, when I try to add the same thing to the SkeletalViewer sample, it only shows an empty window.
/// <summary>
/// Handle new color data
/// </summary>
/// <returns>true if a frame was processed, false otherwise</returns>
bool CSkeletalViewerApp::Nui_GotColorAlert( )
{
    NUI_IMAGE_FRAME imageFrame;
    bool processedFrame = true;

    HRESULT hr = m_pNuiSensor->NuiImageStreamGetNextFrame( m_pVideoStreamHandle, 0, &imageFrame );
    if ( FAILED( hr ) )
    {
        return false;
    }

    INuiFrameTexture * pTexture = imageFrame.pFrameTexture;
    NUI_LOCKED_RECT LockedRect;
    pTexture->LockRect( 0, &LockedRect, NULL, 0 );

    if ( LockedRect.Pitch != 0 )
    {
        BYTE * pBuffer = (BYTE*) LockedRect.pBits;
        cvSetData(img, (BYTE*) pBuffer, img->widthStep);
        Mat m(img);
        IplImage iplimg(m);
        cvNamedWindow("rgb", 1);
        cvShowImage("rgb", &iplimg);
        m_pDrawColor->Draw( static_cast<BYTE *>(LockedRect.pBits), LockedRect.size );
    }
    else
    {
        OutputDebugString( L"Buffer length of received texture is bogus\r\n" );
        processedFrame = false;
    }

    pTexture->UnlockRect( 0 );
    m_pNuiSensor->NuiImageStreamReleaseFrame( m_pVideoStreamHandle, &imageFrame );

    return processedFrame;
}
I am not sure why the same code does not work in this sample. I am using Visual Studio 2010 and OpenCV 2.4.2.
Thanks.
Answer 0 (score: 1)
Figured it out. Changing it to this works:
if ( LockedRect.Pitch != 0 )
{
    BYTE * pBuffer = static_cast<BYTE *>(LockedRect.pBits);
    cvSetData(img, (BYTE*) pBuffer, img->widthStep);
    Mat m(img);
    IplImage iplimg(m);
    cvNamedWindow("rgb", 1);
    cvShowImage("rgb", &iplimg);
    waitKey(1);
    m_pDrawColor->Draw( static_cast<BYTE *>(LockedRect.pBits), LockedRect.size );
}
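The key addition is waitKey(1): HighGUI windows only process their event queue and repaint while waitKey is running, so calling it once per frame (even for 1 ms) is what lets the "rgb" window actually draw instead of staying empty. For reference, a minimal sketch of the same display step using the OpenCV 2.x C++ API instead of the legacy IplImage/cvShowImage calls might look like the following; the 640x480 BGRA layout is an assumption based on the default Kinect color stream, and ShowColorFrame is just an illustrative helper name.

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

// Sketch only: wraps a locked Kinect color buffer and displays it each frame.
void ShowColorFrame(unsigned char* pBuffer)
{
    // Wrap the buffer without copying; CV_8UC4 matches 32-bit BGRA pixels.
    cv::Mat frame(480, 640, CV_8UC4, pBuffer);

    cv::namedWindow("rgb");   // no-op after the first call
    cv::imshow("rgb", frame);

    // Give HighGUI a chance to process window events and repaint.
    cv::waitKey(1);
}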