我遇到了深度视频流的问题。我尝试从深度视频流中获取视频帧,但是当我调用 openni::VideoFrameRef::getData()
时,得到的 imgBuf 是空的。
初始化深度流
// --- Initialize the depth stream ---
// Create the depth stream on the device, then start it. On any failure,
// show OpenNI's extended error text to the user; if start() fails, also
// tear the stream back down so it is not left half-initialized.
status = depth.create(device, openni::SENSOR_DEPTH);
if (status == openni::STATUS_OK) {
    status = depth.start();
    if (status != openni::STATUS_OK) {
        // NOTE(review): the QErrorMessage is heap-allocated with `this` as
        // Qt parent, so it is only reclaimed when the parent widget dies.
        (new QErrorMessage(this))->showMessage(openni::OpenNI::getExtendedError());
        depth.destroy();
    }
} else {
    (new QErrorMessage(this))->showMessage(openni::OpenNI::getExtendedError());
}
// BUG FIX: the original allocated the stream-pointer array with raw `new`
// and never deleted it (leak). waitForAnyStream() only borrows the
// pointers, so a fixed-size automatic array is sufficient; it decays to
// openni::VideoStream** at the call site exactly like the old variable.
openni::VideoStream* m_streams[2] = { &depth, &color };
openni::VideoFrameRef m_depthFrame;
openni::VideoFrameRef m_colorFrame;
设置视频帧和获取数据
// --- Wait for a frame, then copy the depth image into a cv::Mat ---
int changedIndex;
// Count 1 means only m_streams[0] (the depth stream) participates in the
// wait — pass 2 instead if the color stream should also wake this code up.
// On success, changedIndex tells us which stream has a frame ready.
openni::Status rc = openni::OpenNI::waitForAnyStream(m_streams, 1, &changedIndex, SAMPLE_READ_WAIT_TIMEOUT);
if (rc != openni::STATUS_OK) {
    // Timeout or error: nothing to read. If imgBuf looked "empty" before,
    // first verify this branch was not silently taken.
    (new QErrorMessage(this))->showMessage(openni::OpenNI::getExtendedError());
    return;
}
status = depth.readFrame(&m_depthFrame);
if (openni::STATUS_OK == status && m_depthFrame.isValid()) {
    openni::VideoMode depthVideoMode = depth.getVideoMode();
    int depthWidth = depthVideoMode.getResolutionX();
    int depthHeight = depthVideoMode.getResolutionY();
    // getData() returns a non-owning pointer into the frame buffer; it is
    // only valid while m_depthFrame holds this frame, so deep-copy it.
    // Depth pixels are 16-bit (openni::DepthPixel == uint16_t).
    const openni::DepthPixel* imgBuf =
        static_cast<const openni::DepthPixel*>(m_depthFrame.getData());
    // BUG FIX: the original memcpy assumed a packed width*height*2 layout,
    // but OpenNI frames may carry row padding (getStrideInBytes() can be
    // larger than width * sizeof(DepthPixel)). Wrapping the buffer with its
    // real stride and clone()-ing performs a correct row-by-row deep copy.
    // Also removed: the unused pDepth/s_type/buf_size locals and the no-op
    // convertTo(CV_16U) on a matrix that was already CV_16U.
    cv::Mat depthMat =
        cv::Mat(depthHeight, depthWidth, CV_16U,
                const_cast<openni::DepthPixel*>(imgBuf),
                m_depthFrame.getStrideInBytes())
            .clone();
}
因此,imgBuf
为空。希望有人能帮我找出错误所在。