I have successfully extracted a point cloud using the Kinect, but I can't get any further: I don't know how to save it, or how to append the next captured frame to it. Below is what I have so far; I would like to extend it so that multiple point clouds are stored in a single file, to build a large 3D map.
#include "main.h"
#include "glut.h"
#include <cmath>
#include <cstdio>
#include <Windows.h>
#include <Ole2.h>
#include <Kinect.h>
// We'll be using buffer objects to store the kinect point cloud
GLuint vboId;
GLuint cboId;
// Intermediate Buffers
// (width/height and colorwidth/colorheight are the depth and color resolutions,
//  expected to be defined in main.h)
unsigned char rgbimage[colorwidth*colorheight*4]; // Stores RGBA color image
ColorSpacePoint depth2rgb[width*height];          // Maps depth pixels to rgb pixels
CameraSpacePoint depth2xyz[width*height];         // Maps depth pixels to 3d coordinates
// Kinect Variables
IKinectSensor* sensor; // Kinect sensor
IMultiSourceFrameReader* reader; // Kinect data source
ICoordinateMapper* mapper; // Converts between depth, color, and 3d coordinates
bool initKinect() {
    if (FAILED(GetDefaultKinectSensor(&sensor))) {
        return false;
    }
    if (sensor) {
        sensor->get_CoordinateMapper(&mapper);
        sensor->Open();
        sensor->OpenMultiSourceFrameReader(
            FrameSourceTypes::FrameSourceTypes_Depth | FrameSourceTypes::FrameSourceTypes_Color,
            &reader);
        return reader;
    } else {
        return false;
    }
}
void getDepthData(IMultiSourceFrame* frame, GLubyte* dest) {
    IDepthFrame* depthframe = NULL;
    IDepthFrameReference* frameref = NULL;
    frame->get_DepthFrameReference(&frameref);
    if (frameref) {
        frameref->AcquireFrame(&depthframe);
        frameref->Release();
    }
    if (!depthframe) return;
    // Get data from frame
    unsigned int sz;
    unsigned short* buf;
    depthframe->AccessUnderlyingBuffer(&sz, &buf);
    // Write vertex coordinates
    mapper->MapDepthFrameToCameraSpace(width*height, buf, width*height, depth2xyz);
    float* fdest = (float*)dest;
    for (unsigned int i = 0; i < sz; i++) {  // sz == width*height
        *fdest++ = depth2xyz[i].X;
        *fdest++ = depth2xyz[i].Y;
        *fdest++ = depth2xyz[i].Z;
    }
    // Fill in depth2rgb map
    mapper->MapDepthFrameToColorSpace(width*height, buf, width*height, depth2rgb);
    depthframe->Release();
}
void getRgbData(IMultiSourceFrame* frame, GLubyte* dest) {
    IColorFrame* colorframe = NULL;
    IColorFrameReference* frameref = NULL;
    frame->get_ColorFrameReference(&frameref);
    if (frameref) {
        frameref->AcquireFrame(&colorframe);
        frameref->Release();
    }
    if (!colorframe) return;
    // Get data from frame
    colorframe->CopyConvertedFrameDataToArray(colorwidth*colorheight*4, rgbimage, ColorImageFormat_Rgba);
    // Write color array for vertices
    float* fdest = (float*)dest;
    for (int i = 0; i < width*height; i++) {
        ColorSpacePoint p = depth2rgb[i];
        // Check if color pixel coordinates are in bounds (>= keeps the index inside the image)
        if (p.X < 0 || p.Y < 0 || p.X >= colorwidth || p.Y >= colorheight) {
            *fdest++ = 0;
            *fdest++ = 0;
            *fdest++ = 0;
        } else {
            int idx = (int)p.X + colorwidth*(int)p.Y;
            *fdest++ = rgbimage[4*idx + 0]/255.f;
            *fdest++ = rgbimage[4*idx + 1]/255.f;
            *fdest++ = rgbimage[4*idx + 2]/255.f;
        }
        // Don't copy alpha channel
    }
    colorframe->Release();
}
void getKinectData() {
    IMultiSourceFrame* frame = NULL;
    if (SUCCEEDED(reader->AcquireLatestFrame(&frame))) {
        GLubyte* ptr;
        glBindBuffer(GL_ARRAY_BUFFER, vboId);
        ptr = (GLubyte*)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
        if (ptr) {
            getDepthData(frame, ptr);
            glUnmapBuffer(GL_ARRAY_BUFFER);  // only unmap if the map succeeded
        }
        glBindBuffer(GL_ARRAY_BUFFER, cboId);
        ptr = (GLubyte*)glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
        if (ptr) {
            getRgbData(frame, ptr);
            glUnmapBuffer(GL_ARRAY_BUFFER);
        }
    }
    if (frame) frame->Release();
}
void rotateCamera() {
    static double angle = 0.;
    static double radius = 3.;
    double x = radius*sin(angle);
    double z = radius*(1-cos(angle)) - radius/2;
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    gluLookAt(x,0,z, 0,0,radius/2, 0,1,0);
    angle += 0.002;
}
void drawKinectData() {
    getKinectData();
    rotateCamera();
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glEnableClientState(GL_VERTEX_ARRAY);
    glEnableClientState(GL_COLOR_ARRAY);
    glBindBuffer(GL_ARRAY_BUFFER, vboId);
    glVertexPointer(3, GL_FLOAT, 0, NULL);
    glBindBuffer(GL_ARRAY_BUFFER, cboId);
    glColorPointer(3, GL_FLOAT, 0, NULL);
    glPointSize(1.f);
    glDrawArrays(GL_POINTS, 0, width*height);
    glDisableClientState(GL_VERTEX_ARRAY);
    glDisableClientState(GL_COLOR_ARRAY);
}
int main(int argc, char* argv[]) {
    if (!init(argc, argv)) return 1;
    if (!initKinect()) return 1;
    // OpenGL setup
    glClearColor(0,0,0,0);
    glClearDepth(1.0f);
    // Set up array buffers (3 floats per point: x,y,z or r,g,b)
    const int dataSize = width*height * 3 * sizeof(float);
    glGenBuffers(1, &vboId);
    glBindBuffer(GL_ARRAY_BUFFER, vboId);
    glBufferData(GL_ARRAY_BUFFER, dataSize, 0, GL_DYNAMIC_DRAW);
    glGenBuffers(1, &cboId);
    glBindBuffer(GL_ARRAY_BUFFER, cboId);
    glBufferData(GL_ARRAY_BUFFER, dataSize, 0, GL_DYNAMIC_DRAW);
    // Camera setup
    glViewport(0, 0, width, height);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    gluPerspective(45, width/(GLdouble)height, 0.1, 1000);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    gluLookAt(0,0,0, 0,0,1, 0,1,0);
    // Main loop
    execute();
    return 0;
}
Answer 0 (score: 3)
If you are only interested in concatenating point clouds, this is easy to do in PCL with the += operator between point clouds. There is a small tutorial about it on the PCL website, which you can find here.
If, on the other hand, you want to build a large map by merging and stitching different point clouds, you need to find a set of corresponding features between the clouds and compute the transform that makes them overlap in the correct regions. You can do this by building an algorithm based on Iterative Closest Point (ICP). It may also be interesting to look at KinFu, which does this in real time and generates a mesh from the scanned clouds. Its source code is available on the PCL Project github.