一个多月以来,我一直在尝试对人体进行3D渲染,但感到非常困惑。我一直在使用Processing(具体来说是修改Daniel Shiffman创建的点云示例)从1台Kinect生成点云,但无法弄清楚如何在Mac上合并来自2台Kinect的点云。我可以分别记录2个点云,然后在其他软件中将它们合并吗?另外,每当我导出点云时,背景都会变成黑色。这是我正在使用的完整代码:`
import org.openkinect.freenect.*;
import org.openkinect.processing.*;
import codeanticode.syphon.*;
// Kinect library object; opened and depth-initialized in setup().
Kinect kinect;
// Syphon server used to publish each rendered frame to other apps (see draw()).
SyphonServer server;
// Rotation angle around the Y axis, in radians; draw() adds a per-frame
// increment (currently 0.00f, i.e. a static view).
float r = radians(20);
// We'll use a lookup table so that we don't have to repeat the math over and over:
// index = raw 11-bit depth value (0..2047), value = depth in meters (filled in setup()).
float[] depthLookUp = new float[2048];
/**
 * Processing setup: opens the window, starts the Kinect depth stream,
 * creates the Syphon server, and precomputes the raw-depth-to-meters table.
 */
void setup() {
  // P3D renderer is required for the 3D point cloud.
  size(1280, 720, P3D);

  // Start depth capture on the first attached Kinect.
  kinect = new Kinect(this);
  kinect.initDepth();

  // Publish rendered frames to other applications via Syphon.
  server = new SyphonServer(this, "Processing Syphon");

  // Precompute meters for every possible raw depth value (0..2047) so the
  // per-pixel loop in draw() never repeats the conversion math.
  int tableSize = depthLookUp.length;
  for (int raw = 0; raw < tableSize; raw++) {
    depthLookUp[raw] = rawDepthToMeters(raw);
  }
}
/**
 * Processing draw loop: clears the frame, reads the raw Kinect depth image,
 * back-projects every sampled pixel into world space, and renders each as a
 * white point. The finished frame is published over Syphon.
 */
void draw() {
  // background() ignores the alpha channel on the main drawing surface, so the
  // original background(0,0,0,.1) was just a black clear. To get motion trails
  // you would instead draw a translucent full-screen rect each frame.
  background(0);

  // Raw 11-bit depth values, one int per pixel, row-major (kinect.width x kinect.height).
  int[] depth = kinect.getRawDepth();

  // Sample every `skip`-th pixel on both axes; skip = 2 keeps 1 pixel in 4
  // overall (320x240 points at the Kinect's 640x480 depth resolution).
  int skip = 2;

  // Center the cloud on screen and spin it around the Y axis.
  translate(width/2, height/2);
  rotateY(r);

  // Loop-invariant drawing state, hoisted out of the ~76k-iteration pixel loop.
  stroke(255, 255, 255);  // all points drawn white
  float factor = 500;     // meters -> screen-units scale

  for (int x = 0; x < kinect.width; x += skip) {
    for (int y = 0; y < kinect.height; y += skip) {
      int offset = x + y*kinect.width;
      // Convert the Kinect sample to a world xyz coordinate (meters).
      int rawDepth = depth[offset];
      PVector v = depthToWorld(x, y, rawDepth);
      pushMatrix();
      // Scale the world coordinate up and invert/offset Z so nearer points
      // render in front of the camera.
      translate(v.x*factor, v.y*factor, factor - v.z*factor);
      point(0, 0);
      popMatrix();
    }
  }

  // Rotation speed in radians per frame; currently 0 (static view) — raise to spin.
  r += 0.00f;

  // Publish this frame to Syphon clients.
  server.sendScreen();
  //saveFrame("output1/dancer_####.png");  // uncomment to record frames to disk
}
/**
 * Converts a raw 11-bit Kinect depth sample to a distance in meters.
 * Formula from http://graphics.stanford.edu/~mdfisher/Kinect.html
 *
 * Samples of 947 or greater are treated as out of range for this sketch
 * and map to 0 (the lookup table built in setup() then yields 0 meters).
 */
float rawDepthToMeters(int depthValue) {
  if (depthValue >= 947) {
    return 0.0f;  // beyond the near-range cutoff used by this sketch
  }
  double meters = 1.0 / (depthValue * -0.0030711016 + 3.3309495161);
  return (float) meters;
}
// Only needed to make sense of the ouput depth values from the kinect
PVector depthToWorld(int x, int y, int depthValue) {
final double fx_d = 1.0 / 5.9421434211923247e+02;
final double fy_d = 1.0 / 5.9104053696870778e+02;
final double cx_d = 3.3930780975300314e+02;
final double cy_d = 2.4273913761751615e+02;
// Drawing the result vector to give each point its three-dimensional space
PVector result = new PVector();
double depth = depthLookUp[depthValue];//rawDepthToMeters(depthValue);
result.x = (float)((x - cx_d) * depth * fx_d);
result.y = (float)((y - cy_d) * depth * fy_d);
result.z = (float)(depth);
return result;
}`