I have screen-space coordinates from a Kinect. I need to convert them into world-space coordinates for my OpenGL camera, or alternatively I need to place my camera at the same position and orientation as the Kinect so that the points coming from the Kinect are visible. I think I also need the world coordinates relative to my OpenGL camera so I can do collision detection against the other objects I am drawing. How do I obtain this screen-space-to-world mapping?
I used this answer as a reference, but I do not have the Kinect's position and orientation from which to compute the view and projection matrices.
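I believe the SDK may expose this conversion directly, but as I understand it, the underlying math is standard pinhole back-projection from a depth pixel into the depth camera's own 3D frame. A minimal sketch of that idea; the focal lengths and principal point here are placeholder values, not ones I have verified for my sensor:

#include <glm/glm.hpp>

// Hypothetical intrinsics -- substitute the values reported by your Kinect SDK.
const float fx = 525.0f, fy = 525.0f;   // focal lengths in pixels (assumed)
const float cx = 319.5f, cy = 239.5f;   // principal point (assumed)

// Back-project a depth pixel (px, py) with depth in millimeters into the
// depth camera's own 3D coordinate frame, also in millimeters.
glm::vec3 DepthPixelToCameraSpace(float px, float py, float depthMm)
{
    float x = (px - cx) * depthMm / fx;
    float y = (py - cy) * depthMm / fy;
    return glm::vec3(x, y, depthMm);
}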
Edit:
I need to be able to render objects in the same coordinate space as the Kinect. To do that, I set up my projection and view matrices from these values:

Near plane: 0.01f
Far plane: 1700.0f
Horizontal FOV: 60.0f
Vertical FOV: 45.0f
Camera position: (0.0f, 2.0f, 3.0f)
Camera direction: (0.0f, 2.0f, -3.0f)
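For reference, a minimal sketch of how I would expect these values to map onto GLM, treating the "direction" value as a look-at target (which is how the numbers read) and assuming a (0, 1, 0) up vector:

#include <cmath>
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>

glm::vec3 eye(0.0f, 2.0f, 3.0f);
glm::vec3 target(0.0f, 2.0f, -3.0f);   // treated as a look-at point (assumption)
glm::mat4 view = glm::lookAt(eye, target, glm::vec3(0.0f, 1.0f, 0.0f));

// glm::perspective takes the *vertical* FOV plus an aspect ratio. With a
// 45 degree vertical FOV, a 60 degree horizontal FOV corresponds to an
// aspect of tan(30 deg) / tan(22.5 deg) ~= 1.39, not 60/45 = 1.33.
glm::mat4 projection = glm::perspective(
    glm::radians(45.0f),
    std::tan(glm::radians(30.0f)) / std::tan(glm::radians(22.5f)),
    0.01f, 1700.0f);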
With these values I cannot see any rendered objects. Here is my rendering code:
GameRenderer::GameRenderer()
{
    tracker = new GestureTracker();
    tracker->init();
    tracker->startGestureDetection();
}

bool GameRenderer::Init(int argc, char* argv[])
{
    GLfloat points[] = { 0.0f, 0.5f, 0.0f, 0.5f, -0.5f, 0.0f, -0.5f, -0.5f, 0.0f };
    GLfloat point[] = { 0.0f, 0.0f, 0.0f };   // unused for now

    glGenBuffers(1, &m_vbo);
    glBindBuffer(GL_ARRAY_BUFFER, m_vbo);
    glBufferData(GL_ARRAY_BUFFER, sizeof(points), points, GL_DYNAMIC_DRAW);

    // vao
    glGenVertexArrays(1, &m_vao);
    glBindVertexArray(m_vao);
    glEnableVertexAttribArray(0);
    glBindBuffer(GL_ARRAY_BUFFER, m_vbo);
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, NULL);

    const char *vertex_shader =
        "#version 410\n"
        "in vec3 vp;"
        "uniform mat4 model_matrix;"
        "uniform mat4 view_matrix;"
        "uniform mat4 projection_matrix;"
        "void main () {"
        "    gl_Position = projection_matrix * view_matrix * model_matrix * vec4(vp, 1.0);"
        "}";

    const char *fragment_shader =
        "#version 410\n"
        "out vec4 frag_colour;"
        "void main () {"
        "    frag_colour = vec4(0.0f, 0.5f, 1.0f, 1.0f);"
        "}";

    m_vert_shader = glCreateShader(GL_VERTEX_SHADER);
    glShaderSource(m_vert_shader, 1, &vertex_shader, NULL);
    glCompileShader(m_vert_shader);
    int params = -1;
    glGetShaderiv(m_vert_shader, GL_COMPILE_STATUS, &params);
    if (GL_TRUE != params) {
        ShaderLog(m_vert_shader);
        return false;   // vertex shader failed to compile
    }

    m_frag_shader = glCreateShader(GL_FRAGMENT_SHADER);
    glShaderSource(m_frag_shader, 1, &fragment_shader, NULL);
    glCompileShader(m_frag_shader);
    params = -1;
    glGetShaderiv(m_frag_shader, GL_COMPILE_STATUS, &params);
    if (GL_TRUE != params) {
        ShaderLog(m_frag_shader);
        return false;   // fragment shader failed to compile
    }

    m_shader_program = glCreateProgram();
    glAttachShader(m_shader_program, m_frag_shader);
    glAttachShader(m_shader_program, m_vert_shader);
    glLinkProgram(m_shader_program);

    // glm::scale followed by glm::translate post-multiplies (M = S * T),
    // so the translation is applied in the already-scaled space.
    this->m_model_matrix = glm::mat4(1.0f);
    m_model_matrix = glm::scale(m_model_matrix, glm::vec3(4.0f));
    // m_model_matrix = glm::rotate(m_model_matrix, glm::radians(10.0f), glm::vec3(0.0f));
    m_model_matrix = glm::translate(m_model_matrix, glm::vec3(0.0f));

    this->m_view_matrix = glm::mat4(1.0f);
    this->m_proj_matrix = glm::mat4(1.0f);

    GLuint model_mat_location = glGetUniformLocation(this->m_shader_program, "model_matrix");
    glUseProgram(this->m_shader_program);
    glUniformMatrix4fv(model_mat_location, 1, GL_FALSE, glm::value_ptr(this->m_model_matrix));
    return true;
}
void GameRenderer::Draw(Camera& camera)
{
    auto hands = tracker->getHands();
    if (hands.size()) {
        Point3f coordinates = hands[0].getHandPosition();
        glBindBuffer(GL_ARRAY_BUFFER, m_vbo);
        // Unused while the glBufferData call below stays commented out.
        GLfloat hand_coordinate[3] = { coordinates.x, coordinates.y, coordinates.z };
        std::cout << coordinates.x << " " << coordinates.y << " " << coordinates.z << std::endl;
        // glBufferData(GL_ARRAY_BUFFER, sizeof(GLfloat) * 3, hand_coordinate, GL_DYNAMIC_DRAW);

        // Scale is applied first, so the hand translation below is also
        // scaled by 10 (M = S * T).
        this->m_model_matrix = glm::mat4(1.0f);
        m_model_matrix = glm::scale(m_model_matrix, glm::vec3(10.0f));
        m_model_matrix = glm::translate(m_model_matrix, glm::vec3(coordinates.x, coordinates.y, coordinates.z));

        glUseProgram(this->m_shader_program);
        GLuint loc = glGetUniformLocation(this->m_shader_program, "model_matrix");
        glUniformMatrix4fv(loc, 1, GL_FALSE, glm::value_ptr(this->m_model_matrix));
        glUseProgram(0);
    }

    this->m_view_matrix = camera.GetViewMatrix();
    // Aspect ratio hardcoded from the 60/45 FOV pair.
    this->m_proj_matrix = glm::perspective(glm::radians(camera.GetFieldOfView()),
                                           60.0f / 45.0f, camera.GetNearPlane(), camera.GetFarPlane());

    glUseProgram(this->m_shader_program);
    GLuint view_mat_location = glGetUniformLocation(this->m_shader_program, "view_matrix");
    glUniformMatrix4fv(view_mat_location, 1, GL_FALSE, glm::value_ptr(this->m_view_matrix));
    GLuint proj_mat_location = glGetUniformLocation(this->m_shader_program, "projection_matrix");
    glUniformMatrix4fv(proj_mat_location, 1, GL_FALSE, glm::value_ptr(this->m_proj_matrix));
    CheckGLError();

    glUseProgram(m_shader_program);
    glBindVertexArray(m_vao);
    glBindBuffer(GL_ARRAY_BUFFER, m_vbo);
    // glPointSize(50.0f);
    glDrawArrays(GL_TRIANGLES, 0, 3);
    glUseProgram(0);
    CheckGLError();
}
The coordinate values from the Kinect depth frame (these are the hand coordinates the Kinect tracks) are:
       x         y         z
-75.1628   136.374   650.176
-73.7582   141.239   665.259
-69.9152   148.691   702.163
-67.1218   151.315   723.226
-64.4887   153.013   745.074
-62.3365   153.783   767.393
-62.3365   153.783   767.393
-60.1887   152.979   789.881
-57.9948   150.762   812.395
-55.8624   146.575   834.187
-56.2673   133.543   873.035
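These values appear to be millimeters in the Kinect's own frame, while my scene units are much smaller, so presumably some conversion like the following is needed. The 1/1000 scale factor and the z-flip (Kinect z points away from the sensor, an OpenGL camera looks down -z) are assumptions I have not verified:

#include <glm/glm.hpp>

// Hypothetical conversion from Kinect depth-frame coordinates (mm)
// into my scene's units (meters, right-handed OpenGL convention).
glm::vec3 KinectToScene(const glm::vec3& kinectMm)
{
    const float mmToMeters = 0.001f;           // assumed unit conversion
    return glm::vec3( kinectMm.x * mmToMeters,
                      kinectMm.y * mmToMeters,
                     -kinectMm.z * mmToMeters); // assumed z-flip for OpenGL
}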
As soon as the Kinect detects a hand, my geometry (currently a triangle) stops rendering, even though it rendered fine before (since it was at the origin). The sketch below is a numeric check of where the triangle actually ends up.
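To illustrate the numbers involved, a minimal standalone check of what my current model matrix composition does to the first hand coordinate from the table, assuming GLM's usual post-multiplication convention:

#include <iostream>
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>

int main()
{
    // One sample hand coordinate from the table above (millimeters).
    glm::vec3 hand(-75.1628f, 136.374f, 650.176f);

    // Same composition as in Draw(): scale first, then translate,
    // which yields M = S * T, so the translation is scaled by 10.
    glm::mat4 m(1.0f);
    m = glm::scale(m, glm::vec3(10.0f));
    m = glm::translate(m, hand);

    glm::vec4 pos = m * glm::vec4(0.0f, 0.0f, 0.0f, 1.0f);
    // Prints roughly (-751.6, 1363.7, 6501.8): far outside a frustum with a
    // far plane of 1700, and at positive z, i.e. behind a camera looking
    // down -z from near the origin.
    std::cout << pos.x << " " << pos.y << " " << pos.z << std::endl;
}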
What corrections do I need to make to my view/projection matrices, or to the code, so that they share the same space as the Kinect?