C++ Kinect v2 & freenect2:如何将深度数据转换为真实世界坐标

时间:2016-09-08 11:05:23

标签: c++ linux kinect libfreenect2

我正在尝试使用Kinect v2相机(在Linux中)计算真实世界的xyz坐标,但我的计算给出了错误的结果。

以下是代码:

// Back-project pixel (x, y) at depth pointDepth into a 3-D point using a
// pinhole model (depthfx/depthfy are presumably the depth camera's focal
// lengths, depthcx/depthcy its principal point -- TODO confirm against the
// intrinsics actually reported by libfreenect2).
cv::Point3f xyzWorld={0.0f};

xyzWorld.z = pointDepth;
xyzWorld.x = (float) ((float)x -(depthcx)) * xyzWorld.z / depthfx;
xyzWorld.y = (float) ((float)y - (depthcy)) * xyzWorld.z / depthfy;
// NOTE(review): redundant -- z was already assigned pointDepth above.
xyzWorld.z = pointDepth;

return xyzWorld;

我认为问题可能源于深度相机内参 fx、fy、cx、cy 所使用的数值。

有人能帮助我吗?

我正在使用freenect2。

1 个答案:

答案 0 :(得分:1)

为什么不直接使用 OpenNI 的实现?

 OniStatus VideoStream::convertDepthToWorldCoordinates(float depthX, float depthY, float depthZ, float* pWorldX, float* pWorldY, float* pWorldZ)
{
    // Only depth streams carry the projection constants this conversion needs.
    if (ONI_SENSOR_DEPTH != m_pSensorInfo->sensorType)
    {
        m_errorLogger.Append("convertDepthToWorldCoordinates: Stream is not from DEPTH\n");
        return ONI_STATUS_NOT_SUPPORTED;
    }

    // Query the active video mode so we know the depth pixel format (units).
    OniVideoMode videoMode;
    int size = sizeof(videoMode);
    getProperty(ONI_STREAM_PROPERTY_VIDEO_MODE, &videoMode, &size);

    // Frames in 100-micrometer units must be divided by 10 to yield millimeters.
    float const scale = (videoMode.pixelFormat == ONI_PIXEL_FORMAT_DEPTH_100_UM) ? 10.f : 1.f;

    // Normalize the pixel to image coordinates centered on the optical axis:
    // x runs left-to-right in [-0.5, 0.5]; y is flipped so +y points up.
    float const nx = depthX / m_worldConvertCache.resolutionX - .5f;
    float const ny = .5f - depthY / m_worldConvertCache.resolutionY;

    // Scale by depth and the cached frustum factors to obtain world coordinates.
    *pWorldX = (nx * depthZ * m_worldConvertCache.xzFactor) / scale;
    *pWorldY = (ny * depthZ * m_worldConvertCache.yzFactor) / scale;
    *pWorldZ = depthZ / scale;

    return ONI_STATUS_OK;
}

OniStatus VideoStream::convertWorldToDepthCoordinates(float worldX, float worldY, float worldZ, float* pDepthX, float* pDepthY, float* pDepthZ)
{
    // The inverse projection is only defined for depth streams.
    if (ONI_SENSOR_DEPTH != m_pSensorInfo->sensorType)
    {
        m_errorLogger.Append("convertWorldToDepthCoordinates: Stream is not from DEPTH\n");
        return ONI_STATUS_NOT_SUPPORTED;
    }

    const auto& cache = m_worldConvertCache;

    // Perspective-divide by Z, scale by the cached coefficients, and shift to
    // the image center; Y is flipped back into top-left image coordinates.
    *pDepthX = cache.halfResX + cache.coeffX * worldX / worldZ;
    *pDepthY = cache.halfResY - cache.coeffY * worldY / worldZ;
    *pDepthZ = worldZ;
    return ONI_STATUS_OK;
}

以及世界坐标转换缓存(world conversion cache)的刷新逻辑:

 void VideoStream::refreshWorldConversionCache()
{
    // Non-depth streams have nothing to cache.
    if (ONI_SENSOR_DEPTH != m_pSensorInfo->sensorType)
    {
        return;
    }

    // Current stream resolution.
    OniVideoMode videoMode;
    int propSize = sizeof(videoMode);
    getProperty(ONI_STREAM_PROPERTY_VIDEO_MODE, &videoMode, &propSize);

    // Field of view reported by the sensor (presumably in radians, since the
    // values feed tan() directly -- confirm against the driver).
    float horizontalFov;
    float verticalFov;
    propSize = sizeof(float);
    getProperty(ONI_STREAM_PROPERTY_HORIZONTAL_FOV, &horizontalFov, &propSize);
    getProperty(ONI_STREAM_PROPERTY_VERTICAL_FOV, &verticalFov, &propSize);

    auto& cache = m_worldConvertCache;

    // Width/height of the viewing frustum at unit depth.
    cache.xzFactor = tan(horizontalFov / 2) * 2;
    cache.yzFactor = tan(verticalFov / 2) * 2;
    cache.resolutionX = videoMode.resolutionX;
    cache.resolutionY = videoMode.resolutionY;
    cache.halfResX = cache.resolutionX / 2;
    cache.halfResY = cache.resolutionY / 2;
    // Coefficients used by the world -> depth projection.
    cache.coeffX = cache.resolutionX / cache.xzFactor;
    cache.coeffY = cache.resolutionY / cache.yzFactor;
}

// Per-stream projection constants used by the depth<->world conversions;
// refreshed whenever the video mode or FOV properties change.
struct WorldConversionCache
    {
        float xzFactor;   // frustum width at unit depth: 2 * tan(hFov / 2)
        float yzFactor;   // frustum height at unit depth: 2 * tan(vFov / 2)
        float coeffX;     // resolutionX / xzFactor
        float coeffY;     // resolutionY / yzFactor
        int resolutionX;  // depth image width in pixels
        int resolutionY;  // depth image height in pixels
        int halfResX;     // resolutionX / 2
        int halfResY;     // resolutionY / 2
    } m_worldConvertCache;
全部取自 OpenNI GitHub repository

您可以直接从每个帧的描述中获得水平和垂直fov。