Getting the raw depth image with OpenKinect

Asked: 2016-10-20 15:27:09

Tags: kinect openkinect

I am trying to use the sample code from here.

I made some changes in order to save the images to my computer. When I read the data in MATLAB, the values that should be 0 are set to 2047, and when I reconstruct the 3D points using the default intrinsic camera parameters the overall result does not look right.

What I want to achieve is to save the images so that I can use img = single(imread('depth.png'))/1000 and get depth values in meters, with pixels that have no measurement set to zero.

By the way, it is a Kinect V1.

Below is the code, with comments on what I tried to change.

#include "libfreenect.hpp"
#include <iostream>
#include <vector>
#include <cmath>
#include <pthread.h>
#include <cv.h>
#include <cxcore.h>
#include <highgui.h>


using namespace cv;
using namespace std;


class myMutex {
    public:
        myMutex() {
            pthread_mutex_init( &m_mutex, NULL );
        }
        void lock() {
            pthread_mutex_lock( &m_mutex );
        }
        void unlock() {
            pthread_mutex_unlock( &m_mutex );
        }
    private:
        pthread_mutex_t m_mutex;
};

// Should one use FREENECT_DEPTH_REGISTERED instead of FREENECT_DEPTH_11BIT? 
class MyFreenectDevice : public Freenect::FreenectDevice {
    public:
        MyFreenectDevice(freenect_context *_ctx, int _index)
            : Freenect::FreenectDevice(_ctx, _index), m_buffer_depth(FREENECT_DEPTH_11BIT),
            m_buffer_rgb(FREENECT_VIDEO_RGB), m_gamma(2048), m_new_rgb_frame(false),
            m_new_depth_frame(false), depthMat(Size(640,480),CV_16UC1),
            rgbMat(Size(640,480), CV_8UC3, Scalar(0)),
            ownMat(Size(640,480),CV_8UC3,Scalar(0)) {

            for( unsigned int i = 0 ; i < 2048 ; i++) {
                float v = i/2048.0;
                v = std::pow(v, 3)* 6;
                m_gamma[i] = v*6*256;
            }
        }

        // Do not call directly even in child
        void VideoCallback(void* _rgb, uint32_t timestamp) {
            std::cout << "RGB callback" << std::endl;
            m_rgb_mutex.lock();
            uint8_t* rgb = static_cast<uint8_t*>(_rgb);
            rgbMat.data = rgb;
            m_new_rgb_frame = true;
            m_rgb_mutex.unlock();
        };

        // Do not call directly even in child
        void DepthCallback(void* _depth, uint32_t timestamp) {
            std::cout << "Depth callback" << std::endl;
            m_depth_mutex.lock();
            uint16_t* depth = static_cast<uint16_t*>(_depth);
            // Here I use memcpy instead so I can use uint16
            // memcpy(depthMat.data,depth,depthMat.rows*depthMat.cols*sizeof(uint16_t));

            depthMat.data = (uchar*) depth;
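            // The line above points depthMat at libfreenect's internal buffer
            // instead of copying it; the commented-out memcpy would give
            // depthMat its own copy of the 16-bit samples.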
            m_new_depth_frame = true;
            m_depth_mutex.unlock();
        }

        bool getVideo(Mat& output) {
            m_rgb_mutex.lock();
            if(m_new_rgb_frame) {
                cv::cvtColor(rgbMat, output, CV_RGB2BGR);
                m_new_rgb_frame = false;
                m_rgb_mutex.unlock();
                return true;
            } else {
                m_rgb_mutex.unlock();
                return false;
            }
        }

        bool getDepth(Mat& output) {
                m_depth_mutex.lock();
                if(m_new_depth_frame) {
                    depthMat.copyTo(output);
                    m_new_depth_frame = false;
                    m_depth_mutex.unlock();
                    return true;
                } else {
                    m_depth_mutex.unlock();
                    return false;
                }
            }
    private:

        // Should it be uint16_t instead or even higher?
        std::vector<uint8_t> m_buffer_depth;
        std::vector<uint8_t> m_buffer_rgb;
        std::vector<uint16_t> m_gamma;
        Mat depthMat;
        Mat rgbMat;
        Mat ownMat;
        myMutex m_rgb_mutex;
        myMutex m_depth_mutex;
        bool m_new_rgb_frame;
        bool m_new_depth_frame;
};


int main(int argc, char **argv) {
    bool die(false);
    string filename("snapshot");
    string suffix(".png");
    int i_snap(0),iter(0);

    Mat depthMat(Size(640,480),CV_16UC1);
    Mat depthf (Size(640,480),CV_8UC1);
    Mat rgbMat(Size(640,480),CV_8UC3,Scalar(0));
    Mat ownMat(Size(640,480),CV_8UC3,Scalar(0));

    // The next two lines must be changed as Freenect::Freenect
    // isn't a template but the method createDevice:
    // Freenect::Freenect<MyFreenectDevice> freenect;
    // MyFreenectDevice& device = freenect.createDevice(0);
    // by these two lines:

    Freenect::Freenect freenect;
    MyFreenectDevice& device = freenect.createDevice<MyFreenectDevice>(0);

    namedWindow("rgb",CV_WINDOW_AUTOSIZE);
    namedWindow("depth",CV_WINDOW_AUTOSIZE);
    device.startVideo();
    device.startDepth();
    while (!die) {
        device.getVideo(rgbMat);
        device.getDepth(depthMat);
        // Here I save the depth images
        std::ostringstream file;
        file << filename << i_snap << suffix;
        cv::imwrite(file.str(),depthMat);
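        // Note: depthMat is CV_16UC1, so this writes a 16-bit PNG containing
        // the raw 11-bit readings (0-2047), not millimeters.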

        cv::imshow("rgb", rgbMat);
        depthMat.convertTo(depthf, CV_8UC1, 255.0/2048.0);
        cv::imshow("depth",depthf);

        if(iter >= 1000) break;
        iter++;
    }

    device.stopVideo();
    device.stopDepth();
    return 0;
}

Thanks in advance!

Erik

2 Answers:

Answer 0 (score: 1)

I don't have any experience with OpenKinect, but shouldn't your depth buffer be uint16?

std::vector<uint8_t> m_buffer_depth;

Also, for MATLAB, check whether the image you are reading in is uint16 or uint8. If it is the latter, convert it to uint16:

uint16(imread('depth.png'));

Sorry I can't be of more help; hopefully this helps a bit.

Answer 1 (score: 0)

  1. The values you have are the raw depth values. You need to remap them to mm for the numbers to make sense. The Kinect 1 can see up to 10 meters, so I would use raw_values / 2047 * 10000.

  2. If the values saturate at 2047, you may be using the FREENECT_DEPTH_11BIT_PACKED depth format.

  3. For working in MATLAB, it is always easier to use FREENECT_DEPTH_MM or FREENECT_DEPTH_REGISTERED (see the sketch after this list).

  4. Enjoy.
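
As a rough illustration of point 3 (untested, and assuming the C++ wrapper in libfreenect.hpp exposes setDepthFormat() and that FREENECT_DEPTH_REGISTERED delivers depth in millimeters with 0 for unmeasured pixels; saveDepthMM and the "depthN.png" file name are made up for this sketch):

#include <opencv2/opencv.hpp>
#include <sstream>

// After creating the device and before startDepth():
//     device.setDepthFormat(FREENECT_DEPTH_REGISTERED);  // depth values arrive in mm
// Each grabbed CV_16UC1 frame can then be written out unchanged:
void saveDepthMM(const cv::Mat& depthMM, int index) {
    std::ostringstream name;
    name << "depth" << index << ".png";   // hypothetical file name
    cv::imwrite(name.str(), depthMM);     // 16-bit PNG keeps the mm values
}

Reading such a file in MATLAB with single(imread('depth0.png'))/1000 then gives depth in meters, with unmeasured pixels staying at zero.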