Mapping a depth image onto a color image with realsense2 and OpenCV

Asked: 2018-07-05 07:36:19

Tags: opencv camera-calibration realsense

I am using the librealsense2 library.

I am following this example: https://github.com/IntelRealSense/librealsense/blob/master/examples/align/rs-align.cpp

After mapping (aligning) the depth image to the color image with the realsense2 library, I want to display both images with OpenCV's Mat and imshow.

So I wrote the following:

#include "librealsense2/rs.hpp"
#include <opencv2/opencv.hpp>



#include <sstream>
#include <iostream>
#include <fstream>
#include <algorithm>
#include <cstring>

using namespace std;
using namespace cv;

void remove_background(rs2::video_frame& other, const rs2::depth_frame& depth_frame, float depth_scale, float clipping_dist);
float get_depth_scale(rs2::device dev);
rs2_stream find_stream_to_align(const std::vector<rs2::stream_profile>& streams);
bool profile_changed(const std::vector<rs2::stream_profile>& current, const std::vector<rs2::stream_profile>& prev);


int main(int args, char * argv[]) try
{
    // Create and initialize GUI related objects

    rs2::colorizer c; 
    rs2::config cfg;
    rs2::pipeline pipe;
    const int width = 1280;
    const int height = 720;

    c.set_option(RS2_OPTION_HISTOGRAM_EQUALIZATION_ENABLED, 1.f);
    c.set_option(RS2_OPTION_COLOR_SCHEME, 2.f); // White to Black

    cfg.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_BGR8, 30);
    cfg.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, 30);


    rs2::pipeline_profile profile = pipe.start(cfg);

    float depth_scale = get_depth_scale(profile.get_device()); 


    rs2_stream align_to = find_stream_to_align(profile.get_streams());


    rs2::align align(align_to);

    float depth_clipping_distance = 3.f;

    while (true)
    {

        rs2::frameset frameset = pipe.wait_for_frames();

        if (profile_changed(pipe.get_active_profile().get_streams(), profile.get_streams()))
        {
            profile = pipe.get_active_profile();
            align_to = find_stream_to_align(profile.get_streams());
            align = rs2::align(align_to);
            depth_scale = get_depth_scale(profile.get_device());
        }

        auto processed = align.process(frameset);

        rs2::video_frame other_frame = processed.first(align_to);
        rs2::depth_frame aligned_depth_frame = c(processed.get_depth_frame());

        if (!aligned_depth_frame || !other_frame)
        {
            continue;
        }

        remove_background(other_frame, aligned_depth_frame, depth_scale, depth_clipping_distance);



        Mat other_frameaM(Size(width, height), CV_8UC3, (void*)other_frame.get_data(), Mat::AUTO_STEP);
        Mat aligned_depthM(Size(width, height), CV_8UC3, (void*)aligned_depth_frame.get_data(), Mat::AUTO_STEP);


        namedWindow("other window", WINDOW_AUTOSIZE);
        namedWindow("depth window", WINDOW_AUTOSIZE);

        imshow("other window", other_frameaM);
        imshow("depth window", aligned_depthM);
    }

    return EXIT_SUCCESS;
}
catch (const rs2::error & e)
{
    std::cerr << "RealSense error calling " << e.get_failed_function() << "(" << e.get_failed_args() << "):\n    " << e.what() << std::endl;
    return EXIT_FAILURE;
}
catch (const std::exception & e)
{
    std::cerr << e.what() << std::endl;
    return EXIT_FAILURE;
}

float get_depth_scale(rs2::device dev)
{
    // Go over the device's sensors
    for (rs2::sensor& sensor : dev.query_sensors())
    {
        // Check if the sensor is a depth sensor
        if (rs2::depth_sensor dpt = sensor.as<rs2::depth_sensor>())
        {
            return dpt.get_depth_scale();
        }
    }
    throw std::runtime_error("Device does not have a depth sensor");
}


void remove_background(rs2::video_frame& other_frame, const rs2::depth_frame& depth_frame, float depth_scale, float clipping_dist)
{
    const uint16_t* p_depth_frame = reinterpret_cast<const uint16_t*>(depth_frame.get_data());
    uint8_t* p_other_frame = reinterpret_cast<uint8_t*>(const_cast<void*>(other_frame.get_data()));

    int width = other_frame.get_width();
    int height = other_frame.get_height();
    int other_bpp = other_frame.get_bytes_per_pixel();

#pragma omp parallel for schedule(dynamic) //Using OpenMP to try to parallelise the loop
    for (int y = 0; y < height; y++)
    {
        auto depth_pixel_index = y * width;
        for (int x = 0; x < width; x++, ++depth_pixel_index)
        {
            // Get the depth value of the current pixel
            auto pixels_distance = depth_scale * p_depth_frame[depth_pixel_index];

            // Check if the depth value is invalid (<=0) or greater than the threshold
            if (pixels_distance <= 0.f || pixels_distance > clipping_dist)
            {
                // Calculate the offset in other frame's buffer to current pixel
                auto offset = depth_pixel_index * other_bpp;

                // Set pixel to "background" color (0x999999)
                std::memset(&p_other_frame[offset], 0x99, other_bpp);
            }
        }
    }
}

rs2_stream find_stream_to_align(const std::vector<rs2::stream_profile>& streams)
{
    //Given a vector of streams, we try to find a depth stream and another stream to align depth with.
    //We prioritize color streams to make the view look better.
    //If color is not available, we take any other stream (other than depth)
    rs2_stream align_to = RS2_STREAM_ANY;
    bool depth_stream_found = false;
    bool color_stream_found = false;
    for (rs2::stream_profile sp : streams)
    {
        rs2_stream profile_stream = sp.stream_type();
        if (profile_stream != RS2_STREAM_DEPTH)
        {
            if (!color_stream_found)         //Prefer color
                align_to = profile_stream;

            if (profile_stream == RS2_STREAM_COLOR)
            {
                color_stream_found = true;
            }
        }
        else
        {
            depth_stream_found = true;
        }
    }

    if (!depth_stream_found)
        throw std::runtime_error("No Depth stream available");

    if (align_to == RS2_STREAM_ANY)
        throw std::runtime_error("No stream found to align with Depth");

    return align_to;
}
bool profile_changed(const std::vector<rs2::stream_profile>& current, const std::vector<rs2::stream_profile>& prev)
{
    for (auto&& sp : prev)
    {
        //If previous profile is in current (maybe just added another)
        auto itr = std::find_if(std::begin(current), std::end(current), [&sp](const rs2::stream_profile& current_sp) { return sp.unique_id() == current_sp.unique_id(); });
        if (itr == std::end(current)) //If the previous stream wasn't found in current
        {
            return true;
        }
    }
    return false;
}

Only a gray screen is shown and nothing happens.

 Mat other_frameaM(Size(width, height), CV_8UC3, (void*)other_frame.get_data(), Mat::AUTO_STEP);
Mat aligned_depthM(Size(width, height), CV_8UC3, (void*)aligned_depth_frame.get_data(), Mat::AUTO_STEP);

I don't think these lines are the problem, because the depth image and the RGB image open fine as CV_8UC3.

However, when I align the frames and then try to display them in OpenCV, only a gray screen appears.

auto frames = pipe.wait_for_frames(); // Wait for next set of frames from the camera

rs2::video_frame color = frames.get_color_frame();
rs2::depth_frame depth = color_map(frames.get_depth_frame());

if (!color)
    color = frames.get_infrared_frame();

Mat colorM(Size(width, height), CV_8UC3, (void*)color.get_data(), Mat::AUTO_STEP);
Mat depthM(Size(width, height), CV_8UC3, (void*)depth.get_data(), Mat::AUTO_STEP);

This is the part of the code that displays the color and depth images without alignment. It works fine.

So my guess is that the problem is in these lines:

rs2::video_frame other_frame = processed.first(align_to);
rs2::depth_frame aligned_depth_frame = c(processed.get_depth_frame());

I assumed that whatever processing is applied, the result would still work, since it is returned as a frame. I think I am making a big mistake somewhere in the code.

Which part is wrong?


1 Answer:

Answer 0 (score: 2):

There are several ways an image can be stored in memory. There is no guarantee that simply passing the raw buffer pointer will work in every case; try copying the data pixel by pixel instead. You should also know that OpenCV expects the BGR interleaved image format, while RealSense might deliver the frame in another layout (for example RGB).
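For illustration, here is a minimal sketch of how the display step inside the loop could be rewritten along those lines. It assumes the aligned color stream really is BGR8 (as configured with RS2_FORMAT_BGR8 above) and that the colorizer's depth output is RGB8; the helper frame_to_bgr_mat is hypothetical, not part of either library, and the actual format should be verified with get_profile().format().

#include "librealsense2/rs.hpp"
#include <opencv2/opencv.hpp>

// Hypothetical helper: copy a librealsense video frame into a cv::Mat,
// swapping the channel order to BGR when the frame is RGB8 so that
// imshow displays the colors correctly.
cv::Mat frame_to_bgr_mat(const rs2::video_frame& f)
{
    const int w = f.get_width();
    const int h = f.get_height();

    // Wrap the librealsense buffer without copying it yet.
    cv::Mat wrapped(cv::Size(w, h), CV_8UC3, (void*)f.get_data(), cv::Mat::AUTO_STEP);

    cv::Mat out;
    if (f.get_profile().format() == RS2_FORMAT_RGB8)
        cv::cvtColor(wrapped, out, cv::COLOR_RGB2BGR); // e.g. the colorized depth frame
    else
        out = wrapped.clone();                         // already BGR8, just copy the data
    return out;
}

And the corresponding display step inside the while loop:

    cv::Mat other_frameaM = frame_to_bgr_mat(other_frame);
    cv::Mat aligned_depthM = frame_to_bgr_mat(aligned_depth_frame);

    cv::imshow("other window", other_frameaM);
    cv::imshow("depth window", aligned_depthM);
    cv::waitKey(1); // imshow only refreshes the window when the event loop runs

Copying the data (clone or cvtColor) also decouples the Mat from the lifetime of the librealsense frame, so the pixels remain valid even after the frameset is recycled.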