#include <opencv2/opencv.hpp>
#include "elas.h" // LIBELAS
using namespace cv;
using namespace std;

Mat generateDisparityMap(Mat& left, Mat& right)
{
    if (left.empty() || right.empty())
        return left;

    // Convert both rectified images to grayscale for ELAS
    Mat lb, rb;
    cvtColor(left, lb, CV_BGR2GRAY);
    cvtColor(right, rb, CV_BGR2GRAY);

    const Size imsize = lb.size();
    // ELAS expects {width, height, bytes per line} for the 8-bit inputs
    const int32_t dims[3] = { imsize.width, imsize.height, imsize.width };

    Mat leftdpf = Mat::zeros(imsize, CV_32F);
    Mat rightdpf = Mat::zeros(imsize, CV_32F);

    Elas::parameters param(Elas::MIDDLEBURY);
    param.postprocess_only_left = true;
    Elas elas(param);
    elas.process(lb.data, rb.data, leftdpf.ptr<float>(0), rightdpf.ptr<float>(0), dims);

    // Scale the float disparities into an 8-bit image for display
    Mat show = Mat(left.rows, left.cols, CV_8UC1, Scalar(0));
    leftdpf.convertTo(show, CV_8U, 5.);

    // Find the maximum 8-bit value ...
    int max_disp = -1;
    for (int i = 0; i < imsize.width; i++) {
        for (int j = 0; j < imsize.height; j++) {
            if (show.at<uchar>(j, i) > max_disp)
                max_disp = show.at<uchar>(j, i);
        }
    }
    // ... and stretch the image to the full 0-255 range
    for (int i = 0; i < imsize.width; i++) {
        for (int j = 0; j < imsize.height; j++) {
            show.at<uchar>(j, i) = (int)max(255.0 * (float)show.at<uchar>(j, i) / max_disp, 0.0);
        }
    }
    return show;
}
Please see the image of the disparity map generated by this code in the link.
I have seen results generated online with the LIBELAS library, and they look perfect. My code runs without any errors, but I get a blurry, distorted map. Please let me know of any modifications to my code. I am using the Visual Studio 2017 IDE and OpenCV 3.3.0 with the contrib modules.
EDIT: I tried computing the disparity using the code given in the link https://github.com/opencv/opencv_contrib/blob/master/modules/ximgproc/samples/disparity_filtering.cpp. However, the disparity map appears wrong in some areas: some objects far from the camera appear brighter than closer objects. I tried to compute the actual depth by multiplying the disparity values with the calibration matrix Q. The computed depth is far off from the actual measured values. I am sure the matrix Q is correct, since my rectified images look good, and my calibration square size is also accurate (0.05 m). My disparity image is at the given link https://photos.app.goo.gl/YWPc6yq7XAmUpkk62.
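For reference, the depth I expect follows the pinhole relation Z = f·B/d, which is also what the commented-out fMaxDistance lines in the snippet below compute. A minimal sanity check, assuming Q has the standard layout produced by stereoRectify (Q(2,3) = f in pixels, Q(3,2) = -1/Tx, with Tx the baseline):

// Sanity check, assuming the standard stereoRectify layout of Q:
//   Q(2,3) = f (focal length in pixels), Q(3,2) = -1/Tx (Tx = baseline).
// For a true (unscaled, floating-point) disparity d, depth is Z = f * B / d.
double fB = Q.at<double>(2, 3) / Q.at<double>(3, 2); // f * Tx; |fB| = f * baseline
double d = 30.0;                                     // e.g. a true disparity of 30 px
double Z = std::abs(fB) / d;                         // expected depth in metres

If the Z values coming out of Q * V are far from what this relation predicts, the disparity being fed in is probably scaled or truncated.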
Here is the additional code used to compute the actual depth from the filtered disparity image stored in filtered_disp_vis.
fs1["Q"] >> Q;
Mat Image;
Mat V = Mat(4, 1, CV_64FC1);
Mat pos = Mat(4, 1, CV_64FC1);
vector< Point3d > points;
//float fMaxDistance = static_cast<float>((1. / Q.at<double>(3, 2)) * Q.at<double>(2, 3));
//filtered_disp_vis.convertTo(filtered_disp_vis, CV_64FC1, 1.0 / 16.0, 0.0);
//imshow("filtered disparity", filtered_disp_vis);
// outputDisparityValue is single 16-bit value from disparityMap
// DISP_SCALE = 16
//float fDisparity = outputDisparityValue / (float)StereoMatcher::DISP_SCALE;
//float fDistance = fMaxDistance / fDisparity;
reprojectImageTo3D(filtered_disp_vis, Image, Q, false, CV_32F);
//cout << Image;
for (int i = 0; i < filtered_disp_vis.cols; i++)
{
for (int j = 0; j < filtered_disp_vis.rows; j++)
{
int d = filtered_disp_vis.at<uchar>(j, i);
//filtered_disp_vis.convertTo(filtered_disp_vis, CV_32F, 1.0 / 16.0, 0.0);
//int l = img_left.at<uchar>(j, i);
//cout << "(" << j << "," << i << ")" << "=" << d;
//out << endl;
// if low disparity, then ignore
/*if (d < 2) {
continue;
}*/
// V is the vector to be multiplied to Q to get
// the 3D homogenous coordinates of the image point
V.at<double>(0, 0) = (double)(i);
V.at<double>(1, 0) = (double)(j);
V.at<double>(2, 0) = (double)d;
V.at<double>(3, 0) = 1.;
pos = Q * V; // 3D homogeneous coordinate
double X = pos.at<double>(0, 0) / pos.at<double>(3, 0);
double Y = pos.at<double>(1, 0) / pos.at<double>(3, 0);
double Z = pos.at<double>(2, 0) / pos.at<double>(3, 0);
if (i == 446 && j == 362)
{
cout << "(" << j << "," << i << ")" << " = ";
cout << X << " " << Y << " " << Z << " " << d;
cout << endl;
}
Mat point3d_cam = Mat(3, 1, CV_64FC1);
point3d_cam.at<double>(0, 0) = X;
point3d_cam.at<double>(1, 0) = Y;
point3d_cam.at<double>(2, 0) = Z;
// transform 3D point from camera frame to robot frame
//Mat point3d_robot = XR * point3d_cam + XT;
points.push_back(Point3d(point3d_cam));
}
Where am I going wrong? Any modifications to my snippets, or different suggestions for getting a correct disparity map with accurate depth values, would be appreciated.
Answer (score: 0)
I think this is not a LIBELAS problem but a conversion problem. I am not sure of the range of the image you generate, but directly converting CV_32F to CV_8U is generally not a good idea: you lose information, and the result also depends on that range. For example, with the convertTo(show, CV_8U, 5.) call above, any float disparity greater than 51 px saturates to 255. Moreover, you normalize the values only after converting to 8U, which can cause problems: the information is already lost, so the maximum you find may be a wrong (clipped) value.
If it is only for display, you can use OpenCV's normalize function:
cv::Mat show;
cv::normalize(leftdpf, show, 0, 255, cv::NORM_MINMAX, CV_8U);
This gives an image of type CV_8U with the values normalized to fit the range 0-255.
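Along the same lines, for the depth computation it is safer to keep the disparity in floating point and normalize only a separate copy for display. A minimal sketch, assuming filtered_disp is the raw CV_16S fixed-point output of the ximgproc filter (scaled by StereoMatcher::DISP_SCALE = 16) rather than the 8-bit visualization image:

// Convert the fixed-point disparity to true float disparities in pixels
cv::Mat disp_float;
filtered_disp.convertTo(disp_float, CV_32F, 1.0 / 16.0);

// Reproject using the float disparities; Z is then in calibration units (metres here)
cv::Mat xyz;
cv::reprojectImageTo3D(disp_float, xyz, Q, true, CV_32F);

// Normalize only a separate copy for display
cv::Mat disp_vis;
cv::normalize(disp_float, disp_vis, 0, 255, cv::NORM_MINMAX, CV_8U);
cv::imshow("disparity", disp_vis);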