I am using OpenCV and two identical webcams to reconstruct real-world coordinates. I calibrated the cameras with a chessboard and hard-coded the resulting parameters as constants. The rectified images look acceptable to me (http://postimg.org/image/ccj5c2swt/). I then call reprojectImageTo3D() with the computed Q matrix and the disparity map, and read out real-world coordinates by clicking different locations on the disparity map. However, the resulting z coordinates are hard to make sense of (http://postimg.org/image/e326dkafx/), and clicking certain locations crashes the program. My complete code is below, followed by a small sanity-check sketch.
#include "stdafx.h"
#include "cv.hpp"
#include "cxcore.hpp"
#include "cvaux.hpp"
#include "math.h"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/contrib/contrib.hpp"
#include <vector>
#include <string>
#include <algorithm>
#include <iostream>
#include <iterator>
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
using namespace cv;
static void onMouse(int event, int x, int y, int flags, void*);
Mat xyz;
int _tmain(int argc, _TCHAR* argv[])
{
Mat img_l = imread("recl1.png", 1);
Mat img_r = imread("recr1.png", 1);
//initialize camera parameters
double camera_matrix_left[] = { 650.90584, 0, 308.65233,
0, 648.69669, 251.15348,
0, 0, 1 };
double camera_matrix_right[] = { 651.50089, 0, 308.65391,
0, 649.74142, 264.51927,
0, 0, 1 };
double dist_coeffs_left[] = { -0.10965, 0.02291, 0.00306, 0.00067,0,0,0,-1.12 };
double dist_coeffs_right[] = { -0.09781, 0.12686, 0.00837, 0.00167,0,0,0,-2.04 };
double rot_vector[] = { 0.01605, 0.00785, -0.02446 };
double rot_matrix[] = { 1, 0, 0, 0, 1, 0, 0, 0, 1 };
double translation[] = { -65.81781, -0.13752, 2.75072 };
Mat _M1 = Mat(3, 3, CV_64FC1, camera_matrix_left);
Mat _M2 = Mat(3, 3, CV_64FC1, camera_matrix_right);
Mat _D1 = Mat(1, 8, CV_64FC1, dist_coeffs_left);
Mat _D2 = Mat(1, 8, CV_64FC1, dist_coeffs_right);
Size imageSize = img_l.size();
Mat _R_vector = Mat(3, 1, CV_64FC1, rot_vector);
Mat _R_matrix = Mat(3, 3, CV_64FC1, rot_matrix);
Mat _T = Mat(3, 1, CV_64FC1, translation);
//Bouguet's method for rectification
Mat _R1, _R2, _P1, _P2, Q, mx1, my1, mx2, my2, imgl_rec, imgr_rec;
Rect roi1, roi2;
stereoRectify(_M1, _D1, _M2, _D2, imageSize, _R_matrix, _T, _R1, _R2, _P1, _P2, Q, CALIB_ZERO_DISPARITY, -1, imageSize, &roi1, &roi2);
Q.at<double>(3, 2) = -Q.at<double>(3, 2);
initUndistortRectifyMap(_M1, _D1, _R1, _P1, imageSize, CV_16SC2, mx1, my1);
initUndistortRectifyMap(_M1, _D1, _R1, _P1, imageSize, CV_16SC2, mx2, my2);
//rectify input images
remap(img_l, imgl_rec, mx1, my1, INTER_LINEAR);
remap(img_r, imgr_rec, mx2, my2, INTER_LINEAR);
Mat rec_pair = Mat(imgl_rec.rows, imgl_rec.cols * 2, imgl_rec.type());
imgl_rec.copyTo(rec_pair(Rect(0, 0, imgl_rec.cols, imgl_rec.rows)));
imgr_rec.copyTo(rec_pair(Rect(imgl_rec.cols, 0, imgr_rec.cols, imgr_rec.rows)));
for (int i = 0; i < rec_pair.rows; i += 32) line(rec_pair, Point(0, i), Point(rec_pair.cols, i), Scalar(0, 0, 255));
namedWindow("Rectified", CV_WINDOW_AUTOSIZE);
imshow("Rectified", rec_pair);
//cvWaitKey(0);
//Setup for finding stereo correspondences
int SADWindowSize = 0, numberOfDisparities = 0;
StereoSGBM sgbm;
StereoBM bm;
sgbm.preFilterCap = 63;
sgbm.SADWindowSize = SADWindowSize > 0 ? SADWindowSize : 3;
numberOfDisparities = numberOfDisparities > 0 ? numberOfDisparities : ((imageSize.width / 8) + 15) & -16;
bm.state->roi1 = roi1;
bm.state->roi2 = roi2;
bm.state->preFilterCap = 31;
bm.state->SADWindowSize = SADWindowSize > 0 ? SADWindowSize : 9;
bm.state->minDisparity = 0;
bm.state->numberOfDisparities = numberOfDisparities;
bm.state->textureThreshold = 10;
bm.state->uniquenessRatio = 15;
bm.state->speckleWindowSize = 100;
bm.state->speckleRange = 32;
bm.state->disp12MaxDiff = 1;
enum { STEREO_BM = 0, STEREO_SGBM = 1, STEREO_HH = 2, STEREO_VAR = 3 };
int cn = imgl_rec.channels();
int alg = STEREO_SGBM;
sgbm.P1 = 8 * cn*sgbm.SADWindowSize*sgbm.SADWindowSize;
sgbm.P2 = 32 * cn*sgbm.SADWindowSize*sgbm.SADWindowSize;
sgbm.minDisparity = 0;
sgbm.numberOfDisparities = numberOfDisparities;
sgbm.uniquenessRatio = 30;
sgbm.speckleWindowSize = bm.state->speckleWindowSize;
sgbm.speckleRange = bm.state->speckleRange;
sgbm.disp12MaxDiff = 1;
sgbm.fullDP = alg == STEREO_HH;
//solve disparity graph
Mat disp, disp8;
int64 t = getTickCount();
sgbm(imgl_rec, imgr_rec, disp);
t = getTickCount() - t;
printf("Disparity obtained. Time elapsed: %fms\n", t * 1000 / getTickFrequency());
disp.convertTo(disp8, CV_8U, 255 / (numberOfDisparities*16.));
namedWindow("disparity", 0);
imshow("disparity", disp8);
reprojectImageTo3D(disp, xyz, Q, true, CV_32F);
setMouseCallback("disparity", onMouse, 0);
while (1){
int c = waitKey(0);
if ((char)c == 27)
break;
}
return 0;
}
static void onMouse(int event, int x, int y, int flags, void*)
{
if (event == CV_EVENT_LBUTTONDOWN){
Point3f p = xyz.at<Point3f>(x, y);
printf("point <%d, %d> in world coordinate: x=%.2f y=%.2f z=%.2f\n", x, y, p.x, p.y, p.z);
}
}
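For reference, here is the sanity check I am considering adding next to the mouse callback. It is only a sketch under assumptions read off the calibration above (focal length f ≈ 621.65 px from P1, baseline ≈ 65.8 mm from the translation vector), and checkDepth is a hypothetical helper, not part of the program. It compares the reprojected z against z = f·B/d, keeping in mind that Mat::at takes (row, col), i.e. (y, x), and that StereoSGBM's 16-bit output stores disparity in fixed point with 4 fractional bits, so the raw value has to be divided by 16 to get pixels.

// Hypothetical sanity-check helper (not part of the program above): compare the
// reprojected z at a clicked pixel against depth from similar triangles, z = f*B/d.
#include <cstdio>
#include "opencv2/core/core.hpp"

void checkDepth(const cv::Mat& disp16, const cv::Mat& xyz, int x, int y)
{
    const double f = 621.65; // rectified focal length in pixels, taken from P1
    const double B = 65.8;   // baseline in mm, taken from the translation vector

    // StereoSGBM/StereoBM write 16-bit disparities with 4 fractional bits,
    // so divide by 16 to get the disparity in pixels.
    double d = disp16.at<short>(y, x) / 16.0;          // Mat::at is (row, col) = (y, x)
    if (d <= 0.0) { printf("no valid disparity at (%d, %d)\n", x, y); return; }

    double z_expected = f * B / d;                     // depth in mm if B is in mm
    cv::Point3f p = xyz.at<cv::Point3f>(y, x);         // reprojected point, again (row, col)
    printf("d = %.2f px, expected z = %.1f mm, reprojected z = %.1f\n", d, z_expected, p.z);
}

Calling something like checkDepth(disp, xyz, x, y) from onMouse would show whether the reprojected z even agrees with the plain f*B/d estimate.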
Can anyone give me some advice? Thanks in advance.
I have now checked the values in the matrices computed by stereoRectify(). Here are the results:
R1:
1.00 0.00 -0.04
-0.00 1.00 0.00
0.04 0.00 1.00
P1:
621.65 0.00 343.70 0.00
0.00 621.65 255.82 0.00
0.00 0.00 1.00 0.00
R2:
1.00 0.00 -0.04
-0.00 1.00 0.00
0.04 0.00 1.00
P2:
621.65 0.00 343.70 -40951.56
0.00 621.65 255.82 0.00
0.00 0.00 1.00 0.00
Q:
1.00 0.00 0.00 -343.70
0.00 1.00 0.00 -255.82
0.00 0.00 0.00 621.65
0.00 0.00 -0.02 -0.00
I suspect there is something wrong with these matrices, especially the Q matrix.
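To check Q independently, I would rebuild it in the form the stereoRectify() documentation gives for a rectified horizontal pair and compare. The sketch below only reuses numbers copied from the printed matrices (f and the principal point from P1, the baseline recovered from P2(0,3) = f·Tx); with CALIB_ZERO_DISPARITY both cameras share the same cx, so the bottom-right entry should be 0.

// Sketch: expected Q rebuilt from the printed P1/P2, following the documented layout
//   [ 1 0 0 -cx; 0 1 0 -cy; 0 0 0 f; 0 0 -1/Tx (cx - cx')/Tx ]
#include <iostream>
#include "opencv2/core/core.hpp"

int main()
{
    const double f  = 621.65;               // rectified focal length, from P1
    const double cx = 343.70, cy = 255.82;  // rectified principal point, from P1
    const double Tx = -40951.56 / f;        // baseline from P2(0,3) = f*Tx, about -65.88 mm

    double q[16] = { 1, 0, 0,         -cx,
                     0, 1, 0,         -cy,
                     0, 0, 0,          f,
                     0, 0, -1.0 / Tx,  0 };
    cv::Mat Q_expected(4, 4, CV_64FC1, q);
    std::cout << "expected Q =\n" << Q_expected << std::endl;  // Q(3,2) comes out to about +0.0152 here
    return 0;
}

This puts -1/Tx at about +0.0152, which is the value I would compare against the printed Q(3,2), keeping in mind the sign flip I apply to that entry after stereoRectify() in the code above.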