我正在使用 OpenCV 和点云库(PCL)进行三维建模。经过大量调研,我了解到流程是:相机标定(camera calibration)→ 立体校正(stereoRectify)→ reprojectImageTo3D → 用 PointCloud 在 3D 查看器中显示结果。
以上步骤我都已经完成,但始终得不到想要的结果。我遇到的问题是:在立体校正(stereoRectify)之后,initUndistortRectifyMap 和 remap 给出的校正图像非常糟糕(图像被压缩成只剩三行左右的条纹),因此 3D 重建结果也很差。时间很紧,请大家帮帮我。附注:我是新用户,听说需要 50 以上的声誉才能贴图,所以无法附上结果截图。下面贴出我正在使用的代码,希望对分析有帮助。
Mat imgLeft = imread("trible.jpg", 0);
Mat imgRight = imread("trible1.JPG", 0);
cv::Mat_<double> CameraMatrix1(3, 3);
cv::Mat_<double> CameraMatrix2(3, 3);
cv::Mat_<double> disCoeffs1(8, 1);
cv::Mat_<double> disCoeffs2(8, 1);
cv::Mat_<double> R(3,3);
cv::Mat_<double> T(3, 1);
CameraMatrix1 << 3.7418800905573778e+002, 0, 2.8292646601205792e+002, 0,
3.7418800905573778e+002, 1.9600567927223364e+002, 0, 0, 1;
CameraMatrix2 << 3.7418800905573778e+002, 0, 1.9738001176691122e+002, 0,
3.7418800905573778e+002, 1.9213950093058386e+002, 0, 0, 1;
disCoeffs1 << 8.4846284211974010e-001, -6.8586657466127905e+000, 0, 0, 0,
0, 0, -2.1939018609332440e+001;
disCoeffs2 << -1.0450215273644869e+000, 5.6341039084121618e+001, 0, 0, 0,
0, 0, 2.5604871618687031e+002;
R << 9.9004326094229356e-001, -3.0104115731476408e-002,
-1.3750666776115050e-001, 4.9787774041489000e-002,
9.8860863120095721e-001, 1.4203574152643209e-001,
1.3166441819543800e-001, -1.4746767961487581e-001,
9.8026412994165057e-001;
T << 3.6832840828600544e+000, -1.3250441137852145e+000,
3.1984415574963926e-001;
cv::Mat R1, R2, P1, P2, Q;
cv::stereoRectify(CameraMatrix1, disCoeffs1, CameraMatrix2, disCoeffs2, imgLeft.size(), R, T, R1, R2, P1, P2, Q, 0, 0, imgLeft.size(),0,0);
double Q03, Q13, Q23, Q32, Q33;
Q03 = Q.at<double>(0, 3);
Q13 = Q.at<double>(1, 3);
Q23 = Q.at<double>(2, 3);
Q32 = Q.at<double>(3, 2);
Q33 = Q.at<double>(3, 3);
std::cout << "Q(0,3) = " << Q03 << "; Q(1,3) = " << Q13 << "; Q(2,3) = " << Q23 << "; Q(3,2) = " << Q32 << "; Q(3,3) = " << Q33 << ";" << std::endl;
Mat map11, map12, map21, map22;
initUndistortRectifyMap(CameraMatrix1, disCoeffs1, R1, P1, imgLeft.size(), CV_32FC1, map11, map12);
initUndistortRectifyMap(CameraMatrix2, disCoeffs2, R2, P2, imgRight.size(), CV_32FC1, map21, map22);
Mat img1r, img2r;
remap(imgLeft, img1r, map11, map12, INTER_LINEAR);
remap(imgRight, img2r, map21, map22, INTER_LINEAR);
imgLeft = img1r;
imgRight = img2r;
Mat disp, disp8;
// String literals are const: binding one to a mutable char* is ill-formed
// in C++11 and later (was deprecated before that) — use const char*.
const char* method = "BM";
if (!(strcmp(method, "BM")))
{
    // Block-matching stereo (OpenCV 2.x API); outputs CV_16S fixed-point
    // disparity (actual disparity * 16). Parameters hand-tuned.
    StereoBM sbm;
    sbm.state->SADWindowSize = 25;       // correlation window size (odd)
    sbm.state->numberOfDisparities = 112; //64; //112 — must be multiple of 16
    sbm.state->preFilterSize = 5;
    sbm.state->preFilterCap = 61;
    sbm.state->minDisparity = -39;
    sbm.state->textureThreshold = 150;   //507
    sbm.state->uniquenessRatio = 0;
    sbm.state->speckleWindowSize = 0;    // 0 disables speckle filtering
    sbm.state->speckleRange = 8;
    sbm.state->disp12MaxDiff = 1;
    sbm(imgLeft, imgRight, disp);
}
else if (!(strcmp(method, "SGBM")))
{
    // Semi-global block matching alternative (also CV_16S * 16 output).
    StereoSGBM sbm;
    sbm.SADWindowSize = 25;
    sbm.numberOfDisparities = 64; //144
    sbm.preFilterCap = 63;
    sbm.minDisparity = -10; //-39;
    sbm.uniquenessRatio = 10;
    sbm.speckleWindowSize = 100;
    sbm.speckleRange = 32;
    sbm.disp12MaxDiff = 1;
    sbm.fullDP = false;                  // single-pass (faster) variant
    sbm.P1 = 216;                        // smoothness penalties, P2 > P1
    sbm.P2 = 864;
    sbm(imgLeft, imgRight, disp);
}
// Inspect the raw disparity range. StereoBM/StereoSGBM output CV_16S
// fixed-point values (true disparity * 16).
double minVal; double maxVal;
minMaxLoc(disp, &minVal, &maxVal);
printf("Min disp: %f Max value: %f \n", minVal, maxVal);
// 8-bit view for display only. The original scaled by 255/(max-min) with
// no offset, so negative disparities (minDisparity = -39) clipped to 0;
// shift by -minVal first. (The earlier normalize() call was dead code —
// its result was immediately overwritten here — and has been removed.)
disp.convertTo(disp8, CV_8UC1, 255.0 / (maxVal - minVal),
               -minVal * 255.0 / (maxVal - minVal));
// Reproject the RAW disparity, not the normalized 8-bit image: Q encodes
// the metric stereo geometry, so feeding it display-rescaled values
// destroys the depth scale and produces the flat/garbage 3D output the
// author describes. Convert the CV_16S fixed-point disparity to float,
// undoing the *16 scale, before applying Q.
cv::Mat disp32;
disp.convertTo(disp32, CV_32F, 1.0 / 16.0);
// reprojectImageTo3D produces a 3-channel (X,Y,Z) float image.
cv::Mat recons3D(disp32.size(), CV_32FC3);
//Reproject image to 3D
std::cout << "Reprojecting image to 3D..." << std::endl;
cv::reprojectImageTo3D(disp32, recons3D, Q, false, CV_32F);
waitKey(1);
//Create point cloud and fill it
std::cout << "Creating Point Cloud..." << std::endl;
pcl::PointCloud<pcl::PointXYZRGB>::Ptr point_cloud_ptr(new pcl::PointCloud<pcl::PointXYZRGB>);
double px, py, pz;
uchar pr, pg, pb;
for (int i = 0; i < imgLeft.rows; i++)
{
    // imgLeft was loaded with imread(..., 0) and is single-channel
    // grayscale: ONE uchar per pixel. The original indexed it as 3-channel
    // BGR (rgb_ptr[3*j] / [3*j+1] / [3*j+2]), reading past the end of
    // every row — undefined behavior.
    uchar* gray_ptr = imgLeft.ptr<uchar>(i);
#ifdef CUSTOM_REPROJECT
    uchar* disp_ptr = disp8.ptr<uchar>(i);
#else
    float* recons_ptr = recons3D.ptr<float>(i);
#endif
    // Bound the inner loop by the image actually being dereferenced; the
    // original used imgRight.cols while reading imgLeft rows, which is
    // wrong whenever the two sizes differ.
    for (int j = 0; j < imgLeft.cols; j++)
    {
        //Get 3D coordinates
#ifdef CUSTOM_REPROJECT
        // Manual back-projection through the five non-trivial Q entries
        // (valid for the canonical rectified Q produced by stereoRectify).
        uchar d = disp_ptr[j];
        if (d == 0) continue; //Discard bad pixels
        double pw = -1.0 * static_cast<double>(d) * Q32 + Q33;
        px = static_cast<double>(j) + Q03;
        py = static_cast<double>(i) + Q13;
        pz = Q23;
        px = px / pw;
        py = py / pw;
        pz = pz / pw;
#else
        px = recons_ptr[3 * j];
        py = recons_ptr[3 * j + 1];
        pz = recons_ptr[3 * j + 2];
#endif
        // Grayscale source: replicate the intensity into all 3 channels.
        pb = pg = pr = gray_ptr[j];
        //Insert info into point cloud structure
        pcl::PointXYZRGB point;
        point.x = px;
        point.y = py;
        point.z = pz;
        // Pack 8-bit RGB into the float rgb field PCL's viewer expects.
        uint32_t rgb = (static_cast<uint32_t>(pr) << 16 |
                        static_cast<uint32_t>(pg) << 8 |
                        static_cast<uint32_t>(pb));
        point.rgb = *reinterpret_cast<float*>(&rgb);
        point_cloud_ptr->points.push_back(point);
    }
}
// Unorganized cloud: width is the point count and height MUST be 1.
// PCL requires width * height == points.size(); the original height = 20
// violated that invariant (points are appended conditionally, so the
// count is not a multiple of anything predictable) and corrupts the
// cloud's organization metadata.
point_cloud_ptr->width = (int)point_cloud_ptr->points.size();
point_cloud_ptr->height = 1;
//Create visualizer
boost::shared_ptr<pcl::visualization::PCLVisualizer> viewer;
viewer = createVisualizer(point_cloud_ptr);
//Main loop: render until the user closes the window.
while (!viewer->wasStopped())
{
    viewer->spinOnce(10); //100
    boost::this_thread::sleep(boost::posix_time::microseconds(100000));
}
return 0;
}