Why does OpenCV's triangulatePoints return the wrong result?

Asked: 2016-02-17 03:58:27

Tags: c++ opencv triangulation 3d-reconstruction

I am using OpenCV's triangulatePoints and running into some problems. Here is what I do:

Step 1. I generate seven 3D points with the function generate3DPoints():

[0.5, 0.5, -0.5]
[0.5, 0.5, 0.5]
[-0.5, 0.5, 0.5]
[-0.5, 0.5, -0.5]
[0.5, -0.5, -0.5]
[-0.5, -0.5, -0.5]
[-0.5, -0.5, 0.5]

Step 2. I then project them into two images with the OpenCV function projectPoints() and store the 2D points in the vectors leftImagePoints and rightImagePoints, respectively.

Left image:
Image point: [0.5, 0.5, -0.5] projected to [736.754, 618.17]
Image point: [0.5, 0.5, 0.5] projected to [731.375, 611.951]
Image point: [-0.5, 0.5, 0.5] projected to [688.719, 612.961]
Image point: [-0.5, 0.5, -0.5] projected to [692.913, 619.172]
Image point: [0.5, -0.5, -0.5] projected to [737.767, 573.217]
Image point: [-0.5, -0.5, -0.5] projected to [693.936, 574.331]
Image point: [-0.5, -0.5, 0.5] projected to [689.71, 569.285]

Right image:
Image point: [0.5, 0.5, -0.5] projected to [702.397, -121.563]
Image point: [0.5, 0.5, 0.5] projected to [696.125, -93.1121]
Image point: [-0.5, 0.5, 0.5] projected to [632.271, -90.1316]
Image point: [-0.5, 0.5, -0.5] projected to [634.829, -116.987]
Image point: [0.5, -0.5, -0.5] projected to [715.505, -230.592]
Image point: [-0.5, -0.5, -0.5] projected to [642.35, -219.8]
Image point: [-0.5, -0.5, 0.5] projected to [638.094, -180.103]

Step 3. After that, I use the OpenCV function triangulatePoints to obtain homogeneous coordinates and convert them into ordinary 3D coordinates.
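Incidentally, OpenCV also ships a helper for the homogeneous-to-Euclidean step; a minimal sketch (assuming the 4xN result of triangulatePoints is stored in a Mat named points4D, a hypothetical name, not one from my code):

// Sketch: convert the 4xN homogeneous output of triangulatePoints to
// N Euclidean 3D points without dividing by w by hand.
// convertPointsFromHomogeneous expects one point per row, hence .t().
Mat points3D;
convertPointsFromHomogeneous(points4D.t(), points3D);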

Reconstruction result:

10.43599, 7.2594047, -33.088718;
11.009606, 7.6683388, -33.098804;
10.033145, 7.6832604, -33.375408;
9.5006475, 7.2904119, -33.379032;
9.5954504, 5.7358074, -32.76096;
8.7637157, 5.8084483, -33.068729;
9.3709002, 6.2525721, -33.122173

Now you can see the problem: the original 3D points I generated differ from the results after projection and reconstruction. I cannot find the mistake and hope you can help me.
Thanks!

Here is my code (using OpenCV 2.4.9):



// testVirtualData.cpp : 
//

#include "stdafx.h"

#include <opencv2/opencv.hpp>

#include <iostream>
#include <string>

using namespace std;
using namespace cv;


vector<Point3f> generate3DPoints()
{
	vector<Point3f> pointsXYZ; // save 7 points

	double x, y, z;
 
	x = 0.5; y = 0.5; z = -0.5;
	pointsXYZ.push_back(Point3f(x, y, z));

	x = 0.5; y = 0.5; z = 0.5;
	pointsXYZ.push_back(Point3f(x, y, z));

	x = -0.5; y = 0.5; z = 0.5;
	pointsXYZ.push_back(Point3f(x, y, z));

	x = -0.5; y = 0.5; z = -0.5;
	pointsXYZ.push_back(Point3f(x, y, z));

	x = 0.5; y = -0.5; z = -0.5;
	pointsXYZ.push_back(Point3f(x, y, z));

	x = -0.5; y = -0.5; z = -0.5;
	pointsXYZ.push_back(Point3f(x, y, z));

	x = -0.5; y = -0.5; z = 0.5;
	pointsXYZ.push_back(Point3f(x, y, z));

	for (int i = 0; i < pointsXYZ.size(); i++)
	{
		cout << pointsXYZ[i] << endl;
	}
	
	return pointsXYZ;
}


vector<Point3f> triangulateInOpenCV(Matx34d leftPMat, Matx34d rightPMat, vector<Point2f> leftPtsxy, vector<Point2f> rightPtsxy)
{
	Mat corrPtsXYZHomo(4, leftPtsxy.size(), CV_64FC1);
	triangulatePoints(leftPMat, rightPMat, leftPtsxy, rightPtsxy, corrPtsXYZHomo);

	cout << "reconsturction result 3D points in homo-coordinate" << endl;
	cout << corrPtsXYZHomo << endl;

	vector<Point3f> corrPtsXYZ;
	
	for (int i = 0; i < corrPtsXYZHomo.cols; i++)  
	{
		float x = corrPtsXYZHomo.at<float>(0, i) / corrPtsXYZHomo.at<float>(3, i);
		float y = corrPtsXYZHomo.at<float>(1, i) / corrPtsXYZHomo.at<float>(3, i);
		float z = corrPtsXYZHomo.at<float>(2, i) / corrPtsXYZHomo.at<float>(3, i);
		corrPtsXYZ.push_back(Point3f(x, y, z));
	}

	return corrPtsXYZ;
}



int _tmain(int argc, _TCHAR* argv[])
{
	vector<Point3f> objectPoints = generate3DPoints();  // generated by myself
	vector<Point2f> rightImagePoints; // projected points in the right image
	vector<Point2f> leftImagePoints; // projected points in the left image


	// 1. intrinsic Matrix
	Mat intrisicMat(3, 3, DataType<double>::type);  
	intrisicMat.at<double>(0, 0) = 1.6415318549788924e+003;
	intrisicMat.at<double>(1, 0) = 0;
	intrisicMat.at<double>(2, 0) = 0;

	intrisicMat.at<double>(0, 1) = 0;
	intrisicMat.at<double>(1, 1) = 1.7067753507885654e+003;
	intrisicMat.at<double>(2, 1) = 0;

	intrisicMat.at<double>(0, 2) = 5.3262822453148601e+002;
	intrisicMat.at<double>(1, 2) = 3.8095355839052968e+002;
	intrisicMat.at<double>(2, 2) = 1;


	// 2, 3. rotation and translation (R, t)
	// left
	double leftRMatArray[] =
	{
		1, 0, 0,
		0, 1, 0,
		0, 0, 1
	};
	Mat leftRMat = Mat(3, 3, CV_64FC1, leftRMatArray); //Rotation Matrix
	Mat leftRVec(3, 1, DataType<double>::type); // Rotation vector
	Rodrigues(leftRMat, leftRVec);

	Mat leftTVec(3, 1, DataType<double>::type); // Translation vector
	leftTVec.at<double>(0) = 4.1158489381208221e+000;
	leftTVec.at<double>(1) = 4.6847683212704716e+000;
	leftTVec.at<double>(2) = 3.6169795190294256e+001;
	//leftTVec.at<double>(0) = 0;
	//leftTVec.at<double>(1) = 0;
	//leftTVec.at<double>(2) = 0;

	// right
	Mat rightRVec(3, 1, DataType<double>::type); // Rotation vector
	rightRVec.at<double>(0) = -3.9277902400761393e-002;
	rightRVec.at<double>(1) = 3.7803824407602084e-002;
	rightRVec.at<double>(2) = 2.6445674487856268e-002;


	Mat rightRMat; // Rotation Matrix
	Rodrigues(rightRVec, rightRMat);

	Mat rightTVec(3, 1, DataType<double>::type); // Translation vector
	rightTVec.at<double>(0) = 2.1158489381208221e+000;
	rightTVec.at<double>(1) = -7.6847683212704716e+000;
	rightTVec.at<double>(2) = 2.6169795190294256e+001;



	// 4. distortion
	Mat distCoeffs(5, 1, DataType<double>::type);   // Distortion vector
	distCoeffs.at<double>(0) = -7.9134632415085826e-001;
	distCoeffs.at<double>(1) = 1.5623584435644169e+000;
	distCoeffs.at<double>(2) = -3.3916502741726508e-002;
	distCoeffs.at<double>(3) = -1.3921577146136694e-002;
	distCoeffs.at<double>(4) = 1.1430734623697941e+002;

	cout << "Intrisic matrix: " << intrisicMat << endl << endl;
	cout << "Distortion coef: " << distCoeffs << endl << endl;
	cout << "left Rotation vector: " << leftRVec << endl << endl;
	cout << "left Translation vector: " << leftTVec << endl << endl;
	cout << "right Rotation vector: " << rightRVec << endl << endl;
	cout << "right Translation vector: " << rightTVec << endl << endl;

	// project
	// left
	projectPoints(objectPoints, leftRVec, leftTVec, intrisicMat, distCoeffs, leftImagePoints);
	
	// right
	projectPoints(objectPoints, rightRVec, rightTVec, intrisicMat, distCoeffs, rightImagePoints);

	for (int i = 0; i < leftImagePoints.size(); ++i)
	{
		cout << "Image point: " << objectPoints[i] << " Projected to " << leftImagePoints[i] << endl;
	}
	cout << "------------------" << endl;
	for (int i = 0; i < rightImagePoints.size(); ++i)
	{
		cout << "Image point: " << objectPoints[i] << " Projected to " << rightImagePoints[i] << endl;
	}

	//triangulate
	double leftPArray[] =
	{
		leftRMat.at<double>(0, 0), leftRMat.at<double>(0, 1), leftRMat.at<double>(0, 2), leftTVec.at<double>(0),
		leftRMat.at<double>(1, 0), leftRMat.at<double>(1, 1), leftRMat.at<double>(1, 2), leftTVec.at<double>(1),
		leftRMat.at<double>(2, 0), leftRMat.at<double>(2, 1), leftRMat.at<double>(2, 2), leftTVec.at<double>(2)
	};
	Mat leftPMat = Mat(3, 4, CV_64FC1, leftPArray); // left P Matrix

	double rightPArray[] =
	{
		rightRMat.at<double>(0, 0), rightRMat.at<double>(0, 1), rightRMat.at<double>(0, 2), rightTVec.at<double>(0),
		rightRMat.at<double>(1, 0), rightRMat.at<double>(1, 1), rightRMat.at<double>(1, 2), rightTVec.at<double>(1),
		rightRMat.at<double>(2, 0), rightRMat.at<double>(2, 1), rightRMat.at<double>(2, 2), rightTVec.at<double>(2)
	};
	Mat rightPMat = Mat(3, 4, CV_64FC1, rightPArray); // right P Matrix

	vector<Point3f> triangulationResult = triangulateInOpenCV(leftPMat, rightPMat, leftImagePoints, rightImagePoints);
	cout << "reconstruction result" << endl;
	cout << triangulationResult << endl;

	return 0;
}

1 Answer

Answer (score: 0):

Your computation of the projection matrices is not quite right. Let's ignore lens distortion to make life easier. If A1 and A2 denote the intrinsic parameters of the two cameras, then P1 = A1 * [R1 | t1] and P2 = A2 * [R2 | t2] are the projection matrices of the left and right cameras. You may also want to modify your code a bit so that it only contains double (or float) values. With these changes I get

[ 0.5000000000000006,  0.4999999999999996, -0.4999999999999953;
  0.4999999999999991,  0.5000000000000002,  0.5000000000000033;
 -0.5000000000000008,  0.5000000000000003,  0.5000000000000016;
 -0.4999999999999995,  0.4999999999999996, -0.4999999999999952;
  0.5000000000000002, -0.4999999999999998, -0.4999999999999991;
 -0.4999999999999993, -0.4999999999999998, -0.5000000000000001;
 -0.5000000000000012, -0.5000000000000003,  0.4999999999999947]

which is close to the input points.
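The essential change is how the projection matrices are built. A minimal sketch of that step, assuming the intrinsic matrix and the extrinsics are already filled as CV_64F Mats (variable names follow the listing below):

// Sketch: P = A * [R | t]. Stack R (3x3) and t (3x1) side by side
// with hconcat, then multiply by the 3x3 intrinsic matrix.
Mat Rt;
hconcat(rightRMat, rightTVec, Rt);   // 3x4 extrinsic matrix [R | t]
Mat rightPMat = intrisicMat * Rt;    // 3x4 projection matrix

Here is the complete code: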

#include <opencv2/opencv.hpp>

#include <iostream>
#include <string>

using namespace std;
using namespace cv;


vector<Point3d> generate3DPoints()
{
    vector<Point3d> pointsXYZ; // save 7 points

    double x, y, z;

    x = 0.5; y = 0.5; z = -0.5;
    pointsXYZ.push_back(Point3d(x, y, z));

    x = 0.5; y = 0.5; z = 0.5;
    pointsXYZ.push_back(Point3d(x, y, z));

    x = -0.5; y = 0.5; z = 0.5;
    pointsXYZ.push_back(Point3d(x, y, z));

    x = -0.5; y = 0.5; z = -0.5;
    pointsXYZ.push_back(Point3d(x, y, z));

    x = 0.5; y = -0.5; z = -0.5;
    pointsXYZ.push_back(Point3d(x, y, z));

    x = -0.5; y = -0.5; z = -0.5;
    pointsXYZ.push_back(Point3d(x, y, z));

    x = -0.5; y = -0.5; z = 0.5;
    pointsXYZ.push_back(Point3d(x, y, z));

    for (int i = 0; i < pointsXYZ.size(); i++)
    {
        cout << pointsXYZ[i] << endl;
    }

    return pointsXYZ;
}


vector<Point3d> triangulateInOpenCV(Matx34d leftPMat, Matx34d rightPMat, vector<Point2d> leftPtsxy, vector<Point2d> rightPtsxy)
{
    Mat corrPtsXYZHomo(4, leftPtsxy.size(), CV_64FC1);
    triangulatePoints(leftPMat, rightPMat, leftPtsxy, rightPtsxy, corrPtsXYZHomo);

    cout << "reconsturction result 3D points in homo-coordinate" << endl;
    cout << corrPtsXYZHomo << endl;

    vector<Point3d> corrPtsXYZ;

    for (int i = 0; i < corrPtsXYZHomo.cols; i++)
    {
        double x = corrPtsXYZHomo.at<double>(0, i) / corrPtsXYZHomo.at<double>(3, i);
        double y = corrPtsXYZHomo.at<double>(1, i) / corrPtsXYZHomo.at<double>(3, i);
        double z = corrPtsXYZHomo.at<double>(2, i) / corrPtsXYZHomo.at<double>(3, i);
        corrPtsXYZ.push_back(Point3d(x, y, z));
    }

    return corrPtsXYZ;
}



int main(int argc, char* argv[])
{
    vector<Point3d> objectPoints = generate3DPoints();  // generated by myself
    vector<Point2d> rightImagePoints; // projected points in the right image
    vector<Point2d> leftImagePoints; // projected points in the left image


    // 1. intrinsic Matrix
    Mat intrisicMat(3, 3, DataType<double>::type);
    intrisicMat.at<double>(0, 0) = 1.6415318549788924e+003;
    intrisicMat.at<double>(1, 0) = 0;
    intrisicMat.at<double>(2, 0) = 0;

    intrisicMat.at<double>(0, 1) = 0;
    intrisicMat.at<double>(1, 1) = 1.7067753507885654e+003;
    intrisicMat.at<double>(2, 1) = 0;

    intrisicMat.at<double>(0, 2) = 5.3262822453148601e+002;
    intrisicMat.at<double>(1, 2) = 3.8095355839052968e+002;
    intrisicMat.at<double>(2, 2) = 1;


    // 2, 3. rotation and translation (R, t)
    // left
    double leftRMatArray[] =
    {
        1, 0, 0,
        0, 1, 0,
        0, 0, 1
    };
    Mat leftRMat = Mat(3, 3, CV_64FC1, leftRMatArray); //Rotation Matrix
    Mat leftRVec(3, 1, DataType<double>::type); // Rotation vector
    Rodrigues(leftRMat, leftRVec);

    Mat leftTVec(3, 1, DataType<double>::type); // Translation vector
    leftTVec.at<double>(0) = 4.1158489381208221e+000;
    leftTVec.at<double>(1) = 4.6847683212704716e+000;
    leftTVec.at<double>(2) = 3.6169795190294256e+001;

    // right
    Mat rightRVec(3, 1, DataType<double>::type); // Rotation vector
    rightRVec.at<double>(0) = -3.9277902400761393e-002;
    rightRVec.at<double>(1) = 3.7803824407602084e-002;
    rightRVec.at<double>(2) = 2.6445674487856268e-002;


    Mat rightRMat; // Rotation Matrix
    Rodrigues(rightRVec, rightRMat);

    Mat rightTVec(3, 1, DataType<double>::type); // Translation vector
    rightTVec.at<double>(0) = 2.1158489381208221e+000;
    rightTVec.at<double>(1) = -7.6847683212704716e+000;
    rightTVec.at<double>(2) = 2.6169795190294256e+001;



    // 4. distortion
    Mat distCoeffs(5, 1, DataType<double>::type);   // Distortion vector
    distCoeffs.at<double>(0) = 0;// -7.9134632415085826e-001;
    distCoeffs.at<double>(1) = 0;//1.5623584435644169e+000;
    distCoeffs.at<double>(2) = 0;//-3.3916502741726508e-002;
    distCoeffs.at<double>(3) = 0;//-1.3921577146136694e-002;
    distCoeffs.at<double>(4) = 0;//1.1430734623697941e+002;

    cout << "Intrisic matrix: " << intrisicMat << endl << endl;
    cout << "Distortion coef: " << distCoeffs << endl << endl;
    cout << "left Rotation vector: " << leftRVec << endl << endl;
    cout << "left Translation vector: " << leftTVec << endl << endl;
    cout << "right Rotation vector: " << rightRVec << endl << endl;
    cout << "right Translation vector: " << rightTVec << endl << endl;

    // project
    // left
    projectPoints(objectPoints, leftRVec, leftTVec, intrisicMat, distCoeffs, leftImagePoints);

    // right
    projectPoints(objectPoints, rightRVec, rightTVec, intrisicMat, distCoeffs, rightImagePoints);

    for (int i = 0; i < leftImagePoints.size(); ++i)
    {
        cout << "Image point: " << objectPoints[i] << " Projected to " << leftImagePoints[i] << endl;
    }
    cout << "------------------" << endl;
    for (int i = 0; i < rightImagePoints.size(); ++i)
    {
        cout << "Image point: " << objectPoints[i] << " Projected to " << rightImagePoints[i] << endl;
    }

    Mat m1 = intrisicMat * leftRMat;
    Mat t1 = intrisicMat * leftTVec;

    //triangulate
    double leftPArray[] =
    {
        m1.at<double>(0, 0),m1.at<double>(0, 1),m1.at<double>(0, 2), t1.at<double>(0,0),
        m1.at<double>(1, 0),m1.at<double>(1, 1),m1.at<double>(1, 2), t1.at<double>(1,0),
        m1.at<double>(2, 0),m1.at<double>(2, 1),m1.at<double>(2, 2), t1.at<double>(2,0)
    };
    Mat leftPMat = Mat(3, 4, CV_64FC1, leftPArray); // left P Matrix

    Mat m2 = intrisicMat * rightRMat;
    Mat t2 = intrisicMat * rightTVec;
    double rightPArray[] =
    {
        m2.at<double>(0, 0), m2.at<double>(0, 1), m2.at<double>(0, 2), t2.at<double>(0,0),
        m2.at<double>(1, 0), m2.at<double>(1, 1), m2.at<double>(1, 2), t2.at<double>(1,0),
        m2.at<double>(2, 0), m2.at<double>(2, 1), m2.at<double>(2, 2), t2.at<double>(2,0)
    };
    Mat rightPMat = Mat(3, 4, CV_64FC1, rightPArray); // right P Matrix

    vector<Point3d> triangulationResult = triangulateInOpenCV(leftPMat, rightPMat, leftImagePoints, rightImagePoints);
    cout << "reconstruction result" << endl;
    cout << triangulationResult << endl;
    cin.get();
    return 0;
}
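One caveat worth noting: the listing above sets all distortion coefficients to zero, so the pinhole projection matrices match the projection model exactly. With real, distorted measurements you would typically undistort the 2D points before triangulating; a sketch (an assumption on my part, not part of the answer above):

// Sketch: remove lens distortion from measured pixels first.
// Passing the intrinsic matrix as the last argument (P) keeps the
// output in pixel coordinates of an ideal, distortion-free camera.
vector<Point2d> leftUndistorted;
undistortPoints(leftImagePoints, leftUndistorted, intrisicMat, distCoeffs,
                noArray(), intrisicMat);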