OpenCv,从鱼眼镜头对中对三角点进行三角测量

时间:2019-05-07 13:30:16

标签: opencv computer-vision triangulation

我有一副由鱼眼镜头组成的立体摄像机。

我有一个可以成功校准和校正图像的应用程序。 整改看起来不错。我在两个图像上匹配了功能,看起来也不错。

现在,我需要对这些点进行三角剖分,但是我似乎无法获得良好的结果。

我已检查:

本身没有失真。 通过用水平线显示两个图像进行校正。 该功能与cv::DrawMatches

相匹配

所有这些步骤看起来都很不错,但是下面的三角测量步骤输出垃圾。

我在做什么错?

我的三角测量代码是:

void testTri(Mat K1, Vec4d D1, Mat R1, Mat K2, Vec4d D2, Mat R2, Vec3d T, cv::Mat P1, Mat P2)
{

    cv::Ptr<cv::ORB> orb = ORB::create(5000);

    vector<KeyPoint> kp1, kp2;
    Mat d1, d2;

    Mat orig_left, orig_right;
    orig_left = imread("/imgs_for_test/7_left.png");
    orig_right = imread("/imgs_for_test/7_right.png");

    cv::Mat lmapx, lmapy, rmapx, rmapy;
    cv::fisheye::initUndistortRectifyMap(K1, D1, R1, K1, Size(w, h), CV_16SC2, lmapx, lmapy);

    cv::fisheye::initUndistortRectifyMap(K2, D2, R2, K1, Size(w, h), CV_16SC2, rmapx, rmapy);
    Mat undist_left, undist_right;
    cv::remap(orig_left, undist_left, lmapx, lmapy, cv::INTER_LINEAR);
    cv::remap(orig_right, undist_right, rmapx, rmapy, cv::INTER_LINEAR);

    orb->detectAndCompute(undist_left, Mat(), kp1, d1);
    orb->detectAndCompute(undist_right, Mat(), kp2, d2);

    std::vector<cv::DMatch> matches;

    matches = RobustMatching(d1, d2, kp1, kp2);

    if (matches.empty())
    {
        std::cout << "No row matches for triangulation found." << std::endl;

    }
    cv::Mat outMatched;
    std::vector<char> mask(matches.size(), 1);
    cv::drawMatches(undist_left, kp1, undist_right, kp2, matches, outMatched, cv::Scalar::all(-1), cv::Scalar::all(-1), mask);
    cv::imshow("matched", outMatched);
    cv::waitKey(1);

    std::vector<cv::Point2f> pntsMatchedLeft;
    std::vector<cv::Point2f> pntsMatchedRight;

    if (matches.size() > 5)
    {
        for (int x = 0; x < matches.size(); x++)
        {
            cv::Point2f pnt2dL = kp1[matches[x].queryIdx].pt;
            pntsMatchedLeft.push_back(pnt2dL);

            cv::Point2f pnt2dR = kp2[matches[x].trainIdx].pt;
            pntsMatchedRight.push_back(pnt2dR);


        }


        // Camera 1 Projection Matrix K[I|0]
        cv::Mat PP1(3, 4, cv::DataType<float>::type);
        K1.copyTo(PP1.rowRange(0, 3).colRange(0, 3));  //K1 is the camera matrix


    //stereo 
        float tx = -4.1392073003541432e-02; //Stereo baseline
        float ty = 0;
        float tz = 0;

        //rotation ( images are rectified, so this is zero)
        float rots[9] = { 1,0,0,0,1,0,0,0,1 };
        cv::Mat R0 = cv::Mat(3, 3, CV_32F, rots);


        //translation. (stereo camera, rectified images, baseline)
        float trans[3] = { tx,ty,tz };
        cv::Mat t = cv::Mat(3, 1, CV_32F, trans);


        // Camera 2 Projection Matrix K[R|t]
        cv::Mat PP2(3, 4, CV_32F);
        R0.copyTo(PP2.rowRange(0, 3).colRange(0, 3));
        t.copyTo(PP2.rowRange(0, 3).col(3));
        PP2 = K2 * PP2;


        Mat points3Dnorm;

        cv::Mat point3d_homo;
        cv::triangulatePoints(PP1, PP2,
            pntsMatchedLeft, pntsMatchedRight,
            point3d_homo);

        assert(point3d_homo.cols == triangulation_points1.size());


        point3d_homo.copyTo(points3Dnorm);

        std::ofstream fsD;
        std::string filenameD = "localMap.csv";
        fsD.open(filenameD);

        for (int i = 0; i < point3d_homo.cols; i++)
        {
            points3Dnorm.at<float>(0, i) = point3d_homo.at<float>(0, i) / point3d_homo.at<float>(3, i);
            points3Dnorm.at<float>(1, i) = point3d_homo.at<float>(1, i) / point3d_homo.at<float>(3, i);
            points3Dnorm.at<float>(2, i) = point3d_homo.at<float>(2, i) / point3d_homo.at<float>(3, i);

                    fsD << i << " " << points3Dnorm.at<float>(0, i) << " " << points3Dnorm.at<float>(1, i) << " " << points3Dnorm.at<float>(2, i) << std::endl;

        }

    }


}

0 个答案:

没有答案