3D重建正在进行,坐标错误

时间:2019-04-24 04:17:16

标签: c++ opencv computer-vision 3d-reconstruction

假设相机的内参和外参已知。然后,利用特征点匹配,以及参考图像和摄像机图像各自的相机姿态(R | T),进行三角测量。由于仅使用一台摄像机,两幅图像的内参相同。

    ...
// Build the camera intrinsic matrix K from focal lengths (f.x, f.y) and the
// principal point (c.x, c.y).  getIntrinsic / getDistortCoeff are project
// helpers not shown in this excerpt.
Mat K = getIntrinsic(f.x, f.y, c.x, c.y);

Mat distort = getDistortCoeff(dist[0], dist[1], dist[2], dist[3], dist[4]);

    Size boardPattern(10, 7); // board pattern
vector<Point3d> objectPoints; // x, y, z
float board_cellsize = 0.025f; // cell size = 0.025

// Board-frame 3D coordinates of the chessboard corners: a planar grid on
// z = 0 spaced board_cellsize apart, row-major to match the corner ordering
// returned by findChessboardCorners.
for (int height = 0; height < boardPattern.height; height++)
{
    for (int width = 0; width < boardPattern.width; width++)
    {
        objectPoints.push_back(Point3d(board_cellsize * width, board_cellsize * height, 0));
    }
}



// NOTE(review): findChessboardCorners produces Point2f corners; writing them
// into a vector<Point2d> may be rejected by OpenCV's OutputArray machinery —
// verify, and prefer vector<Point2f> here.
vector<Point2d> corner_point1;
cv::Mat R1;
Mat rv1, tv1;

// NOTE(review): the return value is not checked here (it IS checked for the
// camera frame below); if detection fails, solvePnP runs on empty corners.
findChessboardCorners(db_original, boardPattern, corner_point1,
    CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE + CALIB_CB_FAST_CHECK);

// Pose of the reference image w.r.t. the board frame:
// x_cam1 = R1 * X_board + tv1.
solvePnP(objectPoints, corner_point1, K, distort, rv1, tv1);
cv::Rodrigues(rv1, R1);

/// So R1 and tv1 (the reference image's pose) have been obtained.

Mat gray_db;
// NOTE(review): CV_RGB2GRAY assumes RGB channel order, but images loaded by
// OpenCV (imread / VideoCapture) are BGR — confirm; CV_BGR2GRAY is the usual
// choice (the grayscale weights differ slightly).
cvtColor(db_original, gray_db, CV_RGB2GRAY);


// SIFT detector/extractor used to match the reference image against live
// camera frames.  (A single SIFT instance could serve both roles.)
Ptr<SIFT> detector = SIFT::create();
Ptr<SIFT> extractor = SIFT::create();

// FLANN-based matcher, suitable for SIFT's float descriptors.
FlannBasedMatcher matcher;


// Keypoints and descriptors of the reference (database) image, computed once.
std::vector<KeyPoint> kp_object;
Mat des_object;

detector->detect(gray_db, kp_object);
extractor->compute(gray_db, kp_object, des_object);

// Counter used below to skip the first 50 loop iterations.
int framecount = 0;


// Main capture loop: estimate the camera pose per frame, match SIFT features
// against the reference image, and collect 2D-2D correspondences for
// triangulation.  (The loop's closing brace is outside this excerpt.)
while (1)
{
    // NOTE(review): intended to skip the first 50 frames, but `continue`
    // runs before `capture >> frame`, so this only skips 50 loop iterations
    // without ever reading from the camera — confirm the intent.
    if (framecount < 50) {
        framecount++;
        continue;
    }

    std::vector<DMatch> good_matches;
    Mat frame;
    Mat grayImage;

    capture >> frame;

    // NOTE(review): as above, findChessboardCorners yields Point2f corners;
    // vector<Point2d> may not be accepted — verify.
    vector<Point2d> corner_point2;
    cv::Mat R2;
    Mat rv2, tv2;

    // Only process frames where the chessboard is visible, so the current
    // camera pose (rv2/tv2) is available.
    if (findChessboardCorners(frame, boardPattern, corner_point2,
        CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE + CALIB_CB_FAST_CHECK))
    {
        // Pose of the current frame w.r.t. the board frame:
        // x_cam2 = R2 * X_board + tv2.
        solvePnP(objectPoints, corner_point2, K, distort, rv2, tv2);
        cv::Rodrigues(rv2, R2);
                    // cam image: R, t

        // NOTE(review): frames from VideoCapture are BGR; CV_BGR2GRAY is the
        // conventional conversion here — confirm.
        cvtColor(frame, grayImage, CV_RGB2GRAY);

        // SIFT keypoints/descriptors of the current camera frame.
        std::vector<KeyPoint> cam_object;
        Mat des_cam_object;

        detector->detect(grayImage, cam_object);
        extractor->compute(grayImage, cam_object, des_cam_object);

        if ((des_object.empty()) || (des_cam_object.empty()))
            continue;

        // k-NN match with k = 2: query = reference descriptors, train =
        // current-frame descriptors, so queryIdx indexes kp_object and
        // trainIdx indexes cam_object.
        std::vector< std::vector<DMatch> > matches;

        matcher.knnMatch(des_object, des_cam_object, matches, 2);

        if (matches.empty()) continue;


        //-- Filter matches using the Lowe's ratio test
        const float ratio_thresh = 0.8f;

        for (size_t i = 0; i < matches.size(); i++)
        {
            if (matches[i][0].distance < ratio_thresh * matches[i][1].distance)
            {
                good_matches.push_back(matches[i][0]);
            }
        }



        // NOTE(review): kp_object are keypoints of the *reference* image;
        // drawing them onto the camera frame is a mismatch — cam_object was
        // probably intended here.
        drawKeypoints(frame, kp_object, frame, Scalar::all(-1), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);

        matches.clear();

        Mat img_matches;


        drawMatches(db_original, kp_object, frame, cam_object, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
        namedWindow("good Matches", 1);
        imshow("good Matches", img_matches);


        // Collect matched pixel coordinates: reference-image points into
        // first_keypoints, current-frame points into second_keypoints.
        // NOTE(review): both vectors are declared outside this excerpt and
        // never cleared here, so they accumulate across frames — confirm
        // that is intended.
        for (int i = 0; i < good_matches.size(); i++)
        {
            //cout << i << " :";
            //-- Get the keypoints from the good matches
            if (kp_object[good_matches[i].queryIdx].pt.x > 0 && kp_object[good_matches[i].queryIdx].pt.y > 0
                && cam_object[good_matches[i].trainIdx].pt.x > 0 && cam_object[good_matches[i].trainIdx].pt.y > 0) {
                first_keypoints.push_back(kp_object[good_matches[i].queryIdx].pt);
                //cout << "first point" << keypoints1[ good_matches[i].queryIdx ].pt << endl;

                second_keypoints.push_back(cam_object[good_matches[i].trainIdx].pt);
                //cout << "second point" << keypoints2[ good_matches[i].trainIdx ].pt << endl;
            }
        }

        // traingularation

        if (first_keypoints.empty() || second_keypoints.empty()) continue;


        std::vector<cv::Point2d> points0 = first_keypoints;
        std::vector<cv::Point2d> points1 = second_keypoints;

        if (points0.size() != points1.size()) return -1;

        // Relative pose mapping reference-camera (cam1) coordinates into the
        // current camera (cam2).  From x_cam_i = R_i * X_board + tv_i:
        //   R = R2 * R1^T,   t = tv2 - R * tv1
        Mat R = R2 * R1.t();
        // BUG FIX: the original computed T = -R2*R1.t()*tv1, dropping the
        // tv2 term.  That is only correct when tv2 == 0, so the baseline —
        // and therefore every triangulated point — was wrong.
        Mat T = tv2 - R2 * R1.t() * tv1;
                    // R|t between two cameras

        // Reference camera projection: P0 = K [I | 0].  Triangulated points
        // are therefore expressed in the *reference camera* frame, not the
        // board/world frame.
        cv::Mat P0 = K * cv::Mat::eye(3, 4, CV_64F);

        cv::Mat Rt, X;
        cv::hconcat(R, T, Rt);

        // Current camera projection: P1 = K [R | t].
        cv::Mat P1 = K * Rt;

        // X comes back as a 4xN matrix of homogeneous points.
        cv::triangulatePoints(P0, P1, points0, points1, X);



        X.row(0) = X.row(0) / X.row(3);

        X.row(1) = X.row(1) / X.row(3);

        X.row(2) = X.row(2) / X.row(3);

        X.row(3) = 1;

                    ....
                    fprintf(fout, "%f %f %f\n", X.at<double>(0, c), X.at<double>(1, c), X.at<double>(2, c));
                    ....

main.cpp

    // Board-frame chessboard corner coordinates, same 10x7 grid as in the
    // other file.  NOTE(review): built as Point3f here but Point3d there —
    // pick one type for consistency.
    float board_cellsize = 0.025f;
for (int height = 0; height < boardPattern.height; height++)
{
    for (int width = 0; width < boardPattern.width; width++)
    {
        objectPoints.push_back(Point3f(board_cellsize * width, board_cellsize * height, 0));
    }
}
    // NOTE(review): (x, y, w) — presumably rows 0/1/2 of the triangulated X
    // ('w' looks like a misnamed z; after normalization row 3 is 1).  These
    // points are in the reference *camera* frame, yet they are re-projected
    // below with a *board*-frame pose (rv, tv from solvePnP); convert with
    // X_board = R1^T * (X - tv1) first, or the overlay lands in the wrong
    // place — this matches the reported symptom.
    reconstruct_point.push_back(cv::Point3f(x, y, w));
    if (findChessboardCorners(input, boardPattern, corner_point,
        CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE + CALIB_CB_FAST_CHECK))
    {
        Mat rv, tv;
        // Pose of the current input image w.r.t. the board frame.
        solvePnP(objectPoints, corner_point, K, distort, rv, tv);

        Mat result;

        // Project the reconstructed 3D points into the image using the
        // board-frame pose (see the frame-mismatch note above).
        projectPoints(reconstruct_point, rv, tv, K, distort, result);




        // Flatten to a single channel and round to integer pixel coords.
        result.reshape(1).convertTo(result, CV_32S);



        // Draw each projected point as a small red (BGR) circle.
        for (int i = 0; i < result.rows; i++)
        {
                cv::circle(input, Point(result.row(i)), 2, Scalar(0, 0, 255), 3);
        }
我省略了一些非关键代码。

最后,它不能正确显示。

是我省略了某个重要步骤,还是我哪里犯了错误?

我也尝试过先求E(本质矩阵)和F(基础矩阵)并使用recoverPose,但该函数返回的是归一化的t向量(模长为1),因此无法恢复真实尺度。

0 个答案:

没有答案