OpenCV: Fundamental matrix accuracy

Time: 2013-12-12 16:07:26

Tags: opencv image-processing computer-vision camera-calibration reprojection-error

I am trying to compute the fundamental matrix for 2 images (different photos of a static scene taken by the same camera).

I compute it with findFundamentalMat and use the result to compute other matrices (Essential, Rotation, ...). The results are obviously wrong, so I am trying to determine the accuracy of the computed fundamental matrix.

Using the epipolar constraint equation, I compute the fundamental matrix error. The error is very high (in the hundreds). I do not know what is wrong with my code and would really appreciate any help. In particular: is something missing from the fundamental matrix computation? Is the way I compute the error wrong?
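
For reference, the quantity accumulated per match in the code below is the symmetric epipolar (point-to-line) distance; computeCorrespondEpilines returns lines normalized so that a² + b² = 1, which makes the denominators below equal to one:

    % Epipolar constraint for a correspondence (x_1, x_2) in homogeneous coordinates
    x_2^\top F \, x_1 = 0
    % Symmetric epipolar distance, with epipolar lines F x_1 (in image 2) and F^\top x_2 (in image 1)
    \mathrm{err}(x_1, x_2) =
        \frac{\lvert x_1^\top (F^\top x_2) \rvert}{\sqrt{(F^\top x_2)_1^2 + (F^\top x_2)_2^2}}
      + \frac{\lvert x_2^\top (F \, x_1) \rvert}{\sqrt{(F \, x_1)_1^2 + (F \, x_1)_2^2}}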

Also, the numbers I get vary a lot between runs of the code. There are usually many outliers; for example, out of more than 80 matches, only 10 are inliers.

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/features2d.hpp>   // SURF lives in the nonfree module in OpenCV 2.4
#include <opencv2/calib3d/calib3d.hpp>
using namespace cv;
using namespace std;

Mat img_1 = imread( "imgl.jpg", CV_LOAD_IMAGE_GRAYSCALE );
Mat img_2 = imread( "imgr.jpg", CV_LOAD_IMAGE_GRAYSCALE );
if( !img_1.data || !img_2.data )
{ return -1; }

//-- Step 1: Detect the keypoints using SURF Detector

int minHessian = 1000;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> keypoints_1, keypoints_2;

detector.detect( img_1, keypoints_1 );
detector.detect( img_2, keypoints_2 );

//-- Step 2: Calculate descriptors (feature vectors)

SurfDescriptorExtractor extractor;
Mat descriptors_1, descriptors_2;
extractor.compute( img_1, keypoints_1, descriptors_1 );
extractor.compute( img_2, keypoints_2, descriptors_2 );

//-- Step 3: Matching descriptor vectors with a brute force matcher

BFMatcher matcher(NORM_L1, true);
std::vector< DMatch > matches;
matcher.match( descriptors_1, descriptors_2, matches );

vector<Point2f>imgpts1,imgpts2;
for( unsigned int i = 0; i<matches.size(); i++ )
{
    // queryIdx is the "left" image
    imgpts1.push_back(keypoints_1[matches[i].queryIdx].pt);
    // trainIdx is the "right" image
    imgpts2.push_back(keypoints_2[matches[i].trainIdx].pt);
}

//-- Step 4: Calculate Fundamental matrix

Mat f_mask;
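// With FM_RANSAC, the 4th argument (0.5 here) is the maximum allowed distance from a
// point to its epipolar line in pixels, and the 5th (0.99) is the RANSAC confidence.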
Mat F = findFundamentalMat(imgpts1, imgpts2, FM_RANSAC, 0.5, 0.99, f_mask);

//-- Step 5: Calculate Fundamental matrix error

//Camera intrinsics
double data[] = {1189.46 , 0.0, 805.49,
                0.0, 1191.78, 597.44,
                0.0, 0.0, 1.0};
Mat K(3, 3, CV_64F, data);
//Camera distortion parameters
double dist[] = { -0.03432, 0.05332, -0.00347, 0.00106, 0.00000};
Mat D(1, 5, CV_64F, dist);

//working with undistorted points
vector<Point2f> undistorted_1,undistorted_2;
vector<Point3f> line_1, line_2;
undistortPoints(imgpts1,undistorted_1,K,D);
undistortPoints(imgpts2,undistorted_2,K,D);
computeCorrespondEpilines(undistorted_1,1,F,line_1);
computeCorrespondEpilines(undistorted_2,2,F,line_2);

double f_err=0.0;
double fx,fy,cx,cy;
fx=K.at<double>(0,0);fy=K.at<double>(1,1);cx=K.at<double>(0,2);cy=K.at<double>(1,2);
Point2f pt1, pt2;
int inliers=0;
//calculation of fundamental matrix error for inliers
for (int i=0; i<f_mask.size().height; i++)
    if (f_mask.at<char>(i)==1)
    {
        inliers++;
        //calculate non-normalized values
        pt1.x = undistorted_1[i].x * fx + cx;
        pt1.y = undistorted_1[i].y * fy + cy;
        pt2.x = undistorted_2[i].x * fx + cx;
        pt2.y = undistorted_2[i].y * fy + cy;
        f_err += fabs(pt1.x*line_2[i].x +
                pt1.y*line_2[i].y + line_2[i].z)
                + fabs(pt2.x*line_1[i].x +
                pt2.y*line_1[i].y + line_1[i].z);
    }

double AvrErr = f_err/inliers;
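
For context, a minimal sketch of the F → E → (R, t) step mentioned at the top of the question might look like the following (this is not the asker's actual code; it reuses the intrinsics K from above and the standard SVD decomposition of the essential matrix):

Mat E = K.t() * F * K;                           // essential matrix from F and intrinsics
SVD svd(E, SVD::MODIFY_A);
Mat W = (Mat_<double>(3,3) << 0,-1,0,  1,0,0,  0,0,1);
Mat R1 = svd.u * W     * svd.vt;                 // two candidate rotations
Mat R2 = svd.u * W.t() * svd.vt;
Mat t  = svd.u.col(2);                           // translation, up to sign and scale
// If det(R1) or det(R2) is -1, negate the matrix; the valid (R, t) combination is the one
// that places triangulated points in front of both cameras (cheirality check).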

3 Answers:

Answer 0 (score: 0)

I think the problem is that you compute the fundamental matrix from the raw brute-force matches only. You should filter these correspondences further first, for example with a ratio test and a symmetry test. I suggest you read page 233, Chapter 9 of the book "OpenCV 2 Computer Vision Application Programming Cookbook". It explains this very well!
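
As an illustration only (not code from the original answer), a ratio test on the matches from Step 3 could look roughly like this, using knnMatch instead of the cross-checked match call; the 0.8 threshold is an assumed, commonly used value:

    // Ratio test sketch: keep a match only if its best distance is clearly
    // better than the second-best candidate (0.8 is an assumed threshold).
    BFMatcher ratio_matcher(NORM_L1);              // no cross-check when using knnMatch
    vector< vector<DMatch> > knn_matches;
    ratio_matcher.knnMatch(descriptors_1, descriptors_2, knn_matches, 2);

    vector<DMatch> good_matches;
    for (size_t i = 0; i < knn_matches.size(); i++)
    {
        if (knn_matches[i].size() == 2 &&
            knn_matches[i][0].distance < 0.8f * knn_matches[i][1].distance)
            good_matches.push_back(knn_matches[i][0]);
    }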

Answer 1 (score: 0)

It seems that the points are not normalized before the fundamental matrix is computed. It could be that OpenCV's findFundamentalMat does not use the normalized 8-point algorithm but only the plain algorithm without normalization. If that is the case, your results would suffer from the missing normalization.
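
For reference, the classic Hartley normalization translates the points to their centroid and scales them so that the mean distance from the origin is sqrt(2). A minimal sketch of such a helper (normalizePoints is a made-up name, not part of the original answer) might look like this:

    // Hartley-style normalization sketch: translate points to their centroid and
    // scale so the mean distance to the origin is sqrt(2). T is the similarity
    // transform, so that an F estimated on normalized points can be de-normalized
    // as F = T2.t() * Fn * T1.
    void normalizePoints(const vector<Point2f> &pts, vector<Point2f> &norm_pts, Mat &T)
    {
        double cx = 0.0, cy = 0.0;
        for (size_t i = 0; i < pts.size(); i++) { cx += pts[i].x; cy += pts[i].y; }
        cx /= pts.size(); cy /= pts.size();

        double mean_dist = 0.0;
        for (size_t i = 0; i < pts.size(); i++)
        {
            double dx = pts[i].x - cx, dy = pts[i].y - cy;
            mean_dist += sqrt(dx*dx + dy*dy);
        }
        mean_dist /= pts.size();

        double s = sqrt(2.0) / mean_dist;
        norm_pts.resize(pts.size());
        for (size_t i = 0; i < pts.size(); i++)
            norm_pts[i] = Point2f(float((pts[i].x - cx) * s), float((pts[i].y - cy) * s));

        T = (Mat_<double>(3,3) << s, 0, -s*cx,
                                  0, s, -s*cy,
                                  0, 0, 1);
    }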

Answer 2 (score: 0)

  • Given that we are provided the intrinsic matrix K and the distortion parameters D, we should undistort the image points before feeding them to findFundamentalMat, and we should work in those undistorted image coordinates from then on (i.e. also for computing the error). I found that this simple change reduced the maximum error over any image point pair from 176.0f to 0.2, and increased the number of inliers from 18 to 77.

  • I also played around with normalizing the undistorted image points before finding the fundamental matrix; this reduces the maximum error over any image point pair to almost zero, but it does not increase the number of inliers any further.

    const float kEpsilon = 1.0e-6f;
    
    float sampsonError(const Mat &dblFMat, const Point2f &pt1, const Point2f &pt2)
    {
        // Homogeneous image points
        Mat m_pt1(3, 1, CV_64FC1);
        Mat m_pt2(3, 1, CV_64FC1);
        m_pt1.at<double>(0,0) = pt1.x; m_pt1.at<double>(1,0) = pt1.y; m_pt1.at<double>(2,0) = 1.0;
        m_pt2.at<double>(0,0) = pt2.x; m_pt2.at<double>(1,0) = pt2.y; m_pt2.at<double>(2,0) = 1.0;

        assert(dblFMat.rows==3 && dblFMat.cols==3);
        assert(m_pt1.rows==3 && m_pt1.cols==1);
        assert(m_pt2.rows==3 && m_pt2.cols==1);

        Mat dblFMatT(dblFMat.t());
        Mat dblFMatp1  = dblFMat  * m_pt1;   // F * x1   (epipolar line in image 2)
        Mat dblFMatTp2 = dblFMatT * m_pt2;   // F^T * x2 (epipolar line in image 1)
        assert(dblFMatp1.rows==3 && dblFMatp1.cols==1);
        assert(dblFMatTp2.rows==3 && dblFMatTp2.cols==1);

        Mat numerMat = m_pt2.t() * dblFMatp1;    // x2^T * F * x1
        double numer = numerMat.at<double>(0,0);
        if (fabs(numer) < kEpsilon)
        {
            return 0;
        } else {
            // Sampson error: (x2^T F x1)^2 divided by the sum of squares of the
            // first two components of F*x1 and F^T*x2
            double denom = dblFMatp1.at<double>(0,0)*dblFMatp1.at<double>(0,0)
                         + dblFMatp1.at<double>(1,0)*dblFMatp1.at<double>(1,0)
                         + dblFMatTp2.at<double>(0,0)*dblFMatTp2.at<double>(0,0)
                         + dblFMatTp2.at<double>(1,0)*dblFMatTp2.at<double>(1,0);
            return (float)((numer*numer)/denom);
        }
    }
    
    #define UNDISTORT_IMG_PTS 1
    #define NORMALIZE_IMG_PTS 1
    
    int filter_imgpts_pairs_with_epipolar_constraint(
        const vector<Point2f> &raw_imgpts_1,
        const vector<Point2f> &raw_imgpts_2,
        int imgW,
        int imgH
    )
    {
    
    #if UNDISTORT_IMG_PTS
        //Camera intrinsics
        double data[] = {1189.46 , 0.0, 805.49,
                        0.0, 1191.78, 597.44,
                        0.0, 0.0, 1.0};
        Mat K(3, 3, CV_64F, data);
        //Camera distortion parameters
        double dist[] = { -0.03432, 0.05332, -0.00347, 0.00106, 0.00000};
        Mat D(1, 5, CV_64F, dist);
    
    
        //working with undistorted points
        vector<Point2f> unnormalized_imgpts_1,unnormalized_imgpts_2;
        undistortPoints(raw_imgpts_1,unnormalized_imgpts_1,K,D);
        undistortPoints(raw_imgpts_2,unnormalized_imgpts_2,K,D);
    
    #else
        vector<Point2f> unnormalized_imgpts_1(raw_imgpts_1);
        vector<Point2f> unnormalized_imgpts_2(raw_imgpts_2);
    #endif
    
    
    
    #if NORMALIZE_IMG_PTS
    
        float c_col=imgW/2.0f;
        float c_row=imgH/2.0f;
        float multiply_factor= 2.0f/(imgW+imgH);
    
        vector<Point2f> final_imgpts_1(unnormalized_imgpts_1);
        vector<Point2f> final_imgpts_2(unnormalized_imgpts_2);
    
        for( auto iit=final_imgpts_1.begin(); iit != final_imgpts_1.end(); ++ iit)
        {
            Point2f &imgpt(*iit);
            imgpt.x=(imgpt.x - c_col)*multiply_factor;
            imgpt.y=(imgpt.y - c_row)*multiply_factor;
        }
        for( auto iit=final_imgpts_2.begin(); iit != final_imgpts_2.end(); ++ iit)
        {
            Point2f &imgpt(*iit);
            imgpt.x=(imgpt.x - c_col)*multiply_factor;
            imgpt.y=(imgpt.y - c_row)*multiply_factor;
        }
    
    #else
    
        vector<Point2f> final_imgpts_1(unnormalized_imgpts_1);
        vector<Point2f> final_imgpts_2(unnormalized_imgpts_2);
    #endif
    
        int algorithm=FM_RANSAC;
        //int algorithm=FM_LMEDS;
    
    
        vector<uchar>status;
    
        Mat F =  findFundamentalMat  (final_imgpts_1, final_imgpts_2, algorithm, 0.5, 0.99, status);
        int n_inliners=std::accumulate(status.begin(), status.end(), 0);
    
    
    
        assert(final_imgpts_1.size() == final_imgpts_2.size());
        vector<float> serr;
        for( unsigned int i = 0; i< final_imgpts_1.size(); i++ )
        {
            const Point2f &p_1(final_imgpts_1[i]);
            const Point2f &p_2(final_imgpts_2[i]);
            float err= sampsonError(F, p_1, p_2);
            serr.push_back(err);
        }
        float max_serr=*max_element(serr.begin(), serr.end());
        cout << "found " << raw_imgpts_1.size() << "matches " << endl;
        cout << " and " << n_inliners << " inliners" << endl;
        cout << " max sampson err" << max_serr << endl;
        return 0;
    }
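
A hedged usage example, reusing imgpts1, imgpts2 and img_1 from the question's code, might be:

    filter_imgpts_pairs_with_epipolar_constraint(imgpts1, imgpts2, img_1.cols, img_1.rows);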