Image registration using OpenCV's warpAffine

Asked: 2016-10-18 05:49:51

Tags: c++ opencv affinetransform

I am trying to do image registration using ORB features. I am having a problem with warpAffine: the compiler tells me it cannot convert argument '1' from cv::Mat* to cv::InputArray. Here is my code:

#pragma once

// Standard C++ I/O library.
#include <iostream>
#include <string>
#include <iomanip>
#include <vector>


// OpenCV library.
#include <cv.h>
#include <highgui.h>

// OpenCV feature library.
#include <opencv2/opencv.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <nonfree/features2d.hpp>




// main().
int main(int argv, char ** argc)
{
    cv::Mat im_ref, im_cmp;

    std::string  str_ref, str_cmp;

    // Read reference image.
    //std::cout<<"Input reference image filename: ";
    //std::cin>>str_ref;
    std::cout<<"-> Reading images."<<std::endl;
    str_ref = "F:\\CPPs\\ImageRegistration\\OpenCVTest\\206.png";

    im_ref = cv::imread(str_ref);
    cv::imshow("Reference image", im_ref);

    // Read testing image.
    //std::cout<<"Input testing image filename: ";
    //std::cin>>str_cmp;
    str_cmp = "F:\\CPPs\\ImageRegistration\\OpenCVTest\\227.png";

    im_cmp = cv::imread(str_cmp);
    cv::imshow("Testing image", im_cmp);

    std::cout<<"Press any key to continue."<<std::endl;
    cvWaitKey(0);



    // Feature detection.
    std::cout<<"-> Feature detection."<<std::endl;
    std::vector <cv::KeyPoint> key_ref, key_cmp;           // Vectors for features extracted from reference and testing images.
    cv::Mat  des_ref, des_cmp;                             // Descriptors for features of 2 images.

    cv::ORB orb1;                                          // An ORB object.

    orb1(im_ref, cv::Mat(), key_ref, des_ref);             // Feature extraction.
    orb1(im_cmp, cv::Mat(), key_cmp, des_cmp);  


    // Show keypoints.
    std::cout<<"-> Show keypoints."<<std::endl;
    cv::Mat drawkey_ref, drawkey_cmp;                              // Output image for keypoint drawing.
    cv::drawKeypoints(im_ref, key_ref, drawkey_ref);               // Generate image for keypoint drawing.
    cv::imshow("Keypoints of reference", drawkey_ref);
    cv::drawKeypoints(im_cmp, key_cmp, drawkey_cmp);
    cv::imshow("Keypoints of test", drawkey_cmp);

    cvWaitKey(0);


    // Matching.
    std::cout<<"-> Matching."<<std::endl;
    cv::FlannBasedMatcher matcher1(new cv::flann::LshIndexParams(20,10,2));
    std::vector<cv::DMatch> matches1;
    matcher1.match(des_ref, des_cmp, matches1);            // Match two sets of features.

    double max_dist = 0; 
    double min_dist = 100;

    // Find out the minimum and maximum of all distance.
    for( int i = 0; i < des_ref.rows; i++ )
    { 
        double dist = matches1[i].distance;
        if( dist < min_dist ) min_dist = dist;
        if( dist > max_dist ) max_dist = dist;
    }

    cvWaitKey(0);


    // Eliminate relatively bad points.
    std::cout<<"-> Bad points elimination"<<std::endl;
    std::vector<cv::KeyPoint> kgood_ref, kgood_cmp;
    std::vector<cv::DMatch> goodMatch;
    for (int i=0; i<matches1.size(); i++)
    {
        if(matches1[i].distance < 2*min_dist)      // Keep points that are less than 2 times of the minimum distance.
        {
            goodMatch.push_back(matches1[i]);
            kgood_ref.push_back(key_ref[i]);
            kgood_cmp.push_back(key_cmp[i]);
        }  // end if
    } // end for
    cvWaitKey(0);


    // Calculate affine transform matrix.
    std::cout<<"-> Calculating affine transformation."<<std::endl;
    std::vector<cv::Point2f>   frm1_feature, frm2_feature;
    const int p_size = goodMatch.size();
    // * tmpP = new tmpPoint[p_size];
    cv::Point2f tmpP;


    for(int i=0; i<goodMatch.size(); i++)
    {
        tmpP.x = kgood_ref[i].pt.x;
        tmpP.y = kgood_ref[i].pt.y;
        frm1_feature.push_back(tmpP);

        tmpP.x = kgood_cmp[i].pt.x;
        tmpP.y = kgood_cmp[i].pt.y;
        frm2_feature.push_back(tmpP);
    }
    cv::Mat  affine_mat = cv::estimateRigidTransform(frm1_feature, frm2_feature, true);
    cv::Mat im_transformed;

    // Output results.
    cv::warpAffine(&im_cmp, &im_transformed, affine_mat, CV_INTER_LINEAR|CV_WARP_FILL_OUTLIERS); // error comes from here.
    cv::imshow("Transformed image", im_transformed);

    cvWaitKey(0);

    return 0;
}

I have got a result using the answer given by Evgeniy. The call I used is:

//cv::warpAffine( im_cmp, im_transformed, affine_mat, cv::Size(im_cmp.cols, im_cmp.rows) );

The transformed result looks strange: [image: screenshot of the transformed result]

What I want to do is to eventually obtain a merged image of the reference image and this transformed image; this is actually just my first step. Is this a problem with the parameters I pass to warpAffine()? What I have in mind for the merge is something along the lines of the sketch below, assuming the two images end up with the same size and type (a simple 50/50 overlay just to inspect the alignment):
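// Blend the reference image and the warped test image for visual inspection.
// This is only an illustrative sketch; it assumes im_ref and im_transformed
// have the same size and type.
cv::Mat im_merged;
cv::addWeighted(im_ref, 0.5, im_transformed, 0.5, 0.0, im_merged);
cv::imshow("Merged image", im_merged);
cv::waitKey(0);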

In the end, I want to get a result like the example here (two images taken from different positions, finally aligned): [image: example of two aligned images]

1 Answer:

Answer 0 (score: 1)

You are passing pointers, but warpAffine takes cv::Mat by reference (via cv::InputArray / cv::OutputArray). You can change your code like this:

cv::warpAffine(im_cmp, im_transformed, affine_mat, cv::Size(), CV_INTER_LINEAR|CV_WARP_FILL_OUTLIERS); 

Just remove the '&'.
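A variant of that call with an explicit output size, assuming the reference image's size is the desired output canvas (that choice is an assumption, not something stated above), would be:

// Warp the test image into the reference frame, using the reference image's
// size as the output canvas and bilinear interpolation.
cv::warpAffine(im_cmp, im_transformed, affine_mat,
               cv::Size(im_ref.cols, im_ref.rows),
               cv::INTER_LINEAR);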