How can I draw the detected object using SIFT features in OpenCV 3.1?

Date: 2016-08-16 09:37:41

Tags: c++ opencv sift surf

I am using this code to find matches between two images:

#include <opencv2/highgui/highgui.hpp>
#include <opencv2/xfeatures2d/nonfree.hpp>
#include <opencv2/xfeatures2d.hpp>

#include <vector>

using namespace std;
using namespace cv;

int main(int argc, char *argv[]) {
    //cv::initModule_nonfree();
    //initModule_features2d();
    Mat img_1 = imread("C:/Users/Dan/Desktop/0.jpg", 1);
    Mat img_2 = imread("C:/Users/Dan/Desktop/0.jpg", 1);

    cv::Ptr<Feature2D> f2d = xfeatures2d::SIFT::create();

    //-- Step 1: Detect the keypoints:
    std::vector<KeyPoint> keypoints_1, keypoints_2;
    f2d->detect(img_1, keypoints_1);
    f2d->detect(img_2, keypoints_2);

    //-- Step 2: Calculate descriptors (feature vectors)    
    Mat descriptors_1, descriptors_2;
    f2d->compute(img_1, keypoints_1, descriptors_1);
    f2d->compute(img_2, keypoints_2, descriptors_2);

    Mat out0;
    drawKeypoints(img_1, keypoints_1, out0);
    imshow("KeyPoint0.jpg", out0);

    //-- Step 3: Matching descriptor vectors using BFMatcher :
    BFMatcher matcher;
    std::vector< DMatch > matches;
    matcher.match(descriptors_1, descriptors_2, matches);

    Mat img_matches = Mat::zeros( img_1.size(), CV_8UC3 );
    drawMatches(img_1,keypoints_1,img_2,keypoints_2,matches,img_matches);
    imshow("matches", img_matches);

    waitKey(0);  // Wait until the user presses a key before closing the windows.

    return 0;
}

Since the functions have changed in OpenCV 3.1, I searched for sample code using SURF/SIFT, but could not find any.

How can I modify this code so that it draws an outline around the detected object, similar to the OpenCV version

1 Answer:

Answer 0 (score: 0)

You need to use findHomography to obtain the transformation that relates your training image (img_1) to the image in which you want to detect the object (img_2).

Then you can use the obtained homography to do a perspectiveTransform on the bounding box of the training image (at the origin), which places the correct bounding box on the detected image.
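
With the code from the question, the matched keypoints first have to be collected into point vectors before calling findHomography. A minimal sketch of that step, not part of the original answer (the names obj_pts, scene_pts, H, inlier_mask and the 3.0 pixel RANSAC threshold are illustrative choices):

// Sketch only: build point vectors from the question's `matches`,
// `keypoints_1` and `keypoints_2`, then estimate the homography.
// Requires #include <opencv2/calib3d.hpp> for findHomography.
std::vector<Point2f> obj_pts, scene_pts;
for (const DMatch &m : matches) {
    obj_pts.push_back(keypoints_1[m.queryIdx].pt);   // point in img_1 (training image)
    scene_pts.push_back(keypoints_2[m.trainIdx].pt); // point in img_2 (scene image)
}

Mat inlier_mask;
Mat H;
if (obj_pts.size() >= 4) {  // findHomography needs at least 4 correspondences
    H = findHomography(obj_pts, scene_pts, RANSAC, 3.0, inlier_mask);
}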

The relevant code, taken from the ORB detection example:
Mat inlier_mask, homography;
vector<KeyPoint> inliers1, inliers2;
vector<DMatch> inlier_matches;
// Estimate the homography with RANSAC; at least 4 matched pairs are required.
if(matched1.size() >= 4) {
    homography = findHomography(Points(matched1), Points(matched2),
                                RANSAC, ransac_thresh, inlier_mask);
}

// Keep only the matches that RANSAC marked as inliers.
for(unsigned i = 0; i < matched1.size(); i++) {
    if(inlier_mask.at<uchar>(i)) {
        int new_i = static_cast<int>(inliers1.size());
        inliers1.push_back(matched1[i]);
        inliers2.push_back(matched2[i]);
        inlier_matches.push_back(DMatch(new_i, new_i, 0));
    }
}
stats.inliers = (int)inliers1.size();
stats.ratio = stats.inliers * 1.0 / stats.matches;

// Project the bounding box of the training image into the current frame.
vector<Point2f> new_bb;
perspectiveTransform(object_bb, new_bb, homography);
Mat frame_with_bb = frame.clone();
if(stats.inliers >= bb_min_inliers) {
    drawBoundingBox(frame_with_bb, new_bb);
}
Mat res;
drawMatches(first_frame, inliers1, frame_with_bb, inliers2,
            inlier_matches, res,
            Scalar(255, 0, 0), Scalar(255, 0, 0));
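
Note that Points(...), ransac_thresh, stats, object_bb, bb_min_inliers and drawBoundingBox are helpers and constants defined elsewhere in that sample, not part of the OpenCV API. Applied to the code in the question, drawing the detected object on img_matches could look roughly like this (again only a sketch, reusing the homography H estimated in the earlier snippet; obj_corners, scene_corners and offset are illustrative names):

// Sketch only: project the corners of img_1 with the homography H and
// draw the detected object outline on img_matches.
// Requires #include <opencv2/imgproc.hpp> for line().
std::vector<Point2f> obj_corners = {
    Point2f(0, 0),
    Point2f((float)img_1.cols, 0),
    Point2f((float)img_1.cols, (float)img_1.rows),
    Point2f(0, (float)img_1.rows)
};

if (!H.empty()) {
    std::vector<Point2f> scene_corners;
    perspectiveTransform(obj_corners, scene_corners, H);

    // drawMatches places img_2 to the right of img_1 in the composite image,
    // so shift the projected corners by img_1.cols before drawing.
    Point2f offset((float)img_1.cols, 0);
    for (size_t i = 0; i < scene_corners.size(); i++) {
        line(img_matches,
             scene_corners[i] + offset,
             scene_corners[(i + 1) % scene_corners.size()] + offset,
             Scalar(0, 255, 0), 2);
    }
    imshow("matches", img_matches);
}

If you only want the box on the scene image itself, drop the offset and draw the same lines on a clone of img_2 instead.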