I am using ORB in OpenCV to match keypoints between a source image (the object I want to locate) and a dst image (which may contain that object). My problem is that I do get some good matches, but how do I actually locate the source image inside the dst image?
I would really appreciate any help.
My code is below. I used findHomography(), but it does not find the object correctly and cannot even draw the bounding quadrilateral. Please help me find my mistake.
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace std;
using namespace cv;
int main(int argc, char **argv)
{
    double t = (double)cvGetTickCount();
    cv::Mat object = cv::imread("card.jpg");
    cv::VideoCapture mCapture;
    cv::Mat scene;
    mCapture.open(0);
    assert(mCapture.isOpened());
    while (1)
    {
        mCapture >> scene;
        if (!scene.empty())
        {
            // ORB
            cv::OrbFeatureDetector detector;
            cv::OrbDescriptorExtractor extractor;
            // Find keypoints
            cv::Mat descriptorsA, descriptorsB;
            std::vector<cv::KeyPoint> keypointsA, keypointsB;
            //int64 last = cv::getTickCount();
            detector.detect(object, keypointsA);
            detector.detect(scene, keypointsB);
            //printf("detection = %f[s]\n", (cv::getTickCount() - last)/cv::getTickFrequency());
            //last = cv::getTickCount();
            extractor.compute(object, keypointsA, descriptorsA);
            extractor.compute(scene, keypointsB, descriptorsB);
            //printf("description = %f[s]\n", (cv::getTickCount() - last)/cv::getTickFrequency());
            // Match
            std::vector<cv::DMatch> matches;
            cv::BFMatcher matcher(cv::NORM_HAMMING, true);
            matcher.match(descriptorsA, descriptorsB, matches);
            // min distance
            double min_dist = DBL_MAX;
            for (int i = 0; i < (int)matches.size(); i++)
            {
                double dist = matches[i].distance;
                if (dist < min_dist)
                    min_dist = dist;
            }
            if (min_dist < 1.0)
                min_dist = 1.0;
            //std::cout << min_dist << std::endl;
            // save good matches
            const double threshold = 1.5 * min_dist;
            std::vector<cv::DMatch> matches_good;
            for (int i = 0; i < (int)matches.size(); i++)
            {
                if (matches[i].distance < threshold)
                {
                    matches_good.push_back(matches[i]);
                }
            }
            // show
            Mat matchImage;
            // localize points
            std::vector<cv::Point2f> object_points, scene_points;
            for (int i = 0; i < matches_good.size(); i++)
            {
                //-- Get the keypoints from the good matches
                object_points.push_back(keypointsA[matches_good[i].queryIdx].pt);
                scene_points.push_back(keypointsB[matches_good[i].trainIdx].pt);
            }
            Mat H = findHomography(object_points, scene_points, CV_RANSAC);
            cv::drawMatches(object, keypointsA, scene, keypointsB, matches, matchImage, cv::Scalar::all(-1), cv::Scalar::all(-1), std::vector<char>(), cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
            std::vector<cv::Point2f> object_corners(4), scene_corners(4);
            object_corners[0] = cv::Point(0, 0);
            object_corners[1] = cv::Point(object.cols, 0);
            object_corners[2] = cv::Point(object.cols, object.rows);
            object_corners[3] = cv::Point(0, object.rows);
            cv::perspectiveTransform(object_corners, scene_corners, H);
            Mat diff;
            //diff = Mat(scene_corners) - Mat(scene_points);
            //cv::imshow("Diff", diff);
            // draw bounds
            cv::line(matchImage, scene_corners.at(0) + cv::Point2f(object.cols, 0), scene_corners.at(1) + cv::Point2f(object.cols, 0), cv::Scalar(0, 255, 0), 4);
            cv::line(matchImage, scene_corners.at(1) + cv::Point2f(object.cols, 0), scene_corners.at(2) + cv::Point2f(object.cols, 0), cv::Scalar(0, 255, 0), 4);
            cv::line(matchImage, scene_corners.at(2) + cv::Point2f(object.cols, 0), scene_corners.at(3) + cv::Point2f(object.cols, 0), cv::Scalar(0, 255, 0), 4);
            cv::line(matchImage, scene_corners.at(3) + cv::Point2f(object.cols, 0), scene_corners.at(0) + cv::Point2f(object.cols, 0), cv::Scalar(0, 255, 0), 4);
            cv::imshow("match", matchImage);
            //std::cout << "match size:" << matches.size() << std::endl;
        }
        int key = cv::waitKey(30);
        if (key == 27)
        {
            return 1;
        }
    }
    cv::waitKey(0);
    return 0;
}
Answer 0 (score: 0)
You need to estimate the location with an algorithm that is robust to noise and outliers, such as RANSAC or LMedS (least median of squares).
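As a rough illustration, here is a minimal sketch of that robust estimation step with cv::findHomography. The helper name estimateHomographyRobust, the 3.0-pixel reprojection threshold, and the assumption that object_points/scene_points are the matched Point2f vectors from the question's code are all illustrative choices, not a prescribed implementation; cv::LMEDS can be passed instead of cv::RANSAC for an LMedS fit.

#include <opencv2/opencv.hpp>
#include <vector>

// Hypothetical helper: estimate the object-to-scene homography robustly.
cv::Mat estimateHomographyRobust(const std::vector<cv::Point2f> &object_points,
                                 const std::vector<cv::Point2f> &scene_points)
{
    // findHomography needs at least 4 correspondences; guard against fewer.
    if (object_points.size() < 4 || scene_points.size() < 4)
        return cv::Mat();

    // The mask reports which correspondences were kept as inliers.
    std::vector<uchar> inlier_mask;
    cv::Mat H = cv::findHomography(object_points, scene_points,
                                   cv::RANSAC,   // or cv::LMEDS
                                   3.0,          // reprojection threshold in pixels
                                   inlier_mask);

    // An empty H means the robust fit failed; callers should check for this.
    return H;
}

Only the correspondences flagged in inlier_mask should then be trusted when drawing or evaluating the match.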
Answer 1 (score: 0)
You need to use warpPerspective() and apply the homography you found to the two images.
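For completeness, here is a minimal sketch of that step under the same assumptions as above: H is the object-to-scene homography returned by findHomography(object_points, scene_points, ...), and the helper name warpObjectIntoScene is made up for the example. warpPerspective renders the object image in the scene's coordinate frame so the two images can be compared or overlaid directly.

#include <opencv2/opencv.hpp>

// Hypothetical helper: project the object image into the scene's frame.
void warpObjectIntoScene(const cv::Mat &object, const cv::Mat &scene, const cv::Mat &H)
{
    if (H.empty())
        return; // no valid homography was found

    cv::Mat warped;
    // H maps object coordinates to scene coordinates, so the warped image
    // has the scene's size and shows the object at its detected pose.
    cv::warpPerspective(object, warped, H, scene.size());

    cv::imshow("object warped into scene", warped);
    cv::waitKey(0);
}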