After selecting a region of interest from a sample image, I want to match it against another image in the dataset. The code below compiles without errors, but it does not draw any matches. I have gone through many sites and could not spot any mistake; maybe I am missing something. Any help would be appreciated. (For reference, a minimal version of the matching sequence I am trying to follow is included at the end of the post.)
#include <stdio.h>
#include <iostream>
#include <opencv2\opencv.hpp>
#include <fstream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
using namespace std;
using namespace cv;
#define LEFT_NAME "%s\\left%05d.png"
#define OPTICAL_NAME "%s\\track_%05d.png"
#define PATH_V_L "C:\\OpenCV32\\SequenceLeftCamera"//Left images
#define PATH_O_L "C:\\OpenCV32\\SequenceLeftCamera\\Tracking"//Tracked images
#define START_IMG 462
#define END_IMG 170
#define MAX_FN 128
Rect cropRect(0, 0, 0, 0);
void checkBoundary(int &x, int &y, int &width, int &height, Mat img) {
    // check whether the cropping rectangle exceeds the image boundary
    if (width > img.cols - x)
        width = img.cols - x;
    if (height > img.rows - y)
        height = img.rows - y;
    if (x < 0)
        x = 0;
    if (y < 0)
        y = 0;
}
char dphw_buffer[MAX_FN];
static char * makeFileName(char *prefix, char *format, int index) {
    sprintf_s(dphw_buffer, MAX_FN, format, prefix, index);
    return dphw_buffer;
}
/** @function main */
int main(int argc, char** argv)
{
    char *fn1, *fn2;
    fn1 = makeFileName(PATH_V_L, LEFT_NAME, START_IMG);
    Mat img_object = imread(fn1);
    imshow("Original Image", img_object);
    cropRect.x = 563;
    cropRect.y = 90;
    cropRect.width = 41;
    cropRect.height = 135;
    Rect scene_rectangle = cropRect;
    Mat iobject = img_object(cropRect);
    imshow("Test", iobject);
    cvWaitKey(10);
    //-- Step 1: Detect the keypoints using AKAZE Detector
    Ptr<FeatureDetector> detector = AKAZE::create();
    std::vector<KeyPoint> keypoints_object, keypoints_scene;
    detector->detect(iobject, keypoints_object);
    //-- Step 2: Calculate descriptors (feature vectors)
    Ptr<DescriptorExtractor> extractor = AKAZE::create();
    Mat descriptors_object, descriptors_scene;
    extractor->compute(iobject, keypoints_object, descriptors_object);
    //-- Opening a text file
    ofstream myfile("ORB_detector.txt");
    myfile << "ORB Detector keypoints.\n";
    int minX, minY, maxX, maxY;
    for (int i = START_IMG + 1; i < 487; i++)
    {
        printf("\n\nTracking frame %d \n\n", i);
        fn2 = makeFileName(PATH_V_L, LEFT_NAME, i);
        Mat img_scene = imread(fn2);
        scene_rectangle.x = scene_rectangle.x - 10;
        scene_rectangle.y = scene_rectangle.y - 10;
        scene_rectangle.width = scene_rectangle.width + 5;
        scene_rectangle.height = scene_rectangle.height + 5;
        checkBoundary(scene_rectangle.x, scene_rectangle.y, scene_rectangle.width, scene_rectangle.height, img_scene);
        scene_rectangle = Rect(scene_rectangle.x, scene_rectangle.y, scene_rectangle.width, scene_rectangle.height);
        Mat iscene = img_scene(scene_rectangle);
        imshow("New Scene", iscene);
        cvWaitKey(10);
        if (!img_object.data || !img_scene.data)
        {
            std::cout << " --(!) Error reading images " << std::endl; return -1;
        }
        detector->detect(iscene, keypoints_scene);
        cout << "Scene Keypoints " << keypoints_scene.size() << "\n";
        extractor->compute(iscene, keypoints_scene, descriptors_scene);
        cout << "Scene Descriptors " << descriptors_scene.size() << "\n";
        //-- Writing the keypoints size to the text file
        if (myfile.is_open())
        {
            myfile << keypoints_scene.size();
            myfile << "\n";
        }
        else
            cout << "Unable to open file";
        //-- Step 3: Matching descriptor vectors using Brute Force matcher
        std::vector< DMatch > matches;
        Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce");
        matcher->match(descriptors_object, descriptors_scene, matches);
        cout << "Matches " << matches.size() << "\n";
        double max_dist = 0; double min_dist = 100;
        //-- Quick calculation of max and min distances between keypoints
        for (int i = 0; i < descriptors_object.rows; i++)
        {
            double dist = matches[i].distance;
            if (dist < min_dist) min_dist = dist;
            if (dist > max_dist) max_dist = dist;
        }
        printf("-- Max dist : %f \n", max_dist);
        printf("-- Min dist : %f \n", min_dist);
        //-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
        std::vector< DMatch > good_matches;
        cout << "Good Matches " << good_matches.size() << "\n";
        for (int i = 0; i < descriptors_object.rows; i++)
        {
            if (matches[i].distance < 3 * min_dist)
            {
                good_matches.push_back(matches[i]);
            }
        }
        Mat img_matches;
        drawMatches(iobject, keypoints_object, iscene, keypoints_scene, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
        imshow("All good matches", img_matches);
        //-- Localize the object
        std::vector<Point2f> obj;
        std::vector<Point2f> scene_pts;
        minX = scene_rectangle.x;
        minY = scene_rectangle.y;
        maxX = 0;
        maxY = 0;
        for (int i = 0; i < good_matches.size(); i++)
        {
            //-- Get the keypoints from the good matches
            obj.push_back(keypoints_object[good_matches[i].queryIdx].pt);
            scene_pts.push_back(keypoints_scene[good_matches[i].trainIdx].pt);
            if (keypoints_scene[good_matches[i].trainIdx].pt.x < minX)
                minX = keypoints_scene[good_matches[i].trainIdx].pt.x;
            if (keypoints_scene[good_matches[i].trainIdx].pt.y < minY)
                minY = keypoints_scene[good_matches[i].trainIdx].pt.y;
            if (keypoints_scene[good_matches[i].trainIdx].pt.x > maxX)
                maxX = keypoints_scene[good_matches[i].trainIdx].pt.x;
            if (keypoints_scene[good_matches[i].trainIdx].pt.y > maxY)
                maxY = keypoints_scene[good_matches[i].trainIdx].pt.y;
        }
        scene_rectangle.x = minX;
        scene_rectangle.y = minY;
        if (maxX - minX > 0)
            scene_rectangle.width = maxX - minX;
        if (maxY - minY > 0)
            scene_rectangle.height = maxY - minY;
        if (scene_rectangle.width < 100)
            scene_rectangle.width = 100;
        if (scene_rectangle.height < 200)
            scene_rectangle.height = 200;
        if (good_matches.size() > 3)
        {
            Mat H = findHomography(obj, scene_pts, CV_RANSAC);
            //-- Get the corners from the image_1 ( the object to be "detected" )
            std::vector<Point2f> obj_corners(4);
            obj_corners[0] = cvPoint(0, 0); obj_corners[1] = cvPoint(iobject.cols, 0);
            obj_corners[2] = cvPoint(iobject.cols, iobject.rows); obj_corners[3] = cvPoint(0, iobject.rows);
            std::vector<Point2f> scene_corners(4);
            perspectiveTransform(obj_corners, scene_corners, H);
            //-- Draw lines between the corners (the mapped object in the scene - image_2 )
            line(img_matches, scene_corners[0] + Point2f(img_object.cols, 0), scene_corners[1] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);
            line(img_matches, scene_corners[1] + Point2f(img_object.cols, 0), scene_corners[2] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);
            line(img_matches, scene_corners[2] + Point2f(img_object.cols, 0), scene_corners[3] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);
            line(img_matches, scene_corners[3] + Point2f(img_object.cols, 0), scene_corners[0] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);
            //-- Show detected matches
            imshow("Good Matches & Object detection", img_matches);
        }
        waitKey(0);
        //return 0;
    }
}
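For reference, this is the minimal AKAZE detect/describe/match/draw sequence I am trying to adapt to the cropped ROI. It is only a sketch with placeholder file names (frame_a.png, frame_b.png), not my actual tracking code, and it matches the binary AKAZE descriptors with a Hamming-distance brute-force matcher:

#include <opencv2/opencv.hpp>
#include <vector>
using namespace cv;

int main()
{
    // Placeholder file names -- substitute two frames from the sequence.
    Mat img1 = imread("frame_a.png", IMREAD_GRAYSCALE);
    Mat img2 = imread("frame_b.png", IMREAD_GRAYSCALE);
    if (img1.empty() || img2.empty())
        return -1;

    // One AKAZE instance does both detection and description.
    Ptr<AKAZE> akaze = AKAZE::create();
    std::vector<KeyPoint> kp1, kp2;
    Mat desc1, desc2;
    akaze->detectAndCompute(img1, noArray(), kp1, desc1);
    akaze->detectAndCompute(img2, noArray(), kp2, desc2);

    // AKAZE's default MLDB descriptors are binary, so Hamming distance is used.
    BFMatcher matcher(NORM_HAMMING);
    std::vector<DMatch> matches;
    matcher.match(desc1, desc2, matches);

    // Draw every match between the two frames.
    Mat vis;
    drawMatches(img1, kp1, img2, kp2, matches, vis);
    imshow("matches", vis);
    waitKey(0);
    return 0;
}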