I have a black-and-white image containing a set of objects, and I want to be able to find those objects. I am almost certain that the objects in the image are nearly identical in size and shape.
One way to do this would be to slide a sample object over the image until I get a reasonable match between the pixels in the image and the pixels in the sample object. I could write my own code for that, but I would rather not reinvent the wheel. Is there anything in AForge or Emgu/OpenCV that does this?
Answer 0 (score: 2)
You can use OpenCV's matchTemplate function. It takes a template (your sample object) and compares it against the image in which you expect the same objects to appear, sliding it over every position.
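For example, a minimal sketch of how that could look with the C++ API (the file paths and the 0.8 threshold are placeholders you would adjust for your images):

// Minimal matchTemplate sketch (OpenCV 2.x C++ API); paths and threshold are placeholders.
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
using namespace cv;

int main()
{
    Mat image = imread("scene.png", 0);   // black-and-white scene image
    Mat templ = imread("object.png", 0);  // one sample object

    // result(x, y) = similarity score of the template placed with its top-left corner at (x, y)
    Mat result;
    matchTemplate(image, templ, result, CV_TM_CCOEFF_NORMED);

    // Mark every location whose score exceeds the threshold
    Mat mask = result > 0.8;
    for (int y = 0; y < mask.rows; y++)
        for (int x = 0; x < mask.cols; x++)
            if (mask.at<uchar>(y, x))
                rectangle(image, Rect(x, y, templ.cols, templ.rows), Scalar(255), 1);

    imwrite("detections.png", image);
    return 0;
}

Each object typically produces a small cluster of high scores, so in practice you would also suppress neighbouring detections (for example, keep only local maxima of the score map).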
Answer 1 (score: 1)
OpenCV's matchTemplate is basically exactly what you are describing.
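If a single best match is enough, a sketch along these lines (again assuming the OpenCV 2.x C++ API; bestMatch is a hypothetical helper) reads the strongest location with minMaxLoc. For several identical objects you would threshold the whole score map instead, as in the sketch above.

// Sketch: rectangle of the single best template match (hypothetical helper).
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
using namespace cv;

Rect bestMatch(const Mat& image, const Mat& templ)
{
    Mat result;
    matchTemplate(image, templ, result, CV_TM_CCOEFF_NORMED);

    // For CV_TM_CCOEFF_NORMED the best placement is the global maximum of the score map
    double minVal, maxVal;
    Point minLoc, maxLoc;
    minMaxLoc(result, &minVal, &maxVal, &minLoc, &maxLoc);

    return Rect(maxLoc, templ.size());
}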
Answer 2 (score: 1)
You should use feature matching; refer to the following sample code:
// Object_Matching_surf.cpp : Defines the entry point for the console application.
//
#include "stdafx.h"
#include <opencv2/nonfree/features2d.hpp>
using namespace cv;
#include <stdio.h>
#include "stdafx.h"
#include <iostream>
#include"cv.h"
#include"highgui.h"
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
using namespace cv;
int object_detection(Mat, Mat, char*);
Mat rotateImage(const Mat&, double);  // only referenced by the commented-out rotation test below; no definition shown here
int main()
{
    char inputImgName[50];
    char tmplateName[50];
    char outputImgName[50];

    for (int i = 1; i < 10; i++)
    {
        sprintf(inputImgName,  "%s%d%s", "C:/Matches1/img",         i, ".png");
        sprintf(tmplateName,   "%s%d%s", "C:/Matches1/gripper",     i, ".png");
        sprintf(outputImgName, "%s%d%s", "C:/Matches1/matches_img", i, ".png");

        Mat templateImage = imread(tmplateName);  //, CV_LOAD_IMAGE_GRAYSCALE );
        Mat inputImage    = imread(inputImgName); //, CV_LOAD_IMAGE_GRAYSCALE );
        if (templateImage.empty() || inputImage.empty())
        {
            std::cerr << "Could not read " << tmplateName << " or " << inputImgName << std::endl;
            continue;
        }

        object_detection(templateImage, inputImage, outputImgName);
    }

    // Resize the template image
    /*Mat newTemlate;
    resize(templateImage, newTemlate, Size(100,100));*/

    // Rotate the template
    /*Mat rotatedTemplate = rotateImage(templateImage, 90.0);
    imwrite("C:/Matches0/rotated.jpg", rotatedTemplate);*/

    return 0;
}
int object_detection(Mat templateImage, Mat inputImage, char* outPutImage)
{
    // Detect the keypoints with the SIFT detector
    // (the value is passed as SIFT's nfeatures parameter, not a SURF Hessian threshold)
    int minHessian = 500;
    SiftFeatureDetector detector( minHessian );
    std::vector<KeyPoint> kp_object;
    detector.detect( templateImage, kp_object );

    // Calculate descriptors (feature vectors) for the template
    SiftDescriptorExtractor extractor;
    Mat des_object;
    extractor.compute( templateImage, kp_object, des_object );

    FlannBasedMatcher matcher;

    // Get the corners of the template
    std::vector<Point2f> obj_corners(4);
    obj_corners[0] = Point2f( 0, 0 );
    obj_corners[1] = Point2f( (float)templateImage.cols, 0 );
    obj_corners[2] = Point2f( (float)templateImage.cols, (float)templateImage.rows );
    obj_corners[3] = Point2f( 0, (float)templateImage.rows );

    Mat des_image, img_matches;
    std::vector<KeyPoint> kp_image;
    std::vector<std::vector<DMatch> > matches;
    std::vector<DMatch> good_matches;
    std::vector<Point2f> obj;
    std::vector<Point2f> scene;
    std::vector<Point2f> scene_corners(4);
    Mat H;

    // Detect and describe keypoints in the scene image
    detector.detect( inputImage, kp_image );
    extractor.compute( inputImage, kp_image, des_image );

    // Find the two nearest scene descriptors for each template descriptor
    matcher.knnMatch(des_object, des_image, matches, 2);

    // Ratio test: keep a match only if it is clearly better than the second-best candidate
    for (int i = 0; i < std::min(des_image.rows - 1, (int) matches.size()); i++)
    {
        if (matches[i].size() == 2 && matches[i][0].distance < 0.6 * matches[i][1].distance)
        {
            good_matches.push_back(matches[i][0]);
        }
    }
    // Draw only the "good" matches
    drawMatches( templateImage, kp_object, inputImage, kp_image, good_matches, img_matches,
                 Scalar::all(-1), Scalar::all(-1), std::vector<char>(),
                 DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

    // A homography needs at least 4 correspondences
    if (good_matches.size() >= 4)
    {
        // Get the keypoints from the good matches
        for (size_t i = 0; i < good_matches.size(); i++)
        {
            obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
            scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
        }

        H = findHomography( obj, scene, CV_RANSAC );
        perspectiveTransform( obj_corners, scene_corners, H );

        // Draw lines between the corners (the mapped object in the scene image).
        // img_matches shows the template on the left, so the scene part is offset by templateImage.cols.
        Point2f offset( (float)templateImage.cols, 0 );
        line( img_matches, scene_corners[0] + offset, scene_corners[1] + offset, Scalar(0, 255, 0), 4 );
        line( img_matches, scene_corners[1] + offset, scene_corners[2] + offset, Scalar(0, 255, 0), 4 );
        line( img_matches, scene_corners[2] + offset, scene_corners[3] + offset, Scalar(0, 255, 0), 4 );
        line( img_matches, scene_corners[3] + offset, scene_corners[0] + offset, Scalar(0, 255, 0), 4 );

        // Label the corners and the center (only meaningful when a homography was found)
        Point2f p1 = scene_corners[0] + offset;
        Point2f p2 = scene_corners[1] + offset;
        Point2f p3 = scene_corners[2] + offset;
        Point2f p4 = scene_corners[3] + offset;
        Point2f center = (p1 + p3) * 0.5f;  // midpoint of the diagonal

        char p1text[20];
        sprintf(p1text, "(%d,%d)", (int)p1.x, (int)p1.y);
        putText(img_matches, p1text, p1, FONT_HERSHEY_SIMPLEX, 0.3, Scalar(255, 255, 255));
        char p2text[20];
        sprintf(p2text, "(%d,%d)", (int)p2.x, (int)p2.y);
        putText(img_matches, p2text, p2, FONT_HERSHEY_SIMPLEX, 0.3, Scalar(255, 255, 255));
        char p3text[20];
        sprintf(p3text, "(%d,%d)", (int)p3.x, (int)p3.y);
        putText(img_matches, p3text, p3, FONT_HERSHEY_SIMPLEX, 0.3, Scalar(255, 255, 255));
        char p4text[20];
        sprintf(p4text, "(%d,%d)", (int)p4.x, (int)p4.y);
        putText(img_matches, p4text, p4, FONT_HERSHEY_SIMPLEX, 0.3, Scalar(255, 255, 255));
        char centertext[20];
        sprintf(centertext, "(%d,%d)", (int)center.x, (int)center.y);
        putText(img_matches, centertext, center, FONT_HERSHEY_SIMPLEX, 0.3, Scalar(255, 0, 0));
    }

    system("pause");

    // Save the image of matches
    imwrite(outPutImage, img_matches);
    return 0;
}