Using BruteForceMatcher or FlannBasedMatcher

Date: 2016-04-05 13:11:05

Tags: c++

I am trying to recognize a source image (c1.jpg, a face) inside a larger destination image (allimg.jpg, which contains 3 faces) using an ORB detector/descriptor together with either a FLANN or a brute-force matcher. c1.jpg was created by cropping/copying it out of allimg.jpg. The ORB detector/descriptor works correctly and returns the expected keypoints/descriptors, but both the FLANN and the brute-force matcher give incorrect match results for the destination. As a result, when I go on to use findHomography(), it produces a wrong mapping, placing the source somewhere else in the destination instead of on the correct face in allimg.jpg.

Although that code is not shown below, after knnMatch I drew a bounding rectangle on c1.jpg and on allimg.jpg and displayed the images. The bounding rectangle on the source is correct, but the bounding rectangle on allimg.jpg is very large and merely contains the source face, whereas it should have located exactly the source face in the destination.

I am using OpenCV 3.0. Has anyone run into this kind of problem? Is there another matcher that can accurately locate the source image (a face or anything else) in the destination?

I have included the code below and the images via the links at the end:

#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/features2d/features2d.hpp>

using namespace std;
using namespace cv;

const double nn_match_ratio = 0.80f; // Nearest-neighbour matching ratio
const double ransac_thresh = 2.5f; // RANSAC inlier threshold
const int bb_min_inliers = 100; // Minimal number of inliers to draw BBox     

Mat img1;
Mat img2;

bool refineMatchesWithHomography(const vector<cv::KeyPoint>& queryKeypoints,
                                 const vector<cv::KeyPoint>& trainKeypoints,
                                 float reprojectionThreshold,
                                 vector<cv::DMatch>& matches,
                                 Mat& homography)
{
    const int minNumberMatchesAllowed = 4;
    if (matches.size() < minNumberMatchesAllowed)
        return false;

    // Prepare data for cv::findHomography
    vector<cv::Point2f> queryPoints(matches.size());
    std::vector<cv::Point2f> trainPoints(matches.size());
    for (size_t i = 0; i < matches.size(); i++)
    {
        queryPoints[i] = queryKeypoints[matches[i].queryIdx].pt;
        trainPoints[i] = trainKeypoints[matches[i].trainIdx].pt;
    }

    // Find homography matrix and get inliers mask
    // (RANSAC is the correct flag here; CV_FM_RANSAC belongs to findFundamentalMat)
    std::vector<unsigned char> inliersMask(matches.size());
    homography = findHomography(queryPoints,
                                trainPoints,
                                RANSAC,
                                reprojectionThreshold,
                                inliersMask);

    // Keep only the inlier matches
    vector<cv::DMatch> inliers;
    for (size_t i = 0; i < inliersMask.size(); i++)
    {
        if (inliersMask[i])
            inliers.push_back(matches[i]);
    }
    matches.swap(inliers);

    // Visualise the surviving matches
    Mat homoShow;
    drawMatches(img1, queryKeypoints, img2, trainKeypoints, matches, homoShow,
                Scalar::all(-1), CV_RGB(255, 255, 255), vector<char>(),
                DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

    imshow("homoShow", homoShow);
    waitKey(100000);

    return matches.size() > minNumberMatchesAllowed;
}




int main()
{
    //Stats stats;
    vector<String> fileName;

    fileName.push_back("D:\\pmn\\c1.jpg");
    fileName.push_back("D:\\pmn\\allimg.jpg");

    img1 = imread(fileName[0], IMREAD_COLOR);
    img2 = imread(fileName[1], IMREAD_COLOR);

    if (img1.empty())
    {
        cout << "Image " << fileName[0] << " is empty or cannot be found\n";
        return 0;
    }
    if (img2.empty())
    {
        cout << "Image " << fileName[1] << " is empty or cannot be found\n";
        return 0;
    }

    // Keypoints for img1 and img2
    vector<KeyPoint> keyImg1, keyImg2;
    // Descriptors for img1 and img2
    Mat descImg1, descImg2;

    // ORB: nfeatures, scaleFactor, nlevels, edgeThreshold, firstLevel, WTA_K, scoreType, patchSize
    Ptr<Feature2D> porb = ORB::create(500, 1.2f, 8, 0, 0, 2, 0, 14);

    // Detect keypoints with the detect method
    porb->detect(img2, keyImg2, Mat());
    // and compute their descriptors with the compute method
    porb->compute(img2, keyImg2, descImg2);

    porb->detect(img1, keyImg1, Mat());
    porb->compute(img1, keyImg1, descImg1);


    // FLANN parameters (left commented out; for ORB's binary descriptors FLANN
    // needs an LSH index rather than the default KD-tree)
    // Ptr<flann::IndexParams> indexParams = makePtr<flann::LshIndexParams>(6, 12, 1);
    // Ptr<flann::SearchParams> searchParams = makePtr<flann::SearchParams>(50);

    String itMatcher = "BruteForce-L1"; // unused below

    // Brute-force matcher on Hamming distance (cross-check disabled)
    Ptr<DescriptorMatcher> descriptorMatcher(new cv::BFMatcher(cv::NORM_HAMMING, false));
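    // For reference, a sketch (not part of the original post) of how the FLANN
    // parameters commented out above would typically be wired up for ORB's
    // binary descriptors, as an alternative to the BFMatcher line above;
    // flannMatcher is an illustrative name:
    //
    // Ptr<DescriptorMatcher> flannMatcher = makePtr<FlannBasedMatcher>(
    //     makePtr<flann::LshIndexParams>(6, 12, 1),
    //     makePtr<flann::SearchParams>(50));
    // flannMatcher->knnMatch(descImg1, descImg2, matches, 2);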

    vector<vector<DMatch> > matches, bestMatches;
    vector<DMatch> m;

    descriptorMatcher->knnMatch(descImg1, descImg2, matches, 2);

    // Ratio test: keep a match only if its distance is clearly smaller
    // than the distance of the second-best candidate
    const float minRatio = 0.95f; // 1.f / 1.5f;
    for (size_t i = 0; i < matches.size(); i++)
    {
        if (matches[i].size() > 1)
        {
            DMatch& bestMatch = matches[i][0];
            DMatch& betterMatch = matches[i][1];
            float distanceRatio = bestMatch.distance / betterMatch.distance;
            if (distanceRatio < minRatio)
            {
                bestMatches.push_back(matches[i]);
                m.push_back(bestMatch);
            }
        }
    }


    Mat homo;
    float homographyReprojectionThreshold = 1.0;
    bool homographyFound = refineMatchesWithHomography(
        keyImg1, keyImg2, homographyReprojectionThreshold, m, homo);

    return 0;
}
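
For reference, the bounding-rectangle step mentioned in the question (drawing the located region on allimg.jpg) is not part of the posted code. A minimal sketch of how that step could look, assuming it is placed just before the final return 0 and that homo, homographyFound, img1 and img2 are the variables used above:

// Sketch only (not from the original post): project the corners of the source
// image into the destination using the estimated homography and draw them.
if (homographyFound && !homo.empty())
{
    vector<Point2f> srcCorners(4), dstCorners(4);
    srcCorners[0] = Point2f(0, 0);
    srcCorners[1] = Point2f((float)img1.cols, 0);
    srcCorners[2] = Point2f((float)img1.cols, (float)img1.rows);
    srcCorners[3] = Point2f(0, (float)img1.rows);

    perspectiveTransform(srcCorners, dstCorners, homo);

    Mat boxShow = img2.clone();
    for (int i = 0; i < 4; i++)
        line(boxShow, dstCorners[i], dstCorners[(i + 1) % 4], CV_RGB(0, 255, 0), 2);

    imshow("locatedRegion", boxShow);
    waitKey(0);
}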

[c1.jpg][1]

[allimg.jpg][2]


[1]: http://i.stack.imgur.com/Uuy3o.jpg
[2]: http://i.stack.imgur.com/Kwne7.jpg

1 answer:

Answer 0 (score: 0)

Thanks to EdChum. I used the code given at the link (ratio test / symmetry test), and it gives somewhat better matches, but only when the source image is actually part of the destination, and even then it is not accurate enough. Note that I commented out the final ransacTest, because it was unnecessarily removing a lot of true positives. I have attached 2 images (source.jpg / destination.jpg) that show what I mean by highlighting the matched portion of the destination. Is there any algorithm that identifies the source in the destination more accurately/reliably (> 90%)?
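
As an aside, one simple stand-in for the symmetry test mentioned above is BFMatcher's built-in cross-check mode, which keeps a match only if it is the best match in both directions. This is a sketch under the assumption that it replaces the knnMatch/ratio-test stage in the question's code (descImg1/descImg2 as above), not code taken from the linked answer:

// Cross-check matching: a pair (i, j) survives only if descriptor i's best match
// in descImg2 is j AND descriptor j's best match in descImg1 is i.
BFMatcher crossMatcher(NORM_HAMMING, true); // second argument enables crossCheck
vector<DMatch> symMatches;
crossMatcher.match(descImg1, descImg2, symMatches);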

Also, I found that if the source is merely a similar image (and not an exact crop from the destination), the match in the destination image is way off and useless. Am I right? Please share your views. 1 = source, 2 = destination.