I am doing real-time hand detection with the SURF algorithm. The idea is to have the user place their hand inside a rectangle, use that region as my object, find the hand in the first frame with it, then set the detected hand as the object for the next iteration, and so on... But at runtime I get the following error:
OpenCV Error: Assertion failed (count >= 4) in cvFindHomography, file /build/buildd/opencv-2.4.8+dfsg1/modules/calib3d/src/fundam.cpp, line 235
terminate called after throwing an instance of 'cv::Exception'
  what():  /build/buildd/opencv-2.4.8+dfsg1/modules/calib3d/src/fundam.cpp:235: error: (-215) count >= 4 in function cvFindHomography
Here is my code:
#include <stdio.h>
#include <iostream>
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/nonfree/nonfree.hpp"
using namespace cv;
using namespace std;
int main()
{
    char k;
    Mat img_object, img_scene, frame;

    VideoCapture cap(0);
    if (!cap.isOpened())
    {
        cout << "ERROR";
        return -1;
    }

    // Let the user place a hand inside the drawn rectangle, then press 'q'
    // to grab the object template.
    while (1)
    {
        cap >> frame;
        rectangle(frame, Point(100, 100), Point(300, 300), Scalar(0, 255, 0), 4, 8, 0);
        imshow("gig", frame);
        k = waitKey(1);
        if (k == 'q')
        {
            // Crop a 100x100 patch starting at (25,25) as the object template
            Mat img_object1(frame, Rect(25, 25, 100, 100));
            img_object = img_object1.clone();
            cvtColor(img_object, img_object, CV_BGR2GRAY);
            break;
        }
    }
    // Main loop: re-detect the object in every new frame.
    while (1)
    {
        cap >> img_scene;
        cvtColor(img_scene, img_scene, CV_BGR2GRAY);

        // Detect SURF keypoints in the object template and in the current frame
        int minHessian = 50;
        SurfFeatureDetector detector(minHessian);
        std::vector<KeyPoint> keypoints_object, keypoints_scene;
        detector.detect(img_object, keypoints_object);
        detector.detect(img_scene, keypoints_scene);

        // Compute SURF descriptors
        SurfDescriptorExtractor extractor;
        Mat descriptors_object, descriptors_scene;
        extractor.compute(img_object, keypoints_object, descriptors_object);
        extractor.compute(img_scene, keypoints_scene, descriptors_scene);

        // Match object descriptors against the scene
        FlannBasedMatcher matcher;
        std::vector<DMatch> matches;
        matcher.match(descriptors_object, descriptors_scene, matches);
        // Find the smallest and largest match distances
        double max_dist = 0; double min_dist = 100;
        for (int i = 0; i < descriptors_object.rows; i++)
        {
            double dist = matches[i].distance;
            if (dist < min_dist) min_dist = dist;
            if (dist > max_dist) max_dist = dist;
        }
        printf("-- Max dist : %f \n", max_dist);
        printf("-- Min dist : %f \n", min_dist);

        // Keep only matches whose distance is below 3 * min_dist
        std::vector<DMatch> good_matches;
        for (int i = 0; i < descriptors_object.rows; i++)
        {
            if (matches[i].distance < 3 * min_dist)
            { good_matches.push_back(matches[i]); }
        }
        Mat img_matches;
        drawMatches(img_object, keypoints_object, img_scene, keypoints_scene,
                    good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
                    vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

        // Collect the matched point pairs and estimate the homography
        std::vector<Point2f> obj;
        std::vector<Point2f> scene;
        for (size_t i = 0; i < good_matches.size(); i++)
        {
            obj.push_back(keypoints_object[good_matches[i].queryIdx].pt);
            scene.push_back(keypoints_scene[good_matches[i].trainIdx].pt);
        }
        Mat H = findHomography(obj, scene, CV_RANSAC);

        // Project the object corners into the scene and draw the detected region
        std::vector<Point2f> obj_corners(4);
        obj_corners[0] = cvPoint(0, 0); obj_corners[1] = cvPoint(img_object.cols, 0);
        obj_corners[2] = cvPoint(img_object.cols, img_object.rows); obj_corners[3] = cvPoint(0, img_object.rows);
        std::vector<Point2f> scene_corners(4);
        perspectiveTransform(obj_corners, scene_corners, H);

        line(img_matches, scene_corners[0] + Point2f(img_object.cols, 0), scene_corners[1] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);
        line(img_matches, scene_corners[1] + Point2f(img_object.cols, 0), scene_corners[2] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);
        line(img_matches, scene_corners[2] + Point2f(img_object.cols, 0), scene_corners[3] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);
        line(img_matches, scene_corners[3] + Point2f(img_object.cols, 0), scene_corners[0] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);

        // Crop the detected region and use it as the object for the next frame
        Mat nn(img_scene, Rect(scene_corners[0].x, scene_corners[0].y, scene_corners[2].x, scene_corners[2].y));
imshow( "Good Matches & Object detection", img_matches );
img_object=nn.clone();
k=waitKey(1);
if(k=='q')
{
break;
}
}
return 0;
}
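From the assertion message I understand that cvFindHomography requires at least 4 matched point pairs, so I suspect the crash happens on frames where good_matches ends up with fewer than 4 entries. Below is a minimal sketch of the guard I am considering placing just before the findHomography call; it assumes that simply skipping such frames and keeping the previous object template is acceptable:

        // Sketch (assumption): skip the homography when there are too few correspondences,
        // keeping the previous img_object template for the next frame.
        if (good_matches.size() < 4)
        {
            imshow("Good Matches & Object detection", img_matches);
            if ((char)waitKey(1) == 'q') break;
            continue;   // not enough point pairs for findHomography
        }
        Mat H = findHomography(obj, scene, CV_RANSAC);

Is this the right way to handle it, or is the real problem that my object template degrades after each re-crop so the matcher stops finding good matches?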