I am trying a very simple program to detect faces in a webcam feed. I noticed that my face is detected well when it is in the center of the frame. When I move a little to the side, the detector either misses my face completely or fails to detect it reliably. Is this bias due to the way I am using the function (code attached), or is it an inherent bias of the Haar classifier? Note that in both cases (my face roughly at the center of the frame, or my face near the border) my face is fully visible, i.e. it is neither in side profile nor cut off at the edge.
// A live face detector program. Takes the camera feed and detects faces in each frame.
#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>
#include "opencv2/video/video.hpp"

using namespace cv;
using namespace std;

int main() {
    cv::Mat frame;
    cv::VideoCapture cap(0);
    cv::namedWindow("Frame");
    do {
        cap >> frame;
        Rect r1, r2;
        vector<Rect> faces1, faces2;
        CascadeClassifier cascade1;
        CascadeClassifier cascade2;
        //cascade1.load("C:/opencv2.4.9/sources/data/lbpcascades/lbpcascade_frontalface.xml");
        cascade1.load("C:/opencv2.4.9/sources/data/haarcascades/haarcascade_frontalface_alt2.xml");
        cascade2.load("C:/opencv2.4.9/sources/data/lbpcascades/lbpcascade_profileface.xml");
        cascade1.detectMultiScale(frame, faces1, 1.05, 6, CV_HAAR_FIND_BIGGEST_OBJECT, Size(0, 0));
        cascade2.detectMultiScale(frame, faces2, 1.05, 6, CV_HAAR_FIND_BIGGEST_OBJECT, Size(0, 0));
        if (faces1.size() != 0) {
            cout << "face1 found";
            r1 = faces1[0];
        }
        if (faces2.size() != 0) {
            cout << "face2 found";
            r2 = faces2[0];
        }
        rectangle(frame, Point(r1.y, r1.x), Point(r1.y + r1.height, r1.x + r1.width), Scalar(0, 255, 0), 2, 8);
        rectangle(frame, Point(r2.y, r2.x), Point(r2.y + r2.height, r2.x + r2.width), Scalar(255, 0, 0), 2, 8);
        imshow("Frame", frame);
    } while (waitKey(30) < 0);
    cap.release();
    return 0;
}
Answer 0 (score: 1)
Your Haar classifier code is working fine. Change this in your code:
rectangle(frame, Point(r1.y,r1.x), Point(r1.y+r1.height,r1.x+r1.width), Scalar(0,255,0),2, 8);
rectangle(frame, Point(r2.y,r2.x), Point(r2.y+r2.height,r2.x+r2.width), Scalar(255,0,0),2, 8);
to:
rectangle(frame, Point(r1.x, r1.y), Point(r1.x + r1.width, r1.y + r1.height), Scalar(0, 255, 0), 2, 8);
rectangle(frame, Point(r2.x, r2.y), Point(r2.x + r2.width, r2.y + r2.height), Scalar(255, 0, 0), 2, 8);
and it will work. You had swapped the x and y values when constructing the Points.
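As a side note, cv::rectangle also has an overload that accepts a cv::Rect directly, which sidesteps this kind of coordinate mix-up. Below is a minimal sketch of just the drawing step using that overload; the empty() guards are an addition and not part of the original code (which reused r1/r2 from the previous frame when nothing was detected):

// Draw only when a detection exists; passing the Rect directly
// avoids hand-swapping x/y into Points.
if (!faces1.empty())
    rectangle(frame, faces1[0], Scalar(0, 255, 0), 2, 8);
if (!faces2.empty())
    rectangle(frame, faces2[0], Scalar(255, 0, 0), 2, 8);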