Eye blink detection with OpenCV C++

Date: 2015-09-15 08:20:54

Tags: c++ opencv face-recognition

I am trying to write an eye blink detector, but it does not work properly.

It finds the face and the eyes correctly, and then I try to find a circle in the eye region (the pupil), but it does not always find one. When it does find one, a blink is detected (the counter is incremented) even though the eye did not blink.

I have tried different methods and filters (HoughCircles, Canny, threshold, medianBlur, smooth), but it did not change anything.

Here is my code:

#include "stdafx.h"
#include <stdio.h>
#include <iostream>
#include <vector>
#include <math.h>
#include <iomanip>
#include <sstream>
#include <string>

#include <opencv2\objdetect\objdetect.hpp>
#include <opencv2\highgui\highgui.hpp>
#include <opencv2\imgproc\imgproc.hpp>
#include <opencv2\video\tracking.hpp>
#include "opencv2/opencv.hpp"

// Include OpenCV's C++ Interface
#include "opencv2/opencv.hpp"


const float eyeTop = 0.26f;     //y
const float eyeSide = 0.16f;  //x
const float eyeHeight = 0.28f;  //h
const float eyeWidth = 0.65f;       //w
const int calibrationDefault = 100;

int blinknumber =0;
int calibrationFace = calibrationDefault;

bool leftEyeOpen = true;
bool rightEyeOpen = true;
int blinkNumberLeft = 0;
int blinkNumberRight = 0;

using namespace cv;
using namespace std;

const std::string casceye_name = "C:\\Users\\Hossein Hezarpisheh\\Documents\\Visual Studio 2010\\Projects\\projek1\\haarcascade_mcs_eyepair_big.xml";
const std::string face_cascade_name = "C:\\Users\\Hossein Hezarpisheh\\Documents\\Visual Studio 2010\\Projects\\projek1\\haarcascade_frontalface_alt.xml";

Mat lastFace;
Mat actualFace;

void headTracing(Mat grayImage, Mat image, CascadeClassifier casceye, CascadeClassifier cascFace, Rect &faceArea);
Rect detectLargestObject(Mat grayImage, CascadeClassifier cascFace);
void eyeTracking(Mat &actualFace, Mat &lastFace,int &blinknumber);
void getEyes(Mat &face, Mat &eye);

namespace patch
{
template < typename T > string to_string(T n )
{
    ostringstream stm ;
    stm << n ;
    return stm.str() ;
}
}

int main()
{
 Rect faceArea; 
 CascadeClassifier cascFace, casceye;

if (!cascFace.load(face_cascade_name)){ printf("--(!)Error loading face cascade\n"); return -1; };
if (!casceye.load(casceye_name)){ printf("--(!)Error loading eyes cascade\n"); return -1; };


cout << "\n\tESC - Programm beenden\n\tc - zaehler auf 0 setzen\n\n";

namedWindow("Blinzel Erkennung", CV_WINDOW_AUTOSIZE);

VideoCapture capture(0);
if (!capture.isOpened())
{
    cout<<"Kamera wurde nicht gefunden!"<<endl;
    return 1;
}

Mat Image;

while (1)
{
    Mat GrayImage;
    capture >> Image;

    if (Image.empty()){                                         
        continue;
    }

    flip(Image, Image, 1);                                  
    cvtColor(Image, GrayImage, CV_BGR2GRAY);

    headTracing(GrayImage, Image, casceye , cascFace, faceArea);        


    switch (waitKey(2)) {
    case 27:        // ESC key - quit
        return 0;

    case 120:       // x key - force a face recalibration
        calibrationFace = 0;
        break;

    case 99:        // c key - reset the counters to 0
        leftEyeOpen = true;
        rightEyeOpen = true;
        blinkNumberLeft = 0;
        blinkNumberRight = 0;
        break;
    }
}
return 0;
}




// Averages the dense optical-flow vectors over the face patch to estimate
// the global head movement in x and y.
void calcFlow(const Mat& flow, Mat& cflowmap, int step, int &globalMoveX, int &globalMoveY)
{
int localMoveX = 0;
int localMoveY = 0;

for (int y = 0; y < cflowmap.rows; y += step)
{
    for (int x = 0; x < cflowmap.cols; x += step)
    {
        const Point2f& fxy = flow.at<Point2f>(y, x);

        localMoveX = localMoveX + fxy.x;
        localMoveY = localMoveY + fxy.y;
        }
    }

globalMoveX = (localMoveX / (cflowmap.cols * cflowmap.rows))*2;                         
globalMoveY = (localMoveY / (cflowmap.rows * cflowmap.cols))*2;
}


void headTracing(Mat grayImage, Mat image, CascadeClassifier casceye, CascadeClassifier cascFace, Rect &faceArea) {

Rect face = detectLargestObject(grayImage, cascFace);
if (face.width == 0 && face.height == 0) {
    imshow("Ergebnis", image);                                  
    return;                                                             
}

calibrationFace = calibrationFace - 1;

if (faceArea.height == 0|| calibrationFace < 1) {           
    faceArea = face;
    lastFace = grayImage(face);
    calibrationFace = calibrationDefault;                           
}
else {                                                      

    actualFace = grayImage(faceArea);

    Mat flow, cflow;
    calcOpticalFlowFarneback(lastFace, actualFace, flow, 0.5, 3, 15, 3, 5, 1.2, 0);

    cvtColor(lastFace, cflow, CV_GRAY2BGR);

    int globalMoveX, globalMoveY;

    calcFlow(flow, cflow, 1, globalMoveX, globalMoveY);


    faceArea.x = faceArea.x + globalMoveX;      
    faceArea.y = faceArea.y + globalMoveY;

    if (faceArea.x < 0) {                                       
        faceArea.x = 0;
    }
    if (faceArea.y < 0) {
        faceArea.y = 0;
    }

    if (faceArea.x + faceArea.width > image.size().width - 1) {     
        faceArea.x = image.size().width - faceArea.width - 1;
    }
    if (faceArea.y + faceArea.height > image.size().height - 1) {
        faceArea.y = image.size().height - faceArea.height - 1;
    }
    //rectangle(image,faceArea, 12);                
    actualFace = grayImage(faceArea);               


    eyeTracking(actualFace, lastFace, blinknumber);   // now we have two stabilized frames (current & previous), so we can compute the motion
    swap(lastFace, actualFace);  // the current frame becomes the previous frame and vice versa
}
putText(image,patch::to_string(blinknumber), cvPoint(520, 45), FONT_HERSHEY_COMPLEX_SMALL, 1.5, cvScalar(100, 100, 255), 1, CV_AA);

imshow("Ergebnis", image);       //Ergebniss anzeigen
}

Rect detectLargestObject(Mat grayImage, CascadeClassifier cascFace)  {

Rect value;

vector<Rect> faces;
cascFace.detectMultiScale(grayImage, faces, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE | CV_HAAR_FIND_BIGGEST_OBJECT, Size(150, 150), Size(300, 300));
if (faces.size() > 0) {
    return faces[0];
}
else {
    return value;
}
}

void eyeTracking(Mat &actualFace, Mat &lastFace,int &blinknumber) {

Mat eyeActual;
getEyes(actualFace, eyeActual);
Mat eyeActualGray;
cvtColor(eyeActual, eyeActualGray, COLOR_GRAY2BGR);   // 3-channel copy of the gray eye region so the circles can be drawn in color

//medianBlur(eyeActual, eyeActual,5);
//cvtColor(eyeActual, eyeActualGray, COLOR_BGR2GRAY);

namedWindow("Kreis", CV_WINDOW_AUTOSIZE);

//Canny(eyeActual,eyeActual,5,70,3);
medianBlur(eyeActual, eyeActual,5);
//threshold(eyeActual,eyeActual,50,200,THRESH_BINARY);
//vector<vector<Point> > contours;

vector <Vec3f> circles;

//findContours(eyeActual.clone(), contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
//drawContours(eyeActual, contours, -1, CV_RGB(255,255,255), -1);

HoughCircles( eyeActual, circles, CV_HOUGH_GRADIENT, 1, eyeActual.rows/8, 50,25,5,15 );

for( size_t i = 0; i < circles.size(); i++ )
  {

    Vec3i c = circles[i];
    //circle(eyeActualGray,Point(c[0], c[1]), c[2], Scalar(0,0,255), 2);
    circle( eyeActualGray, Point(c[0], c[1]), 2, Scalar(0,255,0), 2);

    blinknumber=blinknumber+1;

}
    imshow("Kreis", eyeActualGray);

}

// Crops a fixed-proportion region of the face that should contain the eyes.
void getEyes(Mat &face, Mat &eye) {

Size faceSize = face.size();

int eye_area_width = faceSize.width * eyeWidth;
int eye_area_height = faceSize.width *eyeHeight;
int eye_area_top = faceSize.height *eyeTop;

Rect rightEyeArea(faceSize.width*eyeSide, eye_area_top, eye_area_width, eye_area_height);
eye = face(rightEyeArea);
}

1 Answer:

Answer 0 (score: 0):

The description of the problem sounds like a question about the detection approach rather than a C++ or an OpenCV problem. If I understand correctly, your eye detection works "reasonably well". Blink detection as described works by the eye no longer being detected: you detect "not blinking" by finding the eye in every single frame. That requires a very good eye detector, because a single missed detection already gets counted as a blink.
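
A minimal sketch of that per-frame scheme (the helper name countBlinksByAbsence and its parameters are illustrative, not taken from the question): run the eye cascade on every frame and count a blink the moment the eye stops being detected.

#include <opencv2/opencv.hpp>
#include <vector>

// Counts blinks purely by the eye cascade failing to find an eye.
// Note that a single missed detection is already counted as a blink,
// which is why this scheme needs a very reliable detector.
int countBlinksByAbsence(cv::VideoCapture &capture, cv::CascadeClassifier &eyeCascade)
{
    int blinks = 0;
    bool eyeVisible = true;
    cv::Mat frame, gray;

    while (capture.read(frame)) {
        cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);

        std::vector<cv::Rect> eyes;
        eyeCascade.detectMultiScale(gray, eyes, 1.1, 3);

        bool detected = !eyes.empty();
        if (eyeVisible && !detected)
            blinks++;                    // eye just disappeared -> one blink
        eyeVisible = detected;
    }
    return blinks;
}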

A more robust approach would be to remember what the region around the eye looks like and to watch for large changes in those pixels, since a closed eyelid looks completely different.
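
A rough sketch of that appearance-based idea, assuming a stored grayscale reference of the open eye and a difference threshold that would have to be tuned (the helper name looksLikeBlink and the threshold value are assumptions, not part of the question):

#include <opencv2/opencv.hpp>

// Flags a blink when the current eye region differs strongly from a
// reference image of the open eye. The threshold of 25 is only a guess
// and needs tuning for lighting and camera noise.
bool looksLikeBlink(const cv::Mat &openEyeRefGray, const cv::Mat &eyeCurrentGray,
                    double threshold = 25.0)
{
    CV_Assert(openEyeRefGray.size() == eyeCurrentGray.size());

    cv::Mat diff;
    cv::absdiff(openEyeRefGray, eyeCurrentGray, diff);  // per-pixel |reference - current|
    double meanDiff = cv::mean(diff)[0];                // average intensity change
    return meanDiff > threshold;                        // large change -> eyelid probably closed
}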

<强>然而即可。看看你的代码,我没有看到实现你所描述的任何代码的代码。您拨打HoughCircles来检测圈子。然后,对于检测到的每个圆圈,您将一个添加到闪烁计数器?

On top of that, you have blinkNumberLeft and blinkNumberRight, which appear to be unused. Combined with the inconsistent indentation and the commented-out code, I suspect you have lost track of your own code.