Error when detecting eye state between two frames

Time: 2017-12-15 09:52:22

Tags: python-3.x opencv eye-detection

I am trying to implement a simpler version of blink detection by using an idea from object detection. My plan is to grab the region around the eyes at every first and third frame and use the Structural Similarity measure (SSIM) to check the eye state, i.e. to detect a change between the two frames. But my results are coming out wrong.
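
For reference, the core comparison I am relying on is just SSIM between two equally sized grayscale patches. A minimal standalone sanity check looks roughly like this (it assumes the same scikit-image version as the code below, where the function is still skimage.measure.compare_ssim; newer releases expose it as skimage.metrics.structural_similarity):

import numpy as np
from skimage.measure import compare_ssim

# two synthetic 50x50 grayscale "eye" patches
patch_a = np.random.randint(0, 256, (50, 50), dtype=np.uint8)
patch_a_copy = patch_a.copy()
patch_b = np.random.randint(0, 256, (50, 50), dtype=np.uint8)

# identical patches give SSIM = 1.0, unrelated patches give a value near 0
print(compare_ssim(patch_a, patch_a_copy, data_range=255))  # 1.0
print(compare_ssim(patch_a, patch_b, data_range=255))       # close to 0

The 0.8 threshold in the code below is just a first guess at what counts as a "major drop" between two eye crops.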

Here is my code:

import cv2
import numpy as np
from skimage.measure import compare_ssim

# get the features for face from the file and pass it to the Cascade Classifier
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
# get the features for eyes from the file and pass it to the Cascade Classifier
eyes_cascade = cv2.CascadeClassifier("haarcascade_eye.xml")

#we can compare two images using Structural Similarity
#so a small change in pixel value won't prompt this method to term both images as dissimilar
#the closer the value is to 1, the more similar the two images are
def ssim(A, B):
    return compare_ssim(A, B, data_range=A.max() - A.min())

#capture a video either from a file or a live video stream
cap = cv2.VideoCapture(0)
current_frame = None
eye_frame_now = None
eye_frame_last = None
#we keep a count of the frames
frame_counter = 0
while True:
    if frame_counter == 0:
        # get the frame
        ret, current_frame = cap.read()
        # once the video is over in case of video files
        if current_frame is None:
            break
        gray_frame = cv2.cvtColor(current_frame, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray_frame, 1.05, 10)
        for face in faces:
            # get the coordinate of the detected face
            x, y, w, h = face
            # draw the rectangle on the image
            # cv2.rectangle(img, (x,y), (x + w, y + h), (0, 255, 0), 2)
            # the idea is that instead of looking at the entire image for eyes, we can just look at the detected faces
            # and try to search for eye regions on those faces
            # get the detected face using image slicing
            face_color = current_frame[y: y + h, x: x + w]
            face_gray = gray_frame[y: y + h, x: x + w]
            # cv2.imshow("Sliced out face", gray_frame)
            eyes = eyes_cascade.detectMultiScale(face_gray, 1.11, 4)
            for eye in eyes:
                # get the coordinate of the detected eye
                x1, y1, w1, h1 = eye
                eye_frame = face_gray[y1:y1 + h1, x1:x1 + w1]
                print("Eye Frame", eye_frame)
                if eye_frame.size > 0:  # make sure the eye crop is non-empty before resizing
                    eye_frame_last = cv2.resize(eye_frame, (50, 50))
                # draw a rectangle on the eyes of the face
                cv2.rectangle(face_color, (x1, y1), (x1 + w1, y1 + h1), (255, 0, 0), 2)

    #the idea is that instead of comparing two consecutive eye frames, we compare frames that are 3 intervals apart
    if frame_counter == 3:
        gray_frame = cv2.cvtColor(current_frame, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray_frame, 1.05, 10)
        for face in faces:
            # get the coordinate of the detected face
            x, y, w, h = face
            # draw the rectangle on the image
            # cv2.rectangle(img, (x,y), (x + w, y + h), (0, 255, 0), 2)
            face_color = current_frame[y: y + h, x: x + w]
            face_gray = gray_frame[y: y + h, x: x + w]
            # cv2.imshow("Sliced out face", gray_frame)
            eyes = eyes_cascade.detectMultiScale(face_gray, 1.11, 4)
            for eye in eyes:
                # get the coordinate of the detected eye
                x1, y1, w1, h1 = eye
                eye_frame = face_gray[y1:y1 + h1, x1:x1 + w1]
                if eye_frame.size > 0:  # make sure the eye crop is non-empty before resizing
                    eye_frame_now = cv2.resize(eye_frame, (50, 50))

                # draw a rectangle on the eyes of the face
                cv2.rectangle(face_color, (x1, y1), (x1 + w1, y1 + h1), (255, 0, 0), 2)
        #compare two images based on SSIM
        ssim_val = ssim(eye_frame_now, eye_frame_last)
        print(ssim_val)
        #if there is a major drop in the SSIM value ie it has detected a blink
        if ssim_val < 0.8:
            print("Blinking")

        frame_counter = -1

    #show the video as a series of frames
    cv2.imshow("Eye Detection",current_frame) #(name of the window,image file)
    frame_counter += 1

    key = cv2.waitKey(1) & 0xFF
    if key == ord('q'):
        break

#release the resources allocated to the video file or video stream
cap.release()
#destroy all the windows
cv2.destroyAllWindows()

Here is my output:

0.142644916596
Blinking
0.154798221175
Blinking
0.792948872827
0.584593035048
0.499610556414
0.194123593687
Blinking
0.838011295523
0.579537657669
0.080305786794
Blinking
0.189779706485
Blinking
0.457222729421
0.234885787265
Blinking

As you can see, the "Blinking" message gets printed frequently even when my eyes are open. Where am I going wrong?

0 Answers:

There are no answers yet.