Face alignment check using dlib

Asked: 2017-05-12 12:49:46

Tags: opencv dlib

I was just wondering whether it is possible to detect, directly from a webcam feed using dlib and OpenCV, whether a face is correctly aligned (facing the camera)?


I tried using this code to detect the face and get the facial landmark points:


import argparse

import cv2
import dlib
import imutils
from imutils import face_utils
from imutils.video import VideoStream

# path to dlib's pre-trained facial landmark model
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--shape-predictor", required=True,
    help="path to facial landmark predictor")
args = vars(ap.parse_args())

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(args["shape_predictor"])

vs = VideoStream(0).start()

while True:
    # grab the frame from the threaded video stream, resize it to
    # have a maximum width of 400 pixels, and convert it to
    # grayscale
    frame = vs.read()
    frame = imutils.resize(frame, width=400)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # detect faces in the grayscale frame
    rects = detector(gray, 0)

    # loop over the face detections
    for rect in rects:
        # determine the facial landmarks for the face region, then
        # convert the facial landmark (x, y)-coordinates to a NumPy
        # array
        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)

        # loop over the (x, y)-coordinates for the facial landmarks
        # and draw them on the image
        for (x, y) in shape:
            print(x, y)
            cv2.circle(frame, (x, y), 1, (0, 0, 255), -1)
        # "Aptiktas veidas" is Lithuanian for "face detected"
        cv2.putText(frame, "Aptiktas veidas", (10, 30),
            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

    # show the frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF

    # quit when 'q' is pressed
    if key == ord("q"):
        break

cv2.destroyAllWindows()
vs.stop()
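
Assuming the script is saved as, say, detect_landmarks.py (a placeholder name), it would be run with the path to dlib's 68-point landmark model, for example:

python detect_landmarks.py --shape-predictor shape_predictor_68_face_landmarks.dat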

2 Answers:

Answer 0 (score: 3):

Below is a function I wrote to find the pose of a person's head. Here p1 and p2 define the pose vector; computing the angle between them is trivial, and based on that angle you can decide which images to accept and which to discard.

import cv2
import numpy as np


def pose_estimate(image, landmarks):
    """
    Given an image and a set of facial landmarks generates the direction of pose
    """
    size = image.shape
    image_points = np.array([
        (landmarks[33, 0], landmarks[33, 1]),     # Nose tip
        (landmarks[8, 0], landmarks[8, 1]),       # Chin
        (landmarks[36, 0], landmarks[36, 1]),     # Left eye left corner
        (landmarks[45, 0], landmarks[45, 1]),     # Right eye right corner
        (landmarks[48, 0], landmarks[48, 1]),     # Left Mouth corner
        (landmarks[54, 0], landmarks[54, 1])      # Right mouth corner
        ], dtype="double")

    model_points = np.array([
        (0.0, 0.0, 0.0),             # Nose tip
        (0.0, -330.0, -65.0),        # Chin
        (-225.0, 170.0, -135.0),     # Left eye left corner
        (225.0, 170.0, -135.0),      # Right eye right corner
        (-150.0, -150.0, -125.0),    # Left Mouth corner
        (150.0, -150.0, -125.0)      # Right mouth corner
        ])

    focal_length = size[1]
    center = (size[1]/2, size[0]/2)
    camera_matrix = np.array([
        [focal_length, 0, center[0]],
        [0, focal_length, center[1]],
        [0, 0, 1]
        ], dtype="double")

    dist_coeffs = np.zeros((4, 1))
    success, rotation_vector, translation_vector = cv2.solvePnP(model_points, image_points, camera_matrix, dist_coeffs)     
    (nose_end_point2D, jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]), rotation_vector, translation_vector, camera_matrix, dist_coeffs)
    p1 = (int(image_points[0][0]), int(image_points[0][1]))
    p2 = (int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))
    return p1, p2
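
The angle computation itself is not shown in the answer; below is a minimal sketch of one way to do it, using only the p1 and p2 returned by pose_estimate above. The pose_angle name and any threshold you apply to the result are illustrative, not part of the original code.

import math

# Sketch only: angle of the projected pose line p1 -> p2, in degrees.
# How large an angle (or horizontal offset) still counts as "aligned"
# is an application-specific threshold.
def pose_angle(p1, p2):
    dx = p2[0] - p1[0]   # horizontal component of the pose vector
    dy = p2[1] - p1[1]   # vertical component of the pose vector
    return math.degrees(math.atan2(dy, dx))

# usage:
# p1, p2 = pose_estimate(frame, shape)   # shape = 68-point landmark array
# angle = pose_angle(p1, p2)

The second answer below uses just the sign of the horizontal component (comparing p1[0] with p2[0]) to decide whether the head is turned left or right.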

Answer 1 (score: 0):

You can get the landmark points from dlib and use them in OpenCV.

import cv2
import numpy as np
from facial_landmarks import calc_points
import sys 

# Read Image
im = cv2.imread('img.jpg')
if im is None:
    print('Image not found')
    sys.exit()
size = im.shape

facial_points = calc_points('img.jpg')
if len(facial_points) == 0:
    print('Face not Detected')
    sys.exit()

cl = cr = 0

for i in range(len(facial_points)):
    #2D image points. If you change the image, you need to change vector
    image_points = np.array([
                                facial_points[i]['nose'],     # Nose tip
                                facial_points[i]['chin'],     # Chin
                                facial_points[i]['lefteye'],     # Left eye left corner
                                facial_points[i]['righteye'],     # Right eye right corner
                                facial_points[i]['leftmouth'],     # Left Mouth corner
                                facial_points[i]['rightmouth']      # Right mouth corner
    ], dtype='double')

    # 3D model points.
    model_points = np.array([
                                (0.0, 0.0, 0.0),             # Nose tip
                                (0.0, -330.0, -65.0),        # Chin
                                (-225.0, 170.0, -135.0),     # Left eye left corner
                                (225.0, 170.0, -135.0),      # Right eye right corner
                                (-150.0, -150.0, -125.0),    # Left Mouth corner
                                (150.0, -150.0, -125.0)      # Right mouth corner

                            ])


    # Camera internals

    focal_length = size[1]
    center = (size[1]/2, size[0]/2)
    camera_matrix = np.array(
                            [[focal_length, 0, center[0]],
                            [0, focal_length, center[1]],
                            [0, 0, 1]], dtype = 'double'
                            )

    dist_coeffs = np.zeros((4,1)) # Assuming no lens distortion
    (success, rotation_vector, translation_vector) = cv2.solvePnP(model_points, image_points, camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE)

    (nose_end_point2D, jacobian) = cv2.projectPoints(np.array([(0.0, 0.0, 1000.0)]), rotation_vector, translation_vector, camera_matrix, dist_coeffs)

    for p in image_points:
        cv2.circle(im, (int(p[0]), int(p[1])), 3, (0,0,255), -1)


    p1 = ( int(image_points[0][0]), int(image_points[0][1]))
    p2 = ( int(nose_end_point2D[0][0][0]), int(nose_end_point2D[0][0][1]))

    cv2.line(im, p1, p2, (255,0,0), 2)

    if p1[0] > p2[0]:
        cl += 1
    else:
        cr += 1

if cl > cr:
    text = 'left'
else:
    text = 'right'
# Display image
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(im, text,(10,50), font, 0.5,(0,0,255),1,cv2.LINE_AA)
cv2.imshow('Output', im)
cv2.waitKey(0)

I wrote this code to determine whether the person has turned to the left or to the right.
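
The facial_landmarks.calc_points helper imported above is not shown in the answer; below is a minimal sketch of what it might look like with dlib's 68-point predictor. The landmark indices follow the first answer, and the model filename is a placeholder.

import cv2
import dlib

# Sketch of the (not shown) facial_landmarks.calc_points helper, assuming
# dlib's 68-point model; the .dat path is a placeholder.
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

def calc_points(image_path):
    """Return one dict of named 2D landmarks per detected face."""
    img = cv2.imread(image_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = []
    for rect in detector(gray, 0):
        shape = predictor(gray, rect)
        p = shape.part
        faces.append({
            'nose': (p(33).x, p(33).y),          # nose tip
            'chin': (p(8).x, p(8).y),            # chin
            'lefteye': (p(36).x, p(36).y),       # left eye, left corner
            'righteye': (p(45).x, p(45).y),      # right eye, right corner
            'leftmouth': (p(48).x, p(48).y),     # left mouth corner
            'rightmouth': (p(54).x, p(54).y),    # right mouth corner
        })
    return faces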