Face recognition with multiple faces does not detect multiple faces

Asked: 2017-05-09 09:17:29

Tags: python opencv face-recognition lbph-algorithm

I have written a small program that detects faces and saves them to a training file so they can be recognized later.

I am having some trouble with this. Sometimes it throws the error below, saying LBPH::train was given empty data, which should not happen.

  

OpenCV Error: Unsupported format or combination of formats (Empty training data was given. You'll need more than one sample to learn a model.) in cv::LBPH::train, file ...\opencv\modules\contrib\src\facerec.cpp, line 917
Traceback (most recent call last):
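(Just to show what I mean: I assume a small guard like the one below would at least avoid the crash. The names faceSamples, Ids and recognizer are the same ones used in trainDetector() further down; this is only a sketch, not something already in my code.)

import numpy as np

def train_if_possible(recognizer, faceSamples, Ids, path='recognizer/train.yml'):
    # hypothetical guard: LBPH::train raises the "Empty training data" error
    # above whenever faceSamples is empty, so only train when samples exist
    if len(faceSamples) == 0:
        print("no face samples gathered, skipping train()")
        return False
    recognizer.train(faceSamples, np.array(Ids))
    recognizer.save(path)
    return True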

Also, the algorithm detects multiple faces but recognizes all of them as the same face, which is wrong.
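(To check my understanding of the recognizer itself: as far as I know, LBPH can only tell people apart if every person is trained under their own integer label, roughly like the self-contained sketch below. The noise images are just placeholders so the snippet runs on its own; in my program they would be the cropped grayscale faces.)

import cv2
import numpy as np

# dummy "people": three random grayscale patches each, standing in for real face crops
rng = np.random.RandomState(0)
person0 = [rng.randint(0, 256, (100, 100)).astype(np.uint8) for _ in range(3)]
person1 = [rng.randint(0, 256, (100, 100)).astype(np.uint8) for _ in range(3)]

samples = person0 + person1
labels = np.array([0] * len(person0) + [1] * len(person1))  # one fixed label per person

recognizer = cv2.createLBPHFaceRecognizer()  # OpenCV 2.4.x API, same as in my code below
recognizer.train(samples, labels)

# predict() returns the trained label plus a confidence (distance) value
label, confidence = recognizer.predict(person1[0])
print(label, confidence)  # expected label: 1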

Can someone give me a hint about what I am missing?

import cv2
import os
import numpy as np
import sys


i = 0
global allFaces
global first
first = True
allFaces = []
cap = cv2.VideoCapture(0)
faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
recognizer = cv2.createLBPHFaceRecognizer()
font=cv2.cv.InitFont(cv2.cv.CV_FONT_HERSHEY_COMPLEX_SMALL,1,1,0,1)

id = 0
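# Face: wraps one detected face crop together with its integer id and the
# grayscale samples gathered for training/updating the LBPH recognizer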
class Face:
    def __init__(self, id, face):
        self.id = id
        self.face = face
        self.gatheredFaces = []
    def main(self):
        print("main")
    def getFace(self):
        return self.face
    def setKnownFace(self):
        self.known = False
    def getKnownFace(self):
        return self.known
    def getId(self):
        return  self.id
    def setFacesInfo(self, frame, face):
        x, y, w, h = face  # detectMultiScale returns (x, y, w, h)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        self.gatheredFaces.append(gray[y:y+h, x:x+w])
#         count = 0
#         while (count != 10):
#             gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#             cv2.imshow("frame in set", frame)
#             faces = faceCascade.detectMultiScale(gray)
#             for face in faces:
#                 self.gatheredFaces.append(gray[y:y+h,x:x+w])
#                 cv2.imshow("gathered Faces", self.gatheredFaces[0])
#                 cv2.imwrite("dataSet/User"+ str(self.getId()) +".jpg", gray)
#             count = count+1
#             cv2.waitKey(30)

    def getFacesInfo(self):
        return self.gatheredFaces

    def trainDetector(self):
        faceSamples = []
        Ids = []
        print("laenge von gathered FAces")
        print(len(allFaces[0].getFacesInfo()))
        for (i) in range(len(allFaces)):
            temp = allFaces[i].getFacesInfo()
            for (j) in range(len(temp)):
                imageNP = np.array(temp[j], 'uint8')
                id = allFaces[i].getId()
                # keep the sample only if the cascade still finds a face in it;
                # if this never succeeds, faceSamples stays empty and the
                # train() call below raises the "Empty training data" error
                faces = faceCascade.detectMultiScale(imageNP)
                for (x, y, w, h) in faces:
                    faceSamples.append(imageNP)
                    Ids.append(id)

        recognizer.train(faceSamples, np.array(Ids))
        recognizer.save('recognizer/train.yml')

    def updateDetector(self):
        recognizer.load('recognizer/train.yml')
        faceSamples = []
        Ids = []
        for (i) in range(len(allFaces)):
            temp = allFaces[i].getFacesInfo()
            for (j) in range(len(temp)):
                imageNP = np.array(temp[j], 'uint8')
                id = allFaces[i].getId()
                faces = faceCascade.detectMultiScale(imageNP)
                for (x, y, w, h) in faces:
                    faceSamples.append(imageNP)
                    Ids.append(id)



        recognizer.update(faceSamples, np.array(Ids))
        recognizer.save('recognizer/train.yml')

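# Main loop: grab a frame, detect faces, and for each detection either train a
# brand-new Face (first pass) or compare it against the stored faces via SIFT + FLANN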
while True:
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow("actual Frame", frame)
    cv2.imshow("gray", gray)
    faces = faceCascade.detectMultiScale(gray, 1.3, 5)
    print(faces)
    for face in faces:
        x, y, w, h = face  # detectMultiScale returns (x, y, w, h)
        temp = Face(id, frame[y:y+h,x:x+w])
        allFaces.append(temp)
        temp = None
        id = id+1
        ###Detector
        detector = cv2.SIFT()
        FLANN_INDEX_KDTREE = 0
        flannParam = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)  # FLANN kd-tree parameter is "trees"
        flann = cv2.FlannBasedMatcher(flannParam,{})
        trainImg = allFaces[0].getFace()
        trainKP, trainDecs = detector.detectAndCompute(trainImg, None)

        if((len(allFaces)==1) and first):
            print("only one object in allFaces")
            for i in range(10):
                print(i)
                allFaces[0].setFacesInfo(frame, face)
            allFaces[0].trainDetector()
            first = False

        else:
            for(i) in range(len(allFaces)):
                QueryImg = cv2.cvtColor(allFaces[i].getFace(), cv2.COLOR_BGR2GRAY)
                queryKP, queryDesc = detector.detectAndCompute(QueryImg, None)
                matches = flann.knnMatch(queryDesc, trainDecs, k = 2)
                goodMatch = []
                for m, n in matches:
                    if(m.distance < 0.75 * n.distance):
                        goodMatch.append(m)
                if(len(goodMatch) > 30):
                    print("good match")
                    #allFaces[i].
                    tp = []
                    qp = []
                    for m in goodMatch:
                        tp.append(trainKP[m.trainIdx].pt)
                        qp.append(queryKP[m.queryIdx].pt)
                    tp, qp = np.float32((tp, qp))
                    H, status = cv2.findHomography(tp, qp, cv2.RANSAC, 3.0)
                    allFaces.pop(len(allFaces)-1)
                    break
                else:
                    print ("bad match")
                    for i in range(10):
                        allFaces[len(allFaces)-1].setFacesInfo(frame, face)

                    allFaces[len(allFaces)-1].updateDetector()
                    cv2.waitKey(10)

        for (x,y,w,h) in faces:
            cv2.rectangle(frame, (x,y), (x+w,y+h), (0,0,255),2)
            tempid, conf = recognizer.predict(gray[y:y+h,x:x+w])
            cv2.cv.PutText(cv2.cv.fromarray(frame), str(tempid),(x,y+h),font,(0,0,255))
            cv2.waitKey(30)

        cv2.imshow("detectedFace", frame)
        cv2.waitKey(30)
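(In case it makes the matching step easier to follow, here is the SIFT/FLANN ratio test from the loop above pulled out as a small stand-alone helper. It is only a sketch of the same idea and assumes the OpenCV 2.4.x nonfree build that provides cv2.SIFT(); the function name and thresholds are mine, not part of the program.)

import cv2

def looks_like_same_face(gray_a, gray_b, ratio=0.75, min_good=30):
    # hypothetical helper: True if the two grayscale crops share enough SIFT
    # matches under Lowe's ratio test (same thresholds as in the loop above)
    detector = cv2.SIFT()  # requires the OpenCV 2.4.x nonfree module
    kp_a, desc_a = detector.detectAndCompute(gray_a, None)
    kp_b, desc_b = detector.detectAndCompute(gray_b, None)
    if desc_a is None or desc_b is None:
        return False  # no features found in at least one crop

    FLANN_INDEX_KDTREE = 0
    flann = cv2.FlannBasedMatcher(dict(algorithm=FLANN_INDEX_KDTREE, trees=5), {})
    matches = flann.knnMatch(desc_a, desc_b, k=2)

    good = [pair[0] for pair in matches
            if len(pair) == 2 and pair[0].distance < ratio * pair[1].distance]
    return len(good) > min_good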

0 Answers:

No answers