How can I detect when dlib's correlation_tracker has lost the target?

Date: 2016-08-21 22:41:52

Tags: python computer-vision dlib

I added a correlation_tracker to a multi-threaded face-tracking script. Oddly, it usually tracks a face on screen quite well, but when you put your hand over the camera it keeps reporting the same coordinates and highlighting the same region. Is there a good way to detect when the tracked object has actually left the frame, or do I have to periodically re-run the slower detect-all-the-faces detector?

from __future__ import division
import sys
from time import time, sleep
import threading

import dlib
from skimage import io   #needed for the image-file code path below


detector = dlib.get_frontal_face_detector()
win = dlib.image_window()

def adjustForOpenCV( image ):
    """ OpenCV delivers BGR frames but dlib expects RGB: swap the R and B channels in place. """
    image[:, :, [0, 2]] = image[:, :, [2, 0]]
    return image

class webCamGrabber( threading.Thread ):
    def __init__( self ):
        threading.Thread.__init__( self )
        #Lock for when you can read/write self.image:
        #self.imageLock = threading.Lock()
        self.image = False

        from cv2 import VideoCapture, cv
        from time import time

        self.cam = VideoCapture(0)  #set the port of the camera as before
        #Doesn't seem to work:
        self.cam.set(cv.CV_CAP_PROP_FRAME_WIDTH, 160)
        self.cam.set(cv.CV_CAP_PROP_FRAME_HEIGHT, 120)
        #self.cam.set(cv.CV_CAP_PROP_FPS, 1)


    def run( self ):
        while True:
            start = time()
            #self.imageLock.acquire()
            retval, self.image = self.cam.read() #read() returns a success flag and the latest frame
            #print( "readimage: " + str( time() - start ) )
            #sleep(0.1)

if len( sys.argv[1:] ) == 0:

    #Start webcam reader thread:
    camThread = webCamGrabber()
    camThread.start()

    #Setup window for results
    detector = dlib.get_frontal_face_detector()
    win = dlib.image_window()

    while True:
        #camThread.imageLock.acquire()
        if camThread.image is not False:
            print( "enter")
            start = time()

            #dlib expects RGB but OpenCV delivers BGR; swap the channels in place
            myimage = adjustForOpenCV( camThread.image )


            dets = detector( myimage, 0)
            #camThread.imageLock.release()
            print "your faces:" +str( len(dets) )
            nearFace = None
            nearFaceArea = 0

            for i, d in enumerate( dets ):
                #print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
                #    i, d.left(), d.top(), d.right(), d.bottom()))
                screenArea = (d.right() - d.left()) * (d.bottom() - d.top())
                #print 'area', screenArea
                if screenArea > nearFaceArea:
                    nearFaceArea = screenArea   #remember the largest face seen so far
                    nearFace = d
            print( "face-find-time: " + str( time() - start ) )

            print("from left: {}".format( ( (nearFace.left() + nearFace.right()) / 2 ) / len(camThread.image[0]) ))
            print("from top: {}".format( ( (nearFace.top() + nearFace.bottom()) / 2 ) / len(camThread.image)) )

            start = time()
            win.clear_overlay()
            win.set_image(myimage)
            win.add_overlay(nearFace)
            print( "show: " + str( time() - start ) )

            if nearFace != None:
                points = (nearFace.left(), nearFace.top(), nearFace.right(), nearFace.bottom() )
                tracker = dlib.correlation_tracker()
                tracker.start_track( myimage, dlib.rectangle(*points))

                while True:
                    myImage = adjustForOpenCV( camThread.image )

                    tracker.update( myImage )
                    rect = tracker.get_position()
                    cx = (rect.right() + rect.left()) / 2
                    cy = (rect.top() + rect.bottom()) / 2
                    print( 'correlationTracker %s,%s' % (cx, cy) )
                    print( rect )
                    win.clear_overlay()
                    win.set_image( myImage )
                    win.add_overlay( rect )
                    sleep( 0.1 )

            #dlib.hit_enter_to_continue()




for f in sys.argv[1:]:
    print("Processing file: {}".format(f))
    img = io.imread(f)
    # The 1 in the second argument indicates that we should upsample the image
    # 1 time.  This will make everything bigger and allow us to detect more
    # faces.
    dets = detector(img, 1)
    print("Number of faces detected: {}".format(len(dets)))
    for i, d in enumerate(dets):
        print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
            i, d.left(), d.top(), d.right(), d.bottom()))

    win.clear_overlay()
    win.set_image(img)
    win.add_overlay(dets)
    dlib.hit_enter_to_continue()


# Finally, if you really want to you can ask the detector to tell you the score
# for each detection.  The score is bigger for more confident detections.
# Also, the idx tells you which of the face sub-detectors matched.  This can be
# used to broadly identify faces in different orientations.
if (len(sys.argv[1:]) > 0):
    img = io.imread(sys.argv[1])
    dets, scores, idx = detector.run(img, 1)
    for i, d in enumerate(dets):
        print("Detection {}, score: {}, face_type:{}".format(
            d, scores[i], idx[i]))

1 Answer:

Answer 0 (score: 2):

You have to run the face detector every so often to check whether the face is still there.
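
In practice there are two complementary checks, sketched below (this is an illustration, not code from the answer): re-run the full detector every few frames to confirm the face is still present, and watch the value returned by tracker.update(), which dlib describes as a peak-to-side-lobe ratio measuring how confident the tracker is that the object is still inside get_position(). The get_frame() helper and the two threshold constants are placeholders to replace with your own capture code and tuned values.

import dlib

detector = dlib.get_frontal_face_detector()
tracker = dlib.correlation_tracker()

REDETECT_EVERY = 30        #re-run the slow detector every N frames (tuning assumption)
CONFIDENCE_THRESHOLD = 7   #update() scores below this suggest the target is lost (assumption)

tracking = False
frame_count = 0

while True:
    frame = get_frame()    #placeholder: an RGB frame from your capture thread
    frame_count += 1

    if not tracking:
        dets = detector(frame, 0)
        if len(dets) > 0:
            tracker.start_track(frame, dets[0])
            tracking = True
        continue

    #update() returns how confident the tracker is that the object is
    #still inside get_position() (a peak-to-side-lobe ratio)
    confidence = tracker.update(frame)

    if confidence < CONFIDENCE_THRESHOLD or frame_count % REDETECT_EVERY == 0:
        #confirm with the slower detector; if the face is gone, drop the track
        dets = detector(frame, 0)
        if len(dets) == 0:
            tracking = False
        elif confidence < CONFIDENCE_THRESHOLD:
            tracker.start_track(frame, dets[0])

A practical way to pick the threshold is to print the value returned by update() while covering the camera, as in the original script, and see how far it drops compared with normal tracking.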