所以基本上我有一个具有启动画面Tkinter框架的应用程序。
我需要在 OpenCV 检测到人脸时关闭此框架；随后 OpenCV 会打开一个窗口，显示相机画面并开始跟踪微笑。下面代码中初始化的其他类用于更新数据库，并在识别出笑容时播放 DTMF 音。我的主要问题是：Tkinter 框架的主循环（mainloop）会阻塞，不允许任何其他进程继续运行。
我还需要在检测到人脸时关闭 Tkinter 框架。目前我尝试用一个单独的人脸检测循环来专门负责关闭框架（框架类提供了 facefound() 方法，它只是调用 self.destroy()）。
我需要对帧的主循环进行多处理,以便面部检测器可以实际检查是否在视图中。
当前代码:
import numpy as np
import sys
from imutils.video import VideoStream
import datetime
import argparse
import imutils
import time
import cv2
from chocolate_returner import Choco_returner
from given_resetter import Given_resetter
from tone_player import Tone_player
from play_flagger import Play_flagger
from Welcome_frame import Intro
import time
# ---- Command-line arguments and camera / classifier setup. ----
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--picamera", type=int, default=-1,
                help="whether or not the Raspberry Pi camera should be used")
args = vars(ap.parse_args())

# Haar cascades for face and smile detection (XML files in the working dir).
faceCascade = cv2.CascadeClassifier('faces.xml')
smileCascade = cv2.CascadeClassifier('smiles.xml')

# Fail fast: cv2 does NOT raise on a missing/unreadable cascade file -- it
# silently returns an empty classifier and detectMultiScale errors later.
if faceCascade.empty() or smileCascade.empty():
    raise IOError("could not load faces.xml / smiles.xml cascade files")

# Start the threaded video stream and give the camera sensor time to warm up.
cap = VideoStream(usePiCamera=args["picamera"] > 0).start()
time.sleep(2.0)
# ---- Phase 1: wait until a face appears, then close the intro frame. ----
# BUG FIX: `it` was used below (it.facefound()) but never defined anywhere
# in the original script, which is a guaranteed NameError. The intro frame
# must exist before we can dismiss it.
# NOTE(review): if Intro() starts Tkinter's blocking mainloop() inside
# __init__, this still needs to run in a separate process/thread -- confirm
# against the Welcome_frame implementation.
it = Intro()

while True:
    # Grab a frame from the threaded stream and shrink it for faster detection.
    framex = cap.read()
    frame = imutils.resize(framex, width=400)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=0,
    )

    # Draw a rectangle around each detected face.
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        print("found", len(faces), "faces")

    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

    if len(faces) > 0:
        # A face is in view: dismiss the Tkinter splash frame and fall
        # through to the smile-tracking loop.
        it.facefound()
        break
# ---- Phase 2: track smiles on the live feed. ----
# Hoisted out of the loop: the original re-created all four helper objects
# on every detected face of every frame.
cr = Choco_returner()
gr = Given_resetter()  # NOTE(review): unused below in the visible code; kept for parity.
tp = Tone_player()
pf = Play_flagger()

while True:
    framex = cap.read()
    frame = imutils.resize(framex, width=400)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=0,
    )

    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        print("found", len(faces), "faces")

        # Search for smiles only inside this face's region of interest.
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = frame[y:y + h, x:x + w]
        smile = smileCascade.detectMultiScale(
            roi_gray,
            scaleFactor=1.7,
            minNeighbors=5,
            minSize=(5, 5),
            flags=0,
        )
        time.sleep(2.0)

        # BUG FIX: the original reused (x, y, w, h) here, clobbering the
        # face coordinates mid-iteration; distinct names keep both valid.
        for (sx, sy, sw, sh) in smile:
            print("Found", len(smile), "smiles!")
            cv2.rectangle(roi_color, (sx, sy), (sx + sw, sy + sh), (255, 0, 0), 1)
            cv2.imwrite("test.jpg", framex)

        # Kept inside the face loop so `smile` is always defined here
        # (at loop level it would be a NameError on a face-free frame).
        if len(smile) > 0:
            # A smile was recognized: play the DTMF tone, flag it played.
            return1 = cr.returner()
            tp.play(str(return1[1]), str(return1[2]))
            pf.flag(return1[0])

    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# Shut down the threaded camera stream and close the OpenCV windows.
# BUG FIX: imutils.video.VideoStream exposes stop(), not release();
# the original cap.release() would raise AttributeError on exit.
cap.stop()
cv2.destroyAllWindows()