This code measures how closed the eyes are (the eye aspect ratio). I want to display the OpenCV image from a webcam video stream on a tkinter label. When I run it, the tkinter window and the OpenCV frame appear separately, and the stream stops. In addition, the conditional check for the eye measurement never runs while the tkinter window is showing. I put this code together after searching through a lot of material. How can I measure the degree of eye closure?
import cv2
import sys
import tkinter as tk
import numpy
import playsound
import argparse
import imutils
import time
import dlib
import PIL
from PIL import Image, ImageTk
from scipy.spatial import distance as dist
from imutils.video import VideoStream
from imutils import face_utils
from threading import Thread
root = tk.Tk()
root.title("OpenCV-tkinter")
cvFrame = tk.Frame(root)
cvFrame.grid(row = 0, column = 0, padx = 10, pady = 10)
lbl1 = tk.Label(cvFrame)
lbl1.grid(row = 0, column = 0)
def ExitButton():
    sys.exit()
btn = tk.Button(cvFrame, text = "Exit", font = ('Arial', '30', 'bold'), foreground = "Red", command = ExitButton) # (.., height = 2, width = 60, ..)
btn.grid(row = 1, column = 0, columnspan = 2, sticky = tk.N + tk.S + tk.W + tk.E)
def sound_alarm(path):
    playsound.playsound(path)
def eye_aspect_ratio(eye):
    # vertical distances between the two pairs of vertical eye landmarks
    A = dist.euclidean(eye[1], eye[5])
    B = dist.euclidean(eye[2], eye[4])
    # horizontal distance between the eye corners
    C = dist.euclidean(eye[0], eye[3])
    # the eye aspect ratio drops toward 0 as the eye closes
    ear = (A + B) / (2.0 * C)
    return ear
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--shape-predictor", required=True,
help="path to facial landmark predictor")
ap.add_argument("-a", "--alarm", type=str, default="",
help="path alarm .WAV file")
ap.add_argument("-w", "--webcam", type=int, default=0,
help="index of webcam on system")
args = vars(ap.parse_args())
EYE_AR_THRESH = 0.3          # EAR below this value counts as a closed eye
EYE_AR_CONSEC_FRAMES = 40    # consecutive closed-eye frames before the alarm fires
COUNTER = 0
ALARM_ON = False
print("[INFO] loading facial landmark predictor...")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(args["shape_predictor"])
(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
print("[INFO] starting video stream thread...")
vs = VideoStream(src=args["webcam"]).start()
fileStream = False
time.sleep(1.0)
while True:
    frame = vs.read()
    frame = imutils.resize(frame, width=900)
    frame = cv2.flip(frame, 1)
    # convert BGR -> RGB for PIL and dlib
    image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    img = Image.fromarray(image)
    imgtk = ImageTk.PhotoImage(image=img)
    lbl1.imgtk = imgtk            # keep a reference so the image is not garbage-collected
    lbl1.configure(image=imgtk)
    rects = detector(image, 0)
    for rect in rects:
        shape = predictor(image, rect)
        shape = face_utils.shape_to_np(shape)
        leftEye = shape[lStart:lEnd]
        rightEye = shape[rStart:rEnd]
        leftEAR = eye_aspect_ratio(leftEye)
        rightEAR = eye_aspect_ratio(rightEye)
        ear = (leftEAR + rightEAR) / 2.0
        leftEyeHull = cv2.convexHull(leftEye)
        rightEyeHull = cv2.convexHull(rightEye)
        cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
        cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)
        if ear < EYE_AR_THRESH:
            COUNTER += 1
            if COUNTER >= EYE_AR_CONSEC_FRAMES:
                if not ALARM_ON:
                    ALARM_ON = True
                    if args["alarm"] != "":
                        t = Thread(target=sound_alarm,
                                   args=(args["alarm"],))
                        t.daemon = True
                        t.start()
                cv2.putText(frame, "DROWSINESS ALERT!", (10, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        else:
            COUNTER = 0
            ALARM_ON = False
        cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
    # the eye contours and EAR text are drawn on "frame" (BGR), which only goes to the
    # separate OpenCV window below, not to the tkinter label filled earlier
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    root.after(10)        # with no callback this only sleeps for 10 ms
    root.mainloop()       # blocks here on the first iteration, so the stream stops
    if key == ord("q"):
        break
cv2.destroyAllWindows()
vs.stop()
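A minimal sketch of the root.after() pattern that is usually used to push OpenCV frames into a tkinter label: tkinter owns the loop, mainloop() is called exactly once, and the frame/EAR update is rescheduled instead of running in a while loop with cv2.imshow. It assumes the detector, predictor, eye_aspect_ratio, lStart/lEnd, rStart/rEnd, vs, lbl1, root, EYE_AR_THRESH and COUNTER already defined above; update_frame is a hypothetical name, not part of the original code.

# Sketch only: let tkinter drive the updates, no while loop and no separate OpenCV window.
def update_frame():
    global COUNTER
    frame = vs.read()
    frame = imutils.resize(frame, width=900)
    frame = cv2.flip(frame, 1)
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

    for rect in detector(rgb, 0):
        shape = face_utils.shape_to_np(predictor(rgb, rect))
        leftEAR = eye_aspect_ratio(shape[lStart:lEnd])
        rightEAR = eye_aspect_ratio(shape[rStart:rEnd])
        ear = (leftEAR + rightEAR) / 2.0
        COUNTER = COUNTER + 1 if ear < EYE_AR_THRESH else 0
        # draw on the BGR frame *before* converting it for the label,
        # so the EAR text also shows up in the tkinter window
        cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

    imgtk = ImageTk.PhotoImage(image=Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)))
    lbl1.imgtk = imgtk                # keep a reference so the image is not garbage-collected
    lbl1.configure(image=imgtk)
    root.after(10, update_frame)      # reschedule the next frame instead of looping

update_frame()
root.mainloop()                       # called exactly once; closing the window ends the program
vs.stop()

With this structure the EAR condition is evaluated on every frame inside tkinter's own event loop, which is the behavior the question is after.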