在GUI中通过眨眼控制按钮(两个while循环无法同时运行)

时间:2018-05-19 10:52:55

标签: python multithreading user-interface infinite-loop keypress

我正在尝试做GUI,用户可以通过眨眼来控制按钮。基本上,短暂的闪烁应该模拟按键盘键Tab(从一个按钮移动到另一个按钮)和长闪烁应模拟按键空间(进入所选按钮)。

这个想法是两个过程,窗口和眨眼检测系统同时运行。所以在这里我得到了所有问题:因为它们都是while循环我不能同时运行它们。

在我附加的代码中,我通过首先打开主窗口然后单击按钮Start以运行eyeblink系统来简化此操作。使用pyautogui.press()我想模仿主窗口中的键盘按下。但是,当眨眼检测系统工作时,主窗口不再可访问(您无法按任何东西)。

我试图在每一帧而不是无限循环中唤起闪烁功能,但它太慢而且无法正确检测闪烁。我也尝试了多处理和'Python无意中退出',没有显示错误,所以不确定发生了什么(我以前尝试过的代码在最后评论)。我也尝试过线程但是以一种简单的方式没有错误但是没有任何错误出现(再次,我用来尝试这个的代码在最后评论)。 在这里,我附上文件的链接(.mp3,.xml,.py): https://drive.google.com/drive/folders/1U2uwHXzl2MtSTlAKw1L68L3xcRmelP2d?usp=sharing

我刚开始使用Python,所以我的知识不高,我已经没时间了,而且我已经陷入困境......所以任何帮助都会受到欢迎!在此先感谢;)

Mac系统 Python 2.7 OpenCV 3.4 Tkinter(我之所以选择它是因为它很容易处理,但如果有必要我可以改变它)

# Ventana+Blink
from Tkinter import *
import numpy as np
import cv2
# To emulate a keyboard pressing
import pyautogui
import time
# To play the sounds
import subprocess 
# from Blink import funcion_blink
# from multiprocessing import Process
# import threading 


def Onbutton_clicked():
    """Handler for the 'alarm' button: play alarm2.mp3 two times.

    Blocks until both playbacks finish (afplay runs synchronously).
    """
    for _ in range(2):
        subprocess.call(['afplay', 'alarm2.mp3'])

def Onbutton2_clicked():
    """Handler for the 'extra' button: play sound.mp3 once."""
    subprocess.call(['afplay', 'sound.mp3'])


def execute_func1():
    """Blink action 1: emulate a Space key press and play a sound."""
    print('enter\n')
    # Emulated keystroke is delivered to whatever window has focus.
    pyautogui.press('space')
    subprocess.call(['afplay', 'unconvinced.mp3'])

def execute_func2():
    """Blink action 2: emulate a Tab key press and play a sound."""
    print('tab\n')
    # Emulated keystroke is delivered to whatever window has focus.
    pyautogui.press('tab')
    subprocess.call(['afplay', 'case-closed.mp3'])

def execute_func3():
    """Blink action 3: emulate a Space key press and play a sound."""
    print('space\n')
    # Emulated keystroke is delivered to whatever window has focus.
    pyautogui.press('space')
    subprocess.call(['afplay', 'cheerful.mp3'])


# ----- Eyeblink detection system -----
def funcion_blink():
    """Watch the webcam and turn eye-region changes into key emulation.

    The first 10 face detections are used to grab a template of the eye
    band of the face (resized to 100x100).  Every later frame is
    template-matched against it; ``n`` counts consecutive frames whose
    normalized correlation stays above 0.90.  When the correlation drops
    (e.g. on a blink), the length of the preceding high-match run picks
    the action:

        n >= 12 -> execute_func1 (Space)
        n >= 6  -> execute_func2 (Tab)
        n >= 3  -> execute_func3 (Space)

    Press 'q' in the video window to stop.  The Haar cascade XML files
    must sit in the same folder as this script.
    """
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')

    video_capture = cv2.VideoCapture(0)
    det = 0  # frames consumed while acquiring the eye template
    n = 0    # consecutive frames still matching the template

    while True:
        # Capture frame-by-frame; bail out if the camera stops delivering
        # frames (the original crashed in cvtColor on a failed read).
        ret, frame = video_capture.read()
        if not ret:
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        faces = face_cascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(100, 100),
            flags=cv2.CASCADE_SCALE_IMAGE
        )

        for (x, y, w, h) in faces:
            # Draw a rectangle around the detected face.
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
            roi_gray = gray[y:y + h, x:x + w]
            roi_color = frame[y:y + h, x:x + w]
            eyes = eye_cascade.detectMultiScale(
                roi_gray,
                scaleFactor=1.1,
                minNeighbors=5,
                minSize=(30, 30),
                flags=cv2.CASCADE_SCALE_IMAGE
            )
            for (ex, ey, ew, eh) in eyes:
                cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)

            # BUGFIX: NumPy images index as [row, col] == [y, x].  The
            # original sliced gray[x:x+w, y:y+h], producing a transposed
            # face crop, and then compensated by slicing the eye band
            # with swapped axes.  Slice both axes correctly instead: the
            # eye band is 20%-50% of the face height and 15%-85% of its
            # width, matching the red rectangle drawn below.
            face_img = gray[y:y + h, x:x + w]
            face_res = cv2.resize(face_img, (100, 100), interpolation=cv2.INTER_CUBIC)
            eye_reg = face_res[20:50, 15:85]
            cv2.rectangle(frame, (x+15*w/100, y + 2*h / 10), (x + w*85/100, y + (5 * h / 10)), (0, 0, 255), 2)

            if det < 10:
                # Still acquiring: keep the latest eye band as template.
                tmpl_eyes = eye_reg
                det = det + 1
                print('template acquired\n')
            elif det == 10:
                # Template matching of the current eye band vs the template.
                res_templ = cv2.matchTemplate(eye_reg, tmpl_eyes, cv2.TM_CCOEFF_NORMED)
                min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res_templ)
                # The 0.90 threshold should be adapted to lighting and
                # camera position (the author suggested up to 0.92).
                if max_val > 0.90:
                    n = n + 1
                else:
                    # Match broke: fire the action selected by the run
                    # length, then restart the counter.
                    if n >= 12:
                        execute_func1()
                    elif n >= 6:
                        execute_func2()
                    elif n >= 3:
                        execute_func3()
                    n = 0
                print(max_val, n)

        # Display the resulting frame.
        cv2.imshow('Video', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything is done, release the camera before tearing down
    # the display windows.
    video_capture.release()
    cv2.destroyAllWindows()


# ---- Main window ----
def main_window():
    """Create and run the Tk control window with the three buttons.

    'alarm' and 'extra' play sounds; 'Start' launches the (blocking)
    eyeblink detection loop.  Blocks in Tk's mainloop until the window
    is closed.
    """
    root = Tk()
    root.geometry('700x700')

    # BUGFIX: Tk passes an Event object to bound handlers, but these
    # callbacks take no arguments, so the original binds raised
    # TypeError on keyboard activation.  Wrap them in lambdas that
    # discard the event.
    button = Button(root, text='alarm', command=Onbutton_clicked)
    button.bind('<Return>', lambda event: Onbutton_clicked())
    button.pack()

    button2 = Button(root, text='extra', command=Onbutton2_clicked)
    button2.bind('<Return>', lambda event: Onbutton2_clicked())
    button2.pack()

    # Pressing this button starts the eyeblink detection system.  The
    # command= option already handles mouse clicks; the original extra
    # bind('<Button-1>') would have run the handler a second time (and
    # crashed on the event argument), so it is replaced by a '<Return>'
    # binding consistent with the other buttons.
    button3 = Button(root, text='Start', command=funcion_blink)
    button3.bind('<Return>', lambda event: funcion_blink())
    button3.pack()

    # Keep the window alive until the user closes it.
    root.mainloop()

# Execute the main window only when run as a script, so importing this
# module (e.g. from a multiprocessing child) does not open the GUI.
if __name__ == '__main__':
    main_window()


# ---- Trials ----

# while True:
#     main_window()
#     funcion_blink()  
# It just plays one function and when it finishes it plays the next one

# Multiprocessing
# if __name__ == '__main__':
#     Process(target=main_window).start()
#     Process(target=funcion_blink).start()
# PYTHON QUITS UNEXPECTEDLY

# Threading
# p1 = threading.Thread(target=main_window, args=())
# p2 = threading.Thread(target=funcion_blink, args=())
# p1.start()
# p2.start()

0 个答案:

没有答案