Integrating OpenCV-processed video with Tkinter

Time: 2019-06-29 21:18:00

Tags: python python-3.x opencv tkinter

I have a program that processes video, detects cars, and sends a message to a Raspberry Pi when a car enters a certain region. Without a GUI it works more or less correctly, but when I use a tkinter GUI the processing stops working and the GUI shows only the video. Could you help me out? All of the code is posted below.
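
For reference, here is a minimal sketch of the non-blocking pattern Tkinter usually needs: read and process one frame per after() tick instead of running a blocking while-loop. The webcam index and the process() helper below are illustrative placeholders of mine, not part of the project:

import cv2
import tkinter as tk
from PIL import Image, ImageTk

def process(frame):
    # Placeholder per-frame processing (hypothetical)
    return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

root = tk.Tk()
label = tk.Label(root)
label.pack()
cap = cv2.VideoCapture(0)  # assumed source; a video file path also works

def update():
    ret, frame = cap.read()
    if not ret:
        cap.release()
        return
    img = ImageTk.PhotoImage(Image.fromarray(process(frame)))
    label.imgtk = img            # keep a reference so Tk does not discard it
    label.configure(image=img)
    root.after(10, update)       # reschedule; the mainloop is never blocked

update()
root.mainloop()

My actual code follows.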

def addpath(dirs):
    """Append one or more subdirectories of this script's folder to sys.path."""
    import sys, os
    my_path = os.path.dirname(os.path.abspath(sys.argv[0]))
    if isinstance(dirs, str):
        sys.path.append(os.path.join(my_path, dirs))
    elif isinstance(dirs, list):
        for d in dirs:
            sys.path.append(os.path.join(my_path, d))

################################################################################

addpath('subdir')  #or addpath(['subdir1',...])

from subdir import b
b.fie()

The other code used:

# Name:        Vehicle_detection_V11
# Purpose:      Vehicle detection and tracking for the Smart Streetlight project
#                   Daytime
#                       Option 1
#                           Car cascade classifier with a Kalman filter for car tracking
#                           Pedestrian cascade classifier for pedestrians
#                       Option 2
#                           Background subtraction with a Kalman filter for car tracking
#                   Nighttime
#                       Color manipulation for car detection at night
# Author:      Freadam and Kaland
# Created:     20/01/2019
# Copyright:   (c) freadam 2019
#-------------------------------------------------------------------------------

#  1  ================== Importing Libraries ============================
import cv2
import numpy as np
import time as Tim  # aliased; the GUI defines its own time() function below
import copy
import socket
from datetime import datetime
from threading import Timer
from PIL import ImageTk
from PIL import Image as _image
from tkinter import *
from tkinter import filedialog

#=============   Using other files  ===================================================
from kalman_filter import KalmanFilter
from tracker import Tracker
# 2 ================= Global Variables ==================================================

#================== Executed inside lamp region ============================
def countdown(dir):
    # NOTE: this assignment is local to countdown and does not affect the
    # timedOut variable used inside DayBackgroundSubtraction
    timedOut = True
    # Choose the payload for the given direction and send it once
    sData = '00'.encode()
    if dir == 'up':
        sData = '84321'.encode()
    elif dir == 'down':
        sData = '81234'.encode()
    elif dir == 'down1':
        sData = '81'.encode()
    elif dir == 'down2':
        sData = '82'.encode()
    elif dir == 'down3':
        sData = '83'.encode()
    elif dir == 'down4':
        sData = '84'.encode()
    conn.send(sData)

    print("OFF")
def DayBackgroundSubtraction():
    hasPassed = False
    timedOut = False
    passedVeh = []
    FPS = 30
    #Distance to line in road: ~0.025 miles
    ROAD_DIST_MILES = 0.025
    speedLimit = 65

    # Initialize the background subtractor and the text font
    fgbg = cv2.createBackgroundSubtractorKNN()
    font = cv2.FONT_HERSHEY_PLAIN

    centers = []

    # x-range and y-coordinate of the speed detection line
    line2xOne=300
    line2xTwo=640
    line2y=230

    # Region 1 (lamp region) bounding coordinates
    R11x = 310
    R12x = 600
    R11y = 250
    R12y = 470


    # Minimum blob sizes (px) for objects far from / near to the camera
    blob_min_width_far = 6
    blob_min_height_far = 6

    blob_min_width_near = 18
    blob_min_height_near = 18

    frame_start_time = None

    # Create object tracker
    tracker = Tracker(100, 5, 2, 1)
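    # (tracker.py is not included in this post; the four arguments are
    # presumably distance threshold, max frames to skip, max trace length,
    # and the starting track id, but that is an assumption)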

    # Use the capture object opened by the GUI (global vid_capture)
    cap = vid_capture


    while cap.isOpened():
        frame_start_time = datetime.utcnow()
        centers = []
        (ret, frame) = cap.read()

        if not ret:
            print ('end of the video file')
            break

        frame= cv2.resize(frame,(640,480))


        orig_frame = copy.copy(frame)

        #  Draw line used for speed detection
        cv2.line(frame,(line2xOne,line2y),(line2xTwo,line2y),(0,255,0),1)

        #Draw lines for Region 1
        cv2.line(frame,(R11x,R11y),(R12x,R11y),(0,255,255),1)
        cv2.line(frame,(R11x,R12y),(R12x,R12y),(0,255,255),1)


        # Convert frame to grayscale and perform background subtraction
        gray = cv2.cvtColor (frame, cv2.COLOR_BGR2GRAY)
        fgmask = fgbg.apply (gray)
        #cv2.imshow('',fgmask)

        # Perform some Morphological operations to remove noise
        kernel = np.ones((5,5), np.uint8)
        kernel_dilate = np.ones((5,5), np.uint8)
        opening = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
        # NOTE: despite the name, this second pass is another opening, not a dilation
        dilation = cv2.morphologyEx(opening, cv2.MORPH_OPEN, kernel_dilate)
        ret,thresh1 = cv2.threshold(dilation,5,255,cv2.THRESH_BINARY)
        thresh2 = cv2.erode(thresh1, None, iterations=2)
        thresh2 = cv2.dilate(thresh2, None, iterations=2)



        # OpenCV 3.x findContours returns (image, contours, hierarchy);
        # OpenCV 4.x returns only (contours, hierarchy). Note this searches
        # thresh1, so the eroded/dilated thresh2 above is never actually used
        _, contours, hierarchy = cv2.findContours(thresh1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        # Find centers of all detected objects
        for cnt in contours:
            x, y, w, h = cv2.boundingRect(cnt)
            if x > line2xOne and x < line2xTwo:
                if y > line2y:
                    if w >= blob_min_width_near and h >= blob_min_height_near:
                        #if w>10 and h>10:
                        center = np.array ([[x+w/2], [y+h/2]])
                        centers.append(np.round(center))

                        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)
                else:
                    if w >= blob_min_width_far and h >= blob_min_height_far:
                        #if w>10 and h>10:
                        center = np.array ([[x+w/2], [y+h/2]])
                        centers.append(np.round(center))

                        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)

        if centers:
            tracker.update(centers)

            for vehicle in tracker.tracks:
                if len(vehicle.trace) > 1:
                    for j in range(len(vehicle.trace)-1):
                        # Draw trace line
                        x1 = vehicle.trace[j][0][0]
                        y1 = vehicle.trace[j][1][0]
                        x2 = vehicle.trace[j+1][0][0]
                        y2 = vehicle.trace[j+1][1][0]

                        #cv2.line(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 255), 2)

                    try:
                        # TODO: account for load lag
                        trace_i = len(vehicle.trace) - 1

                        trace_x = vehicle.trace[trace_i][0][0]
                        trace_y = vehicle.trace[trace_i][1][0]

                        # Check if tracked object has reached the speed detection line
                        if trace_y <= line2y + 5 and trace_y >= line2y - 5 and not vehicle.passed:
                            #cv2.putText(frame, 'PASSED!', (int(trace_x), int(trace_y)), font, 1, (0, 255, 255), 1, cv2.LINE_AA)
                            vehicle.passed = True

                            load_lag = (datetime.utcnow() - frame_start_time).total_seconds()

                            time_dur = (datetime.utcnow() - vehicle.start_time).total_seconds() - load_lag
                            # Convert seconds to hours so that mph = miles / hours
                            time_dur /= 60
                            time_dur /= 60


                            vehicle.mph = ROAD_DIST_MILES / time_dur

                            # If calculated speed exceeds speed limit, save an image of speeding car
                            if vehicle.mph > speedLimit:

                                #print ('SPEEDING!')
                                cv2.circle(orig_frame, (int(trace_x), int(trace_y)), 20, (0, 0, 255), 2)
                                cv2.putText(orig_frame, 'MPH: %s' % int(vehicle.mph), (int(trace_x), int(trace_y)), font, 1, (0, 0, 255), 1, cv2.LINE_AA)

                                cv2.imwrite('speeding_%s.png' % vehicle.track_id, orig_frame)
                                #print ('FILE SAVED!')



                        if vehicle.passed:
                            cv2.putText(frame, 'ID: '+ str(vehicle.track_id), (int(trace_x), int(trace_y)), font, 1, (255, 255, 255), 1, cv2.LINE_AA)
                            #Check if in region 1
                            if(R11y <= int(trace_y) and int(trace_y) <= R12y):
                                for car_id in passedVeh:
                                    hasPassed = False
                                    if (vehicle.track_id ==car_id):
                                        hasPassed = True
                                        break
                                if (hasPassed == False):
                                    passedVeh.append(vehicle.track_id)

                                    if(len(passedVeh)==1):
                                        sData = '14321'.encode()
                                        conn.send(sData)
                                        print("Starting Timer")
                                        print('ON')
                                        t1 = Timer(8, countdown,args=['up'])
                                        t1.start()

                                    if (len(passedVeh) >= 2):
                                        if (timedOut == False):
                                            noTimer =0
                                            t1.cancel()
                                            print("Cancelled")
                                            sData = '14321'.encode()
                                            conn.send(sData)
                                            print("Restarting Timer")
                                            print('ON')
                                            t1 = Timer(8, countdown, args=['up'])
                                            t1.start()
                                        elif(timedOut == True):
                                            sData = '14321'.encode()
                                            conn.send(sData)
                                            print("Starting Timer")
                                            print('ON')
                                            t1 = Timer(8, countdown,args=['up'])
                                            t1.start()

                    except Exception:
                        # NOTE: silently swallows any error raised in the block above
                        pass




        # Display all images
        overlay = frame.copy()
        output = frame.copy()
        alpha = 0.25
        cv2.rectangle(overlay, (R11x, R11y), (R12x, R12y),(0, 255, 0), -1)
        cv2.addWeighted(overlay, alpha, output, 1 - alpha,0, output)
        #cv2.imshow ('original', frame)
        #cv2.imshow ('thresholded', thresh2)
        #cv2.imshow("Output", output)
        #cv2.imshow ('background subtraction', fgmask)


        # Sleep to keep video speed consistent
        Tim.sleep(1.0 / FPS)
        # NOTE: returning inside the while-loop means only ONE frame is
        # processed per call, and the cleanup below is never reached
        return output


    # Clean up
    cap.release()
    cv2.destroyAllWindows()

#================== Main Loop  ============================
#===============clock===============
def time():
    # Refresh the clock label every 200 ms by rescheduling itself
    now = datetime.now()
    timeStamp2 = now.strftime("%Y-%m-%d %H:%M:%S")
    timeStamp.config(text=timeStamp2)
    timeStamp.after(200, time)


#===============Browse File====================
def vid_cap():

    vid_file = filedialog.askopenfilename(initialdir="/", title="Select file",
                                          filetypes=(("mp4 files", "*.mp4"), ("all files", "*.*")))
    return vid_file

def when_pressed():
    vid_file = vid_cap()
    global vid_capture
    vid_capture = cv2.VideoCapture(vid_file)
    # return vid_capture


#===============Open Video====================
def show_frame():
    lmain = Label(bottomFrame)  # NOTE: a brand-new Label is created on every call
    lmain.grid(row=0, column=0)
    (ret, frame) = vid_capture.read()
    # NOTE: the frame read above is immediately discarded; whichOne() reads its
    # own frame(s) from the same vid_capture, so frames are consumed twice per tick
    frame = whichOne()
    if ret:
        cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
        img = _image.fromarray(cv2image)
        imgtk = ImageTk.PhotoImage(image=img)
        lmain.imgtk = imgtk  # keep a reference so the image is not garbage collected
        lmain.configure(image=imgtk)
        lmain.after(10, show_frame)
    else:
        lmain.destroy()

def whichOne():
    # NOTE: hours 18 and 0 fall through to the daytime branch below;
    # NighttimeBackgroundSubtraction() and CascadeClassifier() are not shown in this post
    if (18 < int(hour) and int(hour) < 24) or (0 < int(hour) and int(hour) < 7):
        #print('Running Night time BackgroundSubtraction')
        frame = NighttimeBackgroundSubtraction()
    else:
        if choice.get() == 1:
            #print('Running Cascade Classifier')
            frame = CascadeClassifier()
        if choice.get() == 2:
            #print('Running BackgroundSubtraction')
            frame = DayBackgroundSubtraction()
        # NOTE: if neither radio button is selected (choice == 0), frame is
        # never assigned and the return below raises UnboundLocalError
    return frame
#     # light_intensity = getdouble(LightIntensityTextBox.get())
#     # print(light_intensity)
#     # if light_intensity < 50:
#     #     print('Running Night time BackgroundSubstraction')
#     #     NighttimeBackgroundSubstraction()
#     # else:
#     #     print('Running Day time BackgroundSubstraction')
#     #     DayBackgroundSubstraction()






if __name__ == '__main__':
    # NOTE: with the socket block below commented out, `conn` is never defined,
    # so the conn.send(...) calls above raise NameError when they are reached
    # # this is the laptop's IP
    # TCP_IP = '192.168.43.109'
    # TCP_PORT = 5005
    # #Normally 1024
    # BUFFER_SIZE = 1024
    # s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # s.bind((TCP_IP, TCP_PORT))
    # s.listen(1)
    # print("Server start")
    # print ('Waiting for Connection')
    # conn, addr = s.accept()
    # print ('Connection established! ')
    # print ('With :', addr)
    Time = datetime.now()
    hour = '{:02d}'.format(Time.hour)
    minute = '{:02d}'.format(Time.minute)
    print('Current Time is ',hour,':',minute)

    # ===============GUI====================
    root = Tk()
    root.wm_title('Smart Streetlight System')
    root.iconbitmap("icon3.ico")
    root.geometry('700x700')
    choice = IntVar()

    # Frames
    topFrame = Frame(root)
    topFrame.pack(anchor=NW)
    bottomFrame = Frame(root)
    bottomFrame.pack(anchor=S)

    # Buttons
    playButton = Button(topFrame, text="Play", command=show_frame)
    browseFileButton = Button(topFrame, text="Browse", command=when_pressed)

    # Labels
    # lmain = Label(bottomFrame)
    timeStampLabel = Label(topFrame, text="Time")
    timeStamp = Label(topFrame)
    LightIntensity = Label(topFrame, text="Light Intensity")

    # Text boxes
    LightIntensityTextBox = Entry(topFrame)

    # Radio Buttons
    ccRadioButton = Radiobutton(topFrame, text="Cascade Classifier", variable=choice, value=1)
    bsRadioButton = Radiobutton(topFrame, text="Background Subtraction", variable=choice, value=2)

    # Top Frame Alignment
    timeStampLabel.grid(row=0, column=0)
    timeStamp.grid(row=0, column=1)
    browseFileButton.grid(row=0, column=4)
    LightIntensity.grid(row=3, column=3)
    LightIntensityTextBox.grid(row=3, column=4)
    ccRadioButton.grid(row=4, column=3)
    bsRadioButton.grid(row=4, column=4)
    playButton.grid(row=5, column=4)

    # Bottom Frame Alignment
    # lmain.grid(row=0, column=0)

    if (18<int(hour) and int(hour)<24) or (0<int(hour) and int(hour)<7):
        ccRadioButton.configure(state=DISABLED)
        bsRadioButton.configure(state=DISABLED)

    time()
    mainloop()
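
Another pattern I have seen suggested, sketched here under the assumption that all Tk calls must stay on the main thread: run the OpenCV loop in a daemon worker thread and hand finished frames to the GUI through a queue. The video path is a hypothetical placeholder:

import queue
import threading
import tkinter as tk
import cv2
from PIL import Image, ImageTk

frames = queue.Queue(maxsize=2)  # small buffer between worker and GUI

def worker(path):
    # The blocking OpenCV loop runs here, never on the Tk main thread
    cap = cv2.VideoCapture(path)
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        try:
            frames.put_nowait(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        except queue.Full:
            pass  # drop frames rather than stall the capture loop
    cap.release()

root = tk.Tk()
label = tk.Label(root)
label.pack()

def poll():
    # Runs on the Tk thread: display whatever the worker has produced
    try:
        img = ImageTk.PhotoImage(Image.fromarray(frames.get_nowait()))
        label.imgtk = img
        label.configure(image=img)
    except queue.Empty:
        pass
    root.after(30, poll)

threading.Thread(target=worker, args=('video.mp4',), daemon=True).start()
poll()
root.mainloop()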

Another file (kalman_filter.py):

import numpy as np


class KalmanFilter(object):
    """Kalman Filter class keeps track of the estimated state of
    the system and the variance or uncertainty of the estimate.
    Predict and Correct methods implement the functionality
    Reference: https://en.wikipedia.org/wiki/Kalman_filter
    Attributes: None
    """

    def __init__(self):
        """Initialize variable used by Kalman Filter class
        Args:
            None
        Return:
            None
        """
        self.dt = 0.005  # delta time

        self.A = np.array([[1, 0], [0, 1]])  # matrix in observation equations
        self.u = np.zeros((2, 1))  # previous state vector

        # (x,y) tracking object center
        self.b = np.array([[0], [255]])  # vector of observations

        self.P = np.diag((3.0, 3.0))  # covariance matrix
        self.F = np.array([[1.0, self.dt], [0.0, 1.0]])  # state transition mat

        self.Q = np.eye(self.u.shape[0])  # process noise matrix
        self.R = np.eye(self.b.shape[0])  # observation noise matrix
        self.lastResult = np.array([[0], [255]])

    def predict(self):
        """Predict state vector u and variance of uncertainty P (covariance).
            where,
            u: previous state vector
            P: previous covariance matrix
            F: state transition matrix
            Q: process noise matrix
        Equations:
            u'_{k|k-1} = Fu'_{k-1|k-1}
            P_{k|k-1} = FP_{k-1|k-1} F.T + Q
            where,
                F.T is F transpose
        Args:
            None
        Return:
            vector of predicted state estimate
        """
        # Predicted state estimate
        self.u = np.round(np.dot(self.F, self.u))
        # Predicted estimate covariance
        self.P = np.dot(self.F, np.dot(self.P, self.F.T)) + self.Q
        self.lastResult = self.u  # save the last predicted result
        return self.u

    def correct(self, b, flag):
        """Correct or update state vector u and variance of uncertainty P (covariance).
        where,
        u: predicted state vector u
        A: matrix in observation equations
        b: vector of observations
        P: predicted covariance matrix
        Q: process noise matrix
        R: observation noise matrix
        Equations:
            C = AP_{k|k-1} A.T + R
            K_{k} = P_{k|k-1} A.T(C.Inv)
            u'_{k|k} = u'_{k|k-1} + K_{k}(b_{k} - Au'_{k|k-1})
            P_{k|k} = P_{k|k-1} - K_{k}(CK.T)
            where,
                A.T is A transpose
                C.Inv is C inverse
        Args:
            b: vector of observations
            flag: if "true" prediction result will be updated else detection
        Return:
            predicted state vector u
        """

        if not flag:  # update using prediction
            self.b = self.lastResult
        else:  # update using detection
            self.b = b
        C = np.dot(self.A, np.dot(self.P, self.A.T)) + self.R
        K = np.dot(self.P, np.dot(self.A.T, np.linalg.inv(C)))

        self.u = np.round(self.u + np.dot(K, (self.b - np.dot(self.A,
                                                              self.u))))
        self.P = self.P - np.dot(K, np.dot(C, K.T))
        self.lastResult = self.u
        return self.u
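
For reference, a minimal usage sketch of this class from another script; the numbers are made up for illustration:

import numpy as np
from kalman_filter import KalmanFilter

kf = KalmanFilter()
predicted = kf.predict()                  # predicted state, shape (2, 1)
detection = np.array([[120.0], [245.0]])  # hypothetical detected center
corrected = kf.correct(detection, True)   # flag True: fuse the detection
print(predicted.ravel(), corrected.ravel())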

0 Answers:

No answers yet.