Python 3: process and display a webcam stream at the webcam's FPS

Date: 2020-06-25 13:19:19

Tags: python python-3.x multithreading opencv tkinter

How can I read from the camera and display the images at the camera's frame rate?

I want to continuously read images from my webcam (doing some fast preprocessing) and then display the images in a window. This should run at the frame rate my webcam provides (29 fps). It seems that the OpenCV GUI and the Tkinter GUI are too slow to display images at this frame rate; they are clearly the bottleneck in my experiments. Even without any preprocessing, the images are not displayed fast enough. I am on a MacBook Pro 2018.

Here is what I have tried. The webcam is always read with OpenCV:

  • Everything happens in the main thread, images are displayed with OpenCV: 12 fps
  • The camera is read and preprocessing happens in a separate thread, images are displayed with OpenCV in the main thread: 20 fps
  • Multithreaded like above, but without displaying the images: 29 fps
  • Multithreaded like above, but displaying the images with Tkinter: no exact fps number, but it feels like <10 fps.

Here is the code:

Single loop, OpenCV GUI:

import cv2
import time


def main():
    cap = cv2.VideoCapture(0)
    window_name = "FPS Single Loop"
    cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)

    start_time = time.time()
    frames = 0

    seconds_to_measure = 10
    while start_time + seconds_to_measure > time.time():
        success, img = cap.read()
        img = img[:, ::-1]  # mirror
        time.sleep(0.01)  # simulate some processing time
        cv2.imshow(window_name, img)
        cv2.waitKey(1)
        frames = frames + 1

    cv2.destroyAllWindows()

    print(
        f"Captured {frames} in {seconds_to_measure} seconds. FPS: {frames/seconds_to_measure}"
    )


if __name__ == "__main__":
    main()

Captured 121 in 10 seconds. FPS: 12.1

Multithreaded, OpenCV GUI:

import logging
import time
from queue import Full, Queue
from threading import Thread, Event

import cv2

logger = logging.getLogger("VideoStream")


def setup_webcam_stream(src=0):
    cap = cv2.VideoCapture(src)
    width, height = (
        cap.get(cv2.CAP_PROP_FRAME_WIDTH),
        cap.get(cv2.CAP_PROP_FRAME_HEIGHT),
    )
    logger.info(f"Camera dimensions: {width, height}")
    logger.info(f"Camera FPS: {cap.get(cv2.CAP_PROP_FPS)}")
    grabbed, frame = cap.read()  # Read once to init
    if not grabbed:
        raise IOError("Cannot read video stream.")
    return cap


def video_stream_loop(video_stream: cv2.VideoCapture, queue: Queue, stop_event: Event):
    while not stop_event.is_set():
        try:
            success, img = video_stream.read()
            # We need a timeout here to not get stuck when no images are retrieved from the queue
            queue.put(img, timeout=1)
        except Full:
            pass  # try again with a newer frame


def processing_loop(input_queue: Queue, output_queue: Queue, stop_event: Event):
    while not stop_event.is_set():
        try:
            img = input_queue.get()
            img = img[:, ::-1]  # mirror
            time.sleep(0.01)  # simulate some processing time
            # We need a timeout here to not get stuck when no images are retrieved from the queue
            output_queue.put(img, timeout=1)
        except Full:
            pass  # try again with a newer frame


def main():
    stream = setup_webcam_stream(0)
    webcam_queue = Queue()
    processed_queue = Queue()
    stop_event = Event()
    window_name = "FPS Multi Threading"
    cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)

    start_time = time.time()
    frames = 0

    seconds_to_measure = 10
    try:
        Thread(
            target=video_stream_loop, args=[stream, webcam_queue, stop_event]
        ).start()
        Thread(
            target=processing_loop, args=[webcam_queue, processed_queue, stop_event]
        ).start()
        while start_time + seconds_to_measure > time.time():
            img = processed_queue.get()
            cv2.imshow(window_name, img)
            cv2.waitKey(1)
            frames = frames + 1
    finally:
        stop_event.set()

    cv2.destroyAllWindows()

    print(
        f"Captured {frames} frames in {seconds_to_measure} seconds. FPS: {frames/seconds_to_measure}"
    )
    print(f"Webcam queue: {webcam_queue.qsize()}")
    print(f"Processed queue: {processed_queue.qsize()}")


if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    main()

INFO:VideoStream:Camera dimensions: (1280.0, 720.0)
INFO:VideoStream:Camera FPS: 29.000049
Captured 209 frames in 10 seconds. FPS: 20.9
Webcam queue: 0
Processed queue: 82

Here you can see that there are frames left over in the second queue, waiting to be fetched for display.

When I comment out these two lines:

cv2.imshow(window_name, img)
cv2.waitKey(1)

then the output is:

INFO:VideoStream:Camera dimensions: (1280.0, 720.0)
INFO:VideoStream:Camera FPS: 29.000049
Captured 291 frames in 10 seconds. FPS: 29.1
Webcam queue: 0
Processed queue: 0

So it is able to process all frames at the webcam's speed, as long as the GUI does not display them.

Multithreaded, Tkinter GUI:

import logging
import time
import tkinter
from queue import Full, Queue, Empty
from threading import Thread, Event

import PIL
from PIL import ImageTk
import cv2

logger = logging.getLogger("VideoStream")


def setup_webcam_stream(src=0):
    cap = cv2.VideoCapture(src)
    width, height = cap.get(cv2.CAP_PROP_FRAME_WIDTH), cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    logger.info(f"Camera dimensions: {width, height}")
    logger.info(f"Camera FPS: {cap.get(cv2.CAP_PROP_FPS)}")
    grabbed, frame = cap.read()  # Read once to init
    if not grabbed:
        raise IOError("Cannot read video stream.")
    return cap, width, height


def video_stream_loop(video_stream: cv2.VideoCapture, queue: Queue, stop_event: Event):
    while not stop_event.is_set():
        try:
            success, img = video_stream.read()
            # We need a timeout here to not get stuck when no images are retrieved from the queue
            queue.put(img, timeout=1)
        except Full:
            pass  # try again with a newer frame


def processing_loop(input_queue: Queue, output_queue: Queue, stop_event: Event):
    while not stop_event.is_set():
        try:
            img = input_queue.get()
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            img = img[:, ::-1]  # mirror
            time.sleep(0.01)  # simulate some processing time
            # We need a timeout here to not get stuck when no images are retrieved from the queue
            output_queue.put(img, timeout=1)
        except Full:
            pass  # try again with a newer frame


class App:
    def __init__(self, window, window_title, image_queue: Queue, image_dimensions: tuple):
        self.window = window
        self.window.title(window_title)

        self.image_queue = image_queue

        # Create a canvas that can fit the above video source size
        self.canvas = tkinter.Canvas(window, width=image_dimensions[0], height=image_dimensions[1])
        self.canvas.pack()

        # After it is called once, the update method will be automatically called every delay milliseconds
        self.delay = 1
        self.update()

        self.window.mainloop()

    def update(self):
        try:
            frame = self.image_queue.get(timeout=0.1)  # Timeout to not block this method forever
            self.photo = ImageTk.PhotoImage(image=PIL.Image.fromarray(frame))
            self.canvas.create_image(0, 0, image=self.photo, anchor=tkinter.NW)
            self.window.after(self.delay, self.update)
        except Empty:
            pass  # try again next time


def main():
    stream, width, height = setup_webcam_stream(0)
    webcam_queue = Queue()
    processed_queue = Queue()
    stop_event = Event()
    window_name = "FPS Multi Threading"

    try:
        Thread(target=video_stream_loop, args=[stream, webcam_queue, stop_event]).start()
        Thread(target=processing_loop, args=[webcam_queue, processed_queue, stop_event]).start()
        App(tkinter.Tk(), window_name, processed_queue, (width, height))
    finally:
        stop_event.set()

    print(f"Webcam queue: {webcam_queue.qsize()}")
    print(f"Processed queue: {processed_queue.qsize()}")


if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    main()

INFO:VideoStream:Camera dimensions: (1280.0, 720.0)
INFO:VideoStream:Camera FPS: 29.000049
Webcam queue: 0
Processed queue: 968

1 Answer:

Answer (score: 6)

In this answer I share some considerations about camera FPS vs. display FPS, along with code examples that demonstrate:

  • the basics of FPS calculation;
  • how to increase the display FPS from 29 fps to 300+ fps;
  • how to use threading and queue efficiently to capture at the closest to the maximum fps supported by the camera;

For anyone running into your problem, here are a couple of important questions that need to be answered first:

  • What is the size of the images being captured?
  • How many FPS does your webcam support? (camera FPS)
  • How fast can you grab a frame from the webcam and display it in a window? (display FPS)

Camera FPS vs. display FPS

The camera fps refers to what the camera hardware is capable of. For example, ffmpeg tells me that at 640x480 my camera can return 15 fps minimum and 30 fps maximum:

ffmpeg -list_devices true -f dshow -i dummy
ffmpeg -f dshow -list_options true -i video="HP HD Camera"

[dshow @ 00000220181cc600]   vcodec=mjpeg  min s=640x480 fps=15 max s=640x480 fps=30
[dshow @ 00000220181cc600]   vcodec=mjpeg  min s=320x180 fps=15 max s=320x180 fps=30
[dshow @ 00000220181cc600]   vcodec=mjpeg  min s=320x240 fps=15 max s=320x240 fps=30
[dshow @ 00000220181cc600]   vcodec=mjpeg  min s=424x240 fps=15 max s=424x240 fps=30
[dshow @ 00000220181cc600]   vcodec=mjpeg  min s=640x360 fps=15 max s=640x360 fps=30
[dshow @ 00000220181cc600]   vcodec=mjpeg  min s=848x480 fps=15 max s=848x480 fps=30
[dshow @ 00000220181cc600]   vcodec=mjpeg  min s=960x540 fps=15 max s=960x540 fps=30
[dshow @ 00000220181cc600]   vcodec=mjpeg  min s=1280x720 fps=15 max s=1280x720 fps=30

The important realization here is that despite being able to capture 30 fps internally, there is no guarantee that an application will be able to pull those 30 frames from the camera in one second. The reasons behind this are clarified in the following sections.

The display fps refers to how many images can be drawn in a window per second. This number is not limited by the camera at all and is usually much higher than the camera fps. As you'll see later, it is possible to create an application that pulls 29 images per second from the camera and draws them more than 300 times per second. That means that the same image from the camera is drawn multiple times in a window before the next frame is pulled from the camera.
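
Side note: the ffmpeg commands above use the Windows dshow input. Since the question mentions a MacBook, the rough equivalent for listing capture devices on macOS (assuming ffmpeg is installed there) uses the avfoundation device:

ffmpeg -f avfoundation -list_devices true -i ""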

How many FPS can my webcam capture?

The following application simply demonstrates how to print the default settings used by the camera (size, fps), how to retrieve frames from it, display them in a window, and compute how many FPS are being rendered:

import numpy as np
import cv2
import datetime
    
def main():
    # create display window
    cv2.namedWindow("webcam", cv2.WINDOW_NORMAL)

    # initialize webcam capture object
    cap = cv2.VideoCapture(0)

    # retrieve properties of the capture object
    cap_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    cap_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    cap_fps = cap.get(cv2.CAP_PROP_FPS)
    fps_sleep = int(1000 / cap_fps)
    print('* Capture width:', cap_width)
    print('* Capture height:', cap_height)
    print('* Capture FPS:', cap_fps, 'ideal wait time between frames:', fps_sleep, 'ms')

    # initialize time and frame count variables
    last_time = datetime.datetime.now()
    frames = 0

    # main loop: retrieves and displays a frame from the camera
    while (True):
        # blocks until the entire frame is read
        success, img = cap.read()
        frames += 1

        # compute fps: current_time - last_time
        delta_time = datetime.datetime.now() - last_time
        elapsed_time = delta_time.total_seconds()
        cur_fps = np.around(frames / elapsed_time, 1)

        # draw FPS text and display image
        cv2.putText(img, 'FPS: ' + str(cur_fps), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2, cv2.LINE_AA)
        cv2.imshow("webcam", img)

        # wait 1ms for ESC to be pressed
        key = cv2.waitKey(1)
        if (key == 27):
            break

    # release resources
    cv2.destroyAllWindows()
    cap.release()


if __name__ == "__main__":
    main()

Output:

* Capture width: 640.0
* Capture height: 480.0
* Capture FPS: 30.0 wait time between frames: 33 ms

As mentioned before, by default my camera is able to capture 640x480 images at 30 fps. Yet even though the loop above is quite simple, my display FPS is lower: I can only retrieve frames and display them at 28 or 29 fps, and that's without performing any custom image processing in between. What's going on?

The reality is that even though the loop looks simple, there are things happening under the hood that cost just enough processing time to make it difficult for one iteration of the loop to finish in less than 33 ms:

  • cap.read() makes an I/O call to the camera driver in order to pull the new data. This function blocks the execution of your application until the data has been transferred completely;
  • a numpy array needs to be set up with the new pixels;
  • other calls are required to display a window and draw the pixels in it, namely cv2.imshow(), which is usually a slow operation;
  • there's also a 1 ms delay thanks to cv2.waitKey(1), which is required to keep the window open;

All of these operations, as small as they are, make it incredibly difficult for the application to call cap.read(), get a new frame, and display it at precisely 30 fps.
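
If you want to see where the time goes on your own machine, here is a minimal sketch (my addition, not strictly required for the rest of the answer) that times each step of the loop with time.perf_counter():

import time
import cv2


def main():
    cap = cv2.VideoCapture(0)
    cv2.namedWindow("timing", cv2.WINDOW_NORMAL)

    for _ in range(100):
        t0 = time.perf_counter()
        success, img = cap.read()   # blocking I/O call to the camera driver
        t1 = time.perf_counter()
        if not success:
            break
        cv2.imshow("timing", img)   # window drawing, usually the slow part
        t2 = time.perf_counter()
        cv2.waitKey(1)              # at least 1 ms to pump GUI events
        t3 = time.perf_counter()
        print(f"read: {(t1 - t0) * 1000:5.1f} ms  "
              f"imshow: {(t2 - t1) * 1000:5.1f} ms  "
              f"waitKey: {(t3 - t2) * 1000:5.1f} ms")

    cv2.destroyAllWindows()
    cap.release()


if __name__ == "__main__":
    main()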

There are a number of things you can try to speed up the application so that it is able to display more frames than the camera driver allows, and this post covers them well. Just remember this: you won't be able to capture more frames from the camera than its driver supports. You will, however, be able to display more frames.

How to increase the display FPS to 300+? A threading example.

One of the approaches used to increase the number of images displayed per second relies on the threading package to create a separate thread that continuously pulls frames from the camera. This works because the main loop of the application is no longer blocked on cap.read() waiting for it to return a new frame, which increases the number of frames that can be displayed (or drawn) per second.

Note: this approach renders the same image multiple times in a window until the next image from the camera is retrieved. Keep in mind that it might even draw an image while its content is still being updated with new data from the camera.

The following application is just an academic example, not something I recommend as production code, to increase the number of frames displayed in a window per second:

import numpy as np
import cv2
import datetime
from threading import Thread

# global variables
stop_thread = False             # controls thread execution
img = None                      # stores the image retrieved by the camera


def start_capture_thread(cap):
    global img, stop_thread

    # continuously read frames from the camera
    while True:
        _, img = cap.read()

        if (stop_thread):
            break


def main():
    global img, stop_thread

    # create display window
    cv2.namedWindow("webcam", cv2.WINDOW_NORMAL)

    # initialize webcam capture object
    cap = cv2.VideoCapture(0)

    # retrieve properties of the capture object
    cap_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    cap_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    cap_fps = cap.get(cv2.CAP_PROP_FPS)
    fps_sleep = int(1000 / cap_fps)
    print('* Capture width:', cap_width)
    print('* Capture height:', cap_height)
    print('* Capture FPS:', cap_fps, 'wait time between frames:', fps_sleep)

    # start the capture thread: reads frames from the camera (non-stop) and stores the result in img
    t = Thread(target=start_capture_thread, args=(cap,), daemon=True) # a daemon thread is killed when the application exits
    t.start()

    # initialize time and frame count variables
    last_time = datetime.datetime.now()
    frames = 0
    cur_fps = 0

    while (True):
        # blocks until the entire frame is read
        frames += 1

        # measure runtime: current_time - last_time
        delta_time = datetime.datetime.now() - last_time
        elapsed_time = delta_time.total_seconds()

        # compute fps but avoid division by zero
        if (elapsed_time != 0):
            cur_fps = np.around(frames / elapsed_time, 1)

        # TODO: make a copy of the image and process it here if needed

        # draw FPS text and display image
        if (img is not None):
            cv2.putText(img, 'FPS: ' + str(cur_fps), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2, cv2.LINE_AA)
            cv2.imshow("webcam", img)

        # wait 1ms for ESC to be pressed
        key = cv2.waitKey(1)
        if (key == 27):
            stop_thread = True
            break

    # release resources
    cv2.destroyAllWindows()
    cap.release()


if __name__ == "__main__":
    main()
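
As noted above, the global img can be replaced by the capture thread while the main loop is still drawing it. One common way to be safe, shown here only as a sketch (it is not part of the measurements in this answer), is to guard the shared frame with a threading.Lock and hand the display loop a copy:

import cv2
from threading import Thread, Lock

frame = None          # latest frame grabbed by the capture thread (shared state)
frame_lock = Lock()   # guards every access to `frame`
stop_thread = False   # tells the capture thread to exit


def capture_thread(cap):
    global frame
    while not stop_thread:
        ok, img = cap.read()
        if ok:
            with frame_lock:
                frame = img  # publish the newest frame


def main():
    global stop_thread
    cap = cv2.VideoCapture(0)
    Thread(target=capture_thread, args=(cap,), daemon=True).start()
    cv2.namedWindow("webcam", cv2.WINDOW_NORMAL)

    while True:
        with frame_lock:
            img = None if frame is None else frame.copy()  # copy so drawing never races the capture thread
        if img is not None:
            cv2.imshow("webcam", img)
        if cv2.waitKey(1) == 27:  # ESC
            stop_thread = True
            break

    cv2.destroyAllWindows()
    cap.release()


if __name__ == "__main__":
    main()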

How to capture at the closest to the maximum fps supported by the camera? A threading and queue example.

The problem with using a queue is that, performance-wise, what you get depends on how many frames per second the application can pull from the camera. If the camera supports 30 fps, then that's what your application might get, as long as the image processing operations being done are fast. Otherwise, there will be a drop in the number of frames displayed per second and the size of the queue will slowly increase until all your RAM is used up. To avoid that problem, make sure to set the queue's maximum size to a number that prevents the queue from growing beyond what your OS can handle.
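
For example, a bounded queue where the producer drops the oldest frame instead of letting the queue grow without limit could look like the sketch below (my own variant, assuming a single capture thread; the code further down keeps the queue unbounded):

import queue

frames_queue = queue.Queue(maxsize=2)  # small bound: memory stays flat and latency stays low


def put_latest(q: queue.Queue, img):
    """Insert img, discarding the oldest frame if the queue is full (single producer assumed)."""
    try:
        q.put_nowait(img)
    except queue.Full:
        try:
            q.get_nowait()   # throw away the stale frame
        except queue.Empty:
            pass             # a consumer emptied the queue in the meantime
        q.put_nowait(img)    # with a single producer this second put cannot fail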

The following code is a naive implementation that creates a dedicated thread to grab frames from the camera and put them in a queue that is later consumed by the main loop of the application:

import numpy as np
import cv2
import datetime
import queue
from threading import Thread

# global variables
stop_thread = False             # controls thread execution


def start_capture_thread(cap, queue):
    global stop_thread

    # continuously read frames from the camera
    while True:
        _, img = cap.read()
        queue.put(img)

        if (stop_thread):
            break


def main():
    global stop_thread

    # create display window
    cv2.namedWindow("webcam", cv2.WINDOW_NORMAL)

    # initialize webcam capture object
    cap = cv2.VideoCapture(0)
    #cap = cv2.VideoCapture(0 + cv2.CAP_DSHOW)

    # retrieve properties of the capture object
    cap_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    cap_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    cap_fps = cap.get(cv2.CAP_PROP_FPS)
    print('* Capture width:', cap_width)
    print('* Capture height:', cap_height)
    print('* Capture FPS:', cap_fps)

    # create a queue
    frames_queue = queue.Queue(maxsize=0)

    # start the capture thread: reads frames from the camera (non-stop) and stores the result in img
    t = Thread(target=start_capture_thread, args=(cap, frames_queue,), daemon=True) # a daemon thread is killed when the application exits
    t.start()

    # initialize time and frame count variables
    last_time = datetime.datetime.now()
    frames = 0
    cur_fps = 0

    while (True):
        if (frames_queue.empty()):
            continue

        # blocks until the entire frame is read
        frames += 1

        # measure runtime: current_time - last_time
        delta_time = datetime.datetime.now() - last_time
        elapsed_time = delta_time.total_seconds()

        # compute fps but avoid division by zero
        if (elapsed_time != 0):
            cur_fps = np.around(frames / elapsed_time, 1)

        # retrieve an image from the queue
        img = frames_queue.get()

        # TODO: process the image here if needed

        # draw FPS text and display image
        if (img is not None):
            cv2.putText(img, 'FPS: ' + str(cur_fps), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2, cv2.LINE_AA)
            cv2.imshow("webcam", img)

        # wait 1ms for ESC to be pressed
        key = cv2.waitKey(1)
        if (key == 27):
            stop_thread = True
            break

    # release resources
    cv2.destroyAllWindows()
    cap.release()


if __name__ == "__main__":
    main()

I said might earlier, and here is what I mean: even when I use a dedicated thread to pull frames from the camera and a queue to store them, the displayed fps is still capped at 29.3 when it should have been 30 fps. In this case, I assume the camera driver or the backend implementation used by VideoCapture can be blamed for the issue. On Windows, the backend used by default is MSMF.

It is possible to force VideoCapture to use a different backend by passing the right arguments in the constructor:

cap = cv2.VideoCapture(0 + cv2.CAP_DSHOW)

My experience with DShow was terrible: the CAP_PROP_FPS returned by the camera was 0 and the displayed FPS got stuck at 14. This is just an example to illustrate how the backend capture driver can interfere negatively with the camera capture.

But that's something you can explore. Maybe using a different backend on your OS will provide better results. Here's a nice high-level overview of the Video I/O module from OpenCV that lists the supported backends.
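
As a starting point for that exploration, the sketch below (assuming the backends in question were compiled into your OpenCV build) prints which backend VideoCapture picked and tries forcing another one:

import cv2

# Let OpenCV pick the default backend for this platform
cap = cv2.VideoCapture(0)
if cap.isOpened():
    print("Default backend:", cap.getBackendName())
    print("Reported FPS:", cap.get(cv2.CAP_PROP_FPS))
cap.release()

# Force a specific backend, e.g. AVFoundation on macOS, DirectShow or MSMF on Windows.
# This only works if that backend was compiled into your OpenCV build.
cap = cv2.VideoCapture(0, cv2.CAP_AVFOUNDATION)  # or cv2.CAP_DSHOW, cv2.CAP_MSMF, cv2.CAP_V4L2, ...
if cap.isOpened():
    print("Forced backend:", cap.getBackendName())
    print("Reported FPS:", cap.get(cv2.CAP_PROP_FPS))
cap.release()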

Update

In one of the comments on this answer, the OP upgraded OpenCV from 4.1 to 4.3 on Mac OS and observed a noticeable improvement in FPS rendering. It looks like this was a performance issue related to cv2.imshow().
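
If you suspect a similar GUI-side slowdown, a quick first check (my suggestion, not something the OP confirmed) is to print the OpenCV version and the GUI toolkit your build was compiled with, since that is what cv2.imshow() relies on:

import cv2

print("OpenCV version:", cv2.__version__)  # the OP saw imshow speed up after upgrading 4.1 -> 4.3

# getBuildInformation() includes a "GUI" section (Cocoa, GTK, QT, Win32 UI, ...)
for line in cv2.getBuildInformation().splitlines():
    if line.strip().startswith(("GUI", "Cocoa", "GTK", "QT", "Win32 UI")):
        print(line.strip())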