在 Python 中流式传输时同步音频和视频

时间:2021-01-31 13:13:30

标签: python opencv streaming pyaudio

我有以下代码:

接收端:

import cv2
import zmq
import socket
import pyaudio
import wave
import numpy as np
import threading


def recv_video_stream():
    """Receive JPEG-encoded frames over a ZeroMQ SUB socket and display them.

    Protocol (set by the sender): each message is raw JPEG bytes sent via
    ``send_pyobj``; a ``list`` object is the end-of-stream sentinel.
    """
    context = zmq.Context()
    footage_socket = context.socket(zmq.SUB)
    footage_socket.connect('tcp://127.0.0.1:5554')
    footage_socket.setsockopt_string(zmq.SUBSCRIBE, '')

    while True:
        img = footage_socket.recv_pyobj()
        if isinstance(img, list):
            # Sender signals end-of-stream with a list sentinel.
            cv2.destroyAllWindows()
            break
        npimg = np.frombuffer(img, dtype=np.uint8)
        frame = cv2.imdecode(npimg, 1)
        cv2.imshow("image", frame)
        # NOTE(review): a fixed 30 ms wait ignores the video's real fps;
        # pacing should be driven by the sender (see video_stream). The
        # waitKey here only needs to pump the HighGUI event loop.
        cv2.waitKey(30)


def recv_audio_stream():
    """Accept one TCP connection and play the received raw audio frames.

    Playback parameters (sample width, channels, rate) are read from a
    local copy of ``video.wav`` — the socket itself carries only raw PCM
    frames, so the receiver must know the format out-of-band.
    """
    CHUNK = 4096
    server = socket.socket()
    server.bind(('0.0.0.0', 5003))
    server.listen(10)
    client, _addr = server.accept()

    wf = wave.open('video.wav', 'rb')
    p = pyaudio.PyAudio()
    print(wf.getframerate())
    stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                    channels=wf.getnchannels(),
                    rate=wf.getframerate(),
                    output=True,
                    frames_per_buffer=CHUNK)
    try:
        while True:
            data = client.recv(CHUNK)
            if not data:
                # recv() returns b'' only when the peer closed the connection.
                break
            stream.write(data)
    finally:
        # Release audio and network resources even if playback raises.
        stream.stop_stream()
        stream.close()
        p.terminate()
        wf.close()
        client.close()
        server.close()


# Run the video and audio receivers concurrently and wait for both.
workers = [
    threading.Thread(target=recv_video_stream),
    threading.Thread(target=recv_audio_stream),
]
for worker in workers:
    worker.start()
for worker in workers:
    worker.join()

发送方:

import socket
import threading
import time
import wave

import cv2
import pyaudio
import zmq





def video_stream():
    """Read frames from video.mp4, JPEG-encode them and publish over ZeroMQ.

    Frames are paced to the file's native frame rate (CAP_PROP_FPS) so the
    receiver can display them in sync with the audio. A ``list`` object is
    published as the end-of-stream sentinel.
    """
    context = zmq.Context()
    footage_socket = context.socket(zmq.PUB)
    footage_socket.bind('tcp://127.0.0.1:5554')

    cap = cv2.VideoCapture('video.mp4')
    fps = cap.get(cv2.CAP_PROP_FPS)
    # Fall back to ~33 fps if the container does not report a frame rate.
    frame_delay = 1.0 / fps if fps and fps > 0 else 0.03

    while True:
        grabbed, frame = cap.read()  # grab the current frame
        if not grabbed:
            # End of file: tell the subscriber to stop, then clean up.
            footage_socket.send_pyobj([1, 2, 3, 4, 5, 6, 7])
            cap.release()
            cv2.destroyAllWindows()
            break
        frame = cv2.resize(frame, (500, 300))  # resize the frame
        _ok, buffer = cv2.imencode('.jpg', frame)
        footage_socket.send_pyobj(buffer)
        # cv2.waitKey only pumps events for an open HighGUI window (the
        # sender has none) and a fixed 30 ms ignores the real fps — this
        # was the source of the desync. Sleep for one frame period instead.
        time.sleep(frame_delay)


def audio_stream():
    """Stream raw audio frames from video.wav to the receiver over TCP.

    Only reads from the file and writes to the socket — playback happens
    on the receiving side.
    """
    CHUNK = 1024
    sock = socket.socket()
    sock.connect(('127.0.0.1', 5003))
    wf = wave.open("video.wav", 'rb')
    # NOTE(review): the original also opened a PyAudio *input* (microphone)
    # stream here that was never read from or closed; it has been removed
    # as an unused resource leak.
    try:
        while True:
            data = wf.readframes(CHUNK)
            if not data:
                # readframes returns b'' at end of file.
                break
            # sendall retries until the whole chunk is transmitted;
            # plain send() may write only part of the buffer.
            sock.sendall(data)
    finally:
        wf.close()
        # Closing the socket makes the receiver's recv() return b'',
        # which is its end-of-stream signal.
        sock.close()


# Launch the video publisher and audio sender side by side, then block
# until both finish.
senders = [
    threading.Thread(target=video_stream),
    threading.Thread(target=audio_stream),
]
for sender in senders:
    sender.start()
for sender in senders:
    sender.join()

如您所见,我正在尝试同步音频和视频(即给定一个音频 .wav 文件和一个视频 .mp4 文件,让两者同步播放)。问题是我在 `cv2.waitKey(param)` 中尝试了不同的参数,但没有一个能真正实现同步效果。有谁知道一种好方法,可以确定该使用什么参数(或机制),使它对不同的视频、不同的 fps 都有效?

0 个答案:

没有答案