GStreamer: recording a live camera feed to multiple files

Time: 2017-10-25 03:08:08

Tags: video gstreamer video-capture video-recording

I am building an application in which I need to capture many hours of continuous video and later upload specific time windows to a remote machine. Because the video data is large and this system will be collecting for months, everything has to be compressed.

My first attempt used two threads to avoid dropping frames: the first runs a very simple pipeline with no compression, and the second does the compression. This is the pipeline I want to run on the first thread:

gst-launch-1.0 v4l2src num-buffers=300 ! video/x-raw,framerate=30/1 ! filesink location=myFile

This command would run in a loop to capture a series of files. Unfortunately, although it should capture 300 / 30 = 10 seconds of video, it takes 30 seconds to execute, which means I am losing a large amount of video before the next iteration of the loop starts.

I looked at splitmuxsink, but I get a message saying it does not exist, even though I have all the plugins installed.

It seems GStreamer ought to be powerful enough to give access to the buffered raw video and let me select chunks of it to save as the buffer fills, but I cannot find any example that comes close to this. How would others recommend accomplishing this?

Details:
Jetson TX1
Ubuntu 14.04

Update

Several people have suggested the GStreamer element splitmuxsink. I think it would work, but my TX1 ships with GStreamer 1.2.4, which predates splitmuxsink (the element was added in GStreamer 1.6). I looked into upgrading GStreamer and did not find anything useful; it would also break a number of the tools NVIDIA includes for hardware optimization.
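
A quick way to confirm what the installed runtime actually provides (a minimal check assuming only a working PyGObject install; not part of the original post):

    import gi
    gi.require_version('Gst', '1.0')
    from gi.repository import Gst

    Gst.init(None)
    # Report the runtime GStreamer version and whether splitmuxsink is registered.
    print(Gst.version_string())                     # e.g. "GStreamer 1.2.4"
    print(Gst.ElementFactory.find("splitmuxsink"))  # None means the element is unavailable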

I built a Python application that tries to modify the GStreamer pipeline on the fly, pointing the stream at a different filesink for each segment (see below). The code runs and (usually) creates a series of AVI files, but there are a few problems:

  1. Even though the event that starts a new file fires every 10 seconds, the video itself is only 3 seconds long!
  2. Sometimes a file is saved with no data in it, and every subsequent file is empty as well.

Does anyone have any insight into these problems? (For reference, a sketch of the tee-detach pattern described in the GStreamer documentation follows the code below.)

    #! /usr/bin/env python
    
    import os
    import sys
    import logging
    
    import gi
    gi.require_version('Gst', "1.0")
    gi.require_version('GstBase', "1.0")
    gi.require_version('Gtk', "3.0")
    #gi.require_version('GdkX11', '3.0')
    gi.require_version('GstVideo', '1.0')
    
    from gi.repository import GObject, Gst, GstBase, Gtk, GstVideo, GdkX11
    
    import numpy as np
    import datetime as dt
    
    log = logging.getLogger(__name__)
    
    
    class Segmenter:
        def __init__(self):
            Gst.init(None)
    
            self.terminate = False
    
            # Create gstreamer pipeline
            cmd = "v4l2src ! tee name=tee ! fakesink"
            self.pipeline = Gst.parse_launch(cmd)
    
            # Store references to gstreamer objects
            self.bus = self.pipeline.get_bus()
            self.recordpipe = None
    
        def run(self):    
            # Initiate main loop
            self.pipeline.set_state(Gst.State.PAUSED)
            self.pipeline.set_state(Gst.State.PLAYING)
    
            try:
                while not self.terminate:
                    print(dt.datetime.now().time())
                    # Listen for event messages on the bus
                    msg = self.bus.timed_pop_filtered(10 * Gst.SECOND, (Gst.MessageType.EOS | Gst.MessageType.ERROR))
    
                    if msg:
                        if msg.type == Gst.MessageType.ERROR:
                            err, dbg = msg.parse_error()
                            print("ERROR:", msg.src.get_name(), ":", err)
                            if dbg:
                                print("Debug info:", dbg)
                            self.terminate = True
    
                        elif msg.type == Gst.MessageType.EOS:
                            print("End-Of-Stream reached")
                            self.terminate = True
    
                    else:
                        # No message - must have reached timeout
                        self.begin_new_file()
    
    
            finally:
                # Free up resources
                self.pipeline.set_state(Gst.State.NULL)
    
        def begin_new_file(self):
    
            # If recording a file currently, terminate it
            if self.recordpipe is not None:
    
                # Block new data
                filequeue = self.recordpipe.get_by_name("filequeue")
                filequeue.get_static_pad("src").add_probe(Gst.PadProbeType.BLOCK_DOWNSTREAM, self.probe_block)
    
                # Disconnect the recording pipe
                self.pipeline.get_by_name("tee").unlink(self.recordpipe)
    
                # Send a termination event to trigger the save
                filequeue.get_static_pad("sink").send_event(Gst.Event.new_eos())
    
                # Clear the reference to the pipe
                self.recordpipe = None
    
            # Create a new file target
            filename = dt.datetime.now().strftime("%Y-%m-%d_%H.%M.%S") + ".avi"
            print("Recording {}...".format(filename))
    
            # Create a new pipeline for the new file
            self.recordpipe = Gst.parse_bin_from_description("queue name=filequeue ! jpegenc ! avimux ! filesink location={} sync=False".format(filename), True)
            self.pipeline.add(self.recordpipe)
    
            # Connect to the main pipe
            self.pipeline.get_by_name("tee").link(self.recordpipe)
    
            # Start passing data
            self.recordpipe.set_state(Gst.State.PLAYING)
    
        def probe_block(self, pad, buf):
            """ Callback for downstream block """
            print('block.')
            return True
    
    
    if __name__ == '__main__':
        seg = Segmenter()
        seg.run()
    

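For reference, the pad-probe pattern that the GStreamer documentation describes for detaching a recording branch from a tee looks roughly like the following. This is a minimal sketch rather than code from the question: it assumes the branch was created with ghost pads by Gst.parse_bin_from_description (as above) and that the caller waits for the branch's filesink to see EOS before the final cleanup.

    def stop_branch(pipeline, tee, branch):
        """Sketch: block the tee pad feeding `branch`, unlink it, and push EOS
        so the muxer and filesink finalize the file before the branch goes away."""
        tee_pad = branch.get_static_pad("sink").get_peer()  # tee request pad feeding the branch

        def on_blocked(pad, info):
            # Data into the branch is now blocked; it is safe to unlink it from the tee.
            pad.unlink(branch.get_static_pad("sink"))
            # Push EOS into the branch so avimux writes its index and closes the file.
            branch.get_static_pad("sink").send_event(Gst.Event.new_eos())
            return Gst.PadProbeReturn.OK

        tee_pad.add_probe(Gst.PadProbeType.BLOCK_DOWNSTREAM, on_blocked)

        # Once the branch has handled the EOS (watch for it with an event probe or on
        # the bus), shut it down, remove it, and release the tee request pad:
        #     branch.set_state(Gst.State.NULL)
        #     pipeline.remove(branch)
        #     tee.release_request_pad(tee_pad)
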
2 Answers:

Answer 0 (score: 0)

Answer 1 (score: 0)

Here is an example that uses GStreamer from Python 3 on an NVIDIA TX2 with splitmuxsink to segment the recorded video. I also used a callback function to rename the output segments with a timestamp.

import gi
gi.require_version('Gst', "1.0")
gi.require_version('GstBase', "1.0")
gi.require_version('Gtk', "3.0")
gi.require_version('GstVideo', '1.0')

from gi.repository import GObject, Gst, GstBase, Gtk, GstVideo, GdkX11

GObject.threads_init()
Gst.init(None)

import datetime as dt
import time



class config():
    width = 1280
    height = 720
    fps = 50
    flip = 0
    segments_time = 5000000000  # 5 seconds, in nanoseconds
    camera_type = "csi"
    usb_cam = "video0"
    iframe_interval = 30  # place a keyframe every 30 frames; splitmuxsink can only cut near keyframes, so this keeps splits close to the requested segment length


config()

global ref_stamp
ref_stamp = 0


# global first segment flag 
global first_segment
first_segment = False


def format_location_callback (splitmux, fragment_id):

    global first_segment

    local_frame_t = round(time.time() * 1000)
    timestamp = str(local_frame_t)
    name = str(timestamp) + ".mp4" 

    # The first segment is affected by the time needed to open the camera source, so it ends up
    # about 6 seconds long even though it only contains roughly 4.5 seconds of recorded frames.
    # Save that first segment as "test"; only the later segments get the proper timestamp names.
    if first_segment == False:
        name = 'test' 
    first_segment = True
    return str(name)
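
# The answer does not show how `pipeline` is constructed before it is used below.
# The following is only a sketch built from the config values above; every element and
# property name here is an assumption (on a TX2 the hardware source/encoder may instead
# be nvcamerasrc / omxh264enc), not code from the original answer.
if config.camera_type == "csi":
    source = ("nvarguscamerasrc ! nvvidconv flip-method={flip} ! "
              "video/x-raw,width={w},height={h},framerate={fps}/1 ! videoconvert"
              .format(flip=config.flip, w=config.width, h=config.height, fps=config.fps))
else:
    source = "v4l2src device=/dev/{} ! videoconvert".format(config.usb_cam)

pipeline = Gst.parse_launch(
    source + " ! x264enc tune=zerolatency key-int-max={ki} ! h264parse ! "
             "splitmuxsink name=splitmux max-size-time={t}"
             .format(ki=config.iframe_interval, t=config.segments_time))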


src = pipeline.get_by_name ('splitmux')
src.connect("format-location", format_location_callback)


pipeline.set_state(Gst.State.PLAYING)

#playing = round(time.time() * 1000)
#print("PLAYING: ", playing)

bus = pipeline.get_bus()
msg = bus.pop_filtered(Gst.MessageType.EOS | Gst.MessageType.ERROR)

count = 1 

while True:
    count = count + 1
    print (count)
    time.sleep(1)
    # if 15 seconds passed stop the recording;
    if count == 15 :
        break

# send the end-of-stream event, then wait in the loop below for it to be sent
# throughout the pipeline.
pipeline.send_event(Gst.Event.new_eos())

# wait in the loop until end of stream is reached
while True:
    msg = bus.pop_filtered(Gst.MessageType.EOS | Gst.MessageType.ERROR)
    if msg:
        print("ENTER")
        if msg.type == Gst.MessageType.ERROR:
            err, dbg = msg.parse_error()
            print("ERROR:", msg.src.get_name(), ":", err)
            if dbg:
                print("Debug info:", dbg)
            break
        elif msg.type == Gst.MessageType.EOS:
            print("End-Of-Stream reached")
            break
    time.sleep(0.1)

pipeline.set_state(Gst.State.NULL)

ref_stamp = round(time.time() * 1000)
print("EXIT: ", ref_stamp)