So I'm trying to mix several streams together using GStreamer and its videomixer element. I have the mixing pipeline set up and running, but when I run it against live feeds it plays at double speed, then stops to buffer for a moment, then runs at double speed again, and so on. I've looked at uridecodebin's buffering properties, but those haven't helped so far. Does anyone know a good way to fix this?
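In case it's useful, these are the kinds of uridecodebin buffering properties I already tried (decodebin here is just a stand-in for the uridecodebin instances in the code below, and the values are only examples of what I experimented with):

# buffering knobs I tried on each uridecodebin; none of them fixed the speed-up/stall cycle
decodebin.set_property('use-buffering', True)              # post BUFFERING messages from the internal queues
decodebin.set_property('buffer-size', 2 * 1024 * 1024)     # soft limit, in bytes
decodebin.set_property('buffer-duration', 3 * Gst.SECOND)  # soft limit, in nanoseconds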
Here is my code so far. I know it's ugly, but right now I'm just prototyping quickly:
import gi
import sys
import math
gi.require_version('Gst', '1.0')
from gi.repository import Gst, GLib
# initializing data for bus signal watch
class CustomData:
is_live = None
pipeline = None
main_loop = None
class Multiview:
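    # uridecodebin creates its source pads dynamically, so the background
    # image's video pad is linked to the scaler here once it appears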
def bg_pad_added(self, element, pad):
string = pad.query_caps(None).to_string()
if string.startswith('video/x-raw'):
pad.link(self.bg_videoscale.get_static_pad('sink'))
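    # each input uridecodebin registers its target queue in target_name during
    # setup; when a decoded pad appears, link it to that queue exactly once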
def decodebin_pad_added(self, element, pad):
name = pad.get_parent().get_name()
destination = self.pipeline.get_by_name(target_name[name])
if destination.get_static_pad('sink').is_linked():
return
pad.link(destination.get_static_pad('sink'))
    def __init__(self, streams, x_res, y_res):
self.is_live = None
self.pipeline = None
self.main_loop = None
        # streams is a list of input stream URIs
        self.streams = streams
# initialize gstreamer
Gst.init(None)
# initialize target names for URI linking
        # using a global is not ideal, but avoiding it would require reworking
        # how the GObject signal callback gets its data, since I would need to
        # pass an extra variable into the handler from outside the signal itself
global target_name
target_name = dict()
# initialize debugging options
Gst.debug_set_active(True)
Gst.debug_set_default_threshold(1)
# start pipeline
self.pipeline = Gst.Pipeline()
self.pipeline.set_state(Gst.State.PAUSED)
self.data = CustomData()
# TODO - Break sections up into isolated functions maybe (idk if that's actually better)
# create videomixer filter
# can control xpos, ypos, and zorder
self.videomixer = Gst.ElementFactory.make('videomixer', None)
# create videoconvert filter
self.videoconvert = Gst.ElementFactory.make('videoconvert', None)
# create video sink element
self.autovideosink = Gst.ElementFactory.make('autovideosink', None)
self.autovideosink.set_property("sync", False)
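        # sync=False makes the sink render frames as soon as they arrive
        # instead of waiting for each frame's clock timestamp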
# create output videobox
self.outbox = Gst.ElementFactory.make('videobox', None)
self.outbox.set_property('autocrop', True)
# create output filter
# controls final video size
out_caps = Gst.Caps.from_string(f'video/x-raw, width={x_res},height={y_res}')
self.out_filter = Gst.ElementFactory.make('capsfilter', None)
self.out_filter.set_property('caps', out_caps)
        # add output elements to pipeline
self.pipeline.add(self.videomixer, self.videoconvert, self.outbox, self.out_filter, self.autovideosink)
# link output pipeline together
self.videomixer.link(self.videoconvert)
self.videoconvert.link(self.outbox)
self.outbox.link(self.out_filter)
self.out_filter.link(self.autovideosink)
# create background videoscale
self.bg_videoscale = Gst.ElementFactory.make('videoscale', 'bgscale')
self.bg_videoscale.set_property('add-borders', False)
# create background element
self.background = Gst.ElementFactory.make('uridecodebin', None)
self.background.set_property('uri', 'file:///Users/scaglia/PycharmProjects/untitled/background.jpg')
self.background.connect("pad-added", self.bg_pad_added)
# create background videofilter
        bgcaps = Gst.Caps.from_string('video/x-raw, width=1024,height=600')
self.bg_filter = Gst.ElementFactory.make('capsfilter', None)
self.bg_filter.set_property('caps', bgcaps)
# create background imagefreeze. Only necessary if using still image
self.bg_imagefreeze = Gst.ElementFactory.make('imagefreeze', None)
# add background elements to pipeline
self.pipeline.add(self.background, self.bg_videoscale, self.bg_filter, self.bg_imagefreeze)
# link background pipeline together
        # (the uridecodebin itself gets linked to bg_videoscale dynamically in bg_pad_added)
self.bg_videoscale.link(self.bg_filter)
self.bg_filter.link(self.bg_imagefreeze)
self.bg_imagefreeze.link(self.videomixer)
# generate locations for feeds
self.layout_positions = self.init_locations(x_res, y_res)
        # takes in an array of input stream URIs, all handled here
        # TODO - error handling! Also different input types maybe if I need to
        # TODO - verify integrity of inputs BEFORE initializing
        for n, uri in enumerate(streams):
# create input videoscale
videoscale = Gst.ElementFactory.make('videoscale', 'scale_{}'.format(n))
# create input queue element
videoqueue = Gst.ElementFactory.make('queue', 'queue_{}'.format(n))
            # get the individual input feeds
            # this will eventually be unnecessary
            file_in = Gst.ElementFactory.make('uridecodebin', 'file_{}'.format(n))
            file_in.set_property('uri', uri)
            file_in.set_property('buffer-size', 1024000)
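            # buffer-size is in bytes, so this is roughly 1 MB per input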
targetkey = file_in.get_name()
targetdef = videoqueue.get_name()
target_name[targetkey] = targetdef
file_in.connect("pad-added", self.decodebin_pad_added)
# create input aspect ratio crop
# aspectratiocrop = Gst.ElementFactory.make('aspectratiocrop', 'ratiocrop_{}'.format(n))
# aspectratiocrop.set_property('aspect-ratio', Gst.Fraction(1024, 600))
# create input videofilter
# controls individual feed sizes
feed_x_res = self.layout_positions[len(self.streams)-1][0][0]
feed_y_res = self.layout_positions[len(self.streams)-1][0][1]
video_caps = Gst.Caps.from_string(f'video/x-raw, width={feed_x_res},height={feed_y_res}')
videofilter = Gst.ElementFactory.make('capsfilter', 'filter_{}'.format(n))
videofilter.set_property("caps", video_caps)
# set up videomixer pads, sets positions of input streams
mixer_pad = self.videomixer.get_request_pad('sink_{}'.format(n+1))
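            # videomixer sink pads are request pads; sink_0 was already taken
            # by the background chain, so the inputs start at sink_1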
mixer_pad.set_property("xpos", self.layout_positions[len(self.streams)-1][n+1][0])
mixer_pad.set_property("ypos", self.layout_positions[len(self.streams)-1][n+1][1])
# add input elements to pipeline
            self.pipeline.add(file_in, videoqueue, videoscale, videofilter)
            # link input pipeline together and then to videomixer
# file_in.link(videoscale)
videoqueue.link(videoscale)
videoscale.link(videofilter)
videofilter.link(self.videomixer)
# the problem pipeline
# only fails when trying to mix for some reason
# TODO - fix this. Low priority
# file_in.link(aspectratiocrop)
# aspectratiocrop.link(videoscale)
# videoscale.link(videofilter)
# videofilter.link(self.videomixer)
print('------STARTING STREAM------')
bus = self.pipeline.get_bus()
ret = self.pipeline.set_state(Gst.State.PLAYING)
self.is_live = False
# Generates graph of pipeline
Gst.debug_bin_to_dot_file(self.pipeline, Gst.DebugGraphDetails.ALL, 'supersimple-debug-graph')
if ret == Gst.StateChangeReturn.FAILURE:
print('ERROR: Unable to set the pipeline to the playing state.')
sys.exit(-1)
elif ret == Gst.StateChangeReturn.NO_PREROLL:
self.is_live = True
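            # live sources return NO_PREROLL because they produce no data while PAUSED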
self.main_loop = GLib.MainLoop.new(None, False)
bus.add_signal_watch()
bus.connect('message', self.cb_message, self.data)
self.main_loop.run()
    # initializes the locations of each input stream, based on the number of incoming streams
    # array structured as layout_positions[number of streams - 1][individual stream number][position]
    # the first entry in each layout holds the resolution of the streams in that layout;
    # the rest are formatted as [xpos, ypos]
    # also this looks like garbage but hey, what can I do. Sorry everyone :D
@staticmethod
def init_locations(x_res, y_res):
def hlf(x):
return math.ceil(x/2)
def thrd(x):
return math.ceil(x/3)
def qrt(x):
return math.ceil(x/4)
def sxth(x):
return math.ceil(x/6)
layout_positions = [
[[x_res, y_res], [0, 0]],
[[hlf(x_res), hlf(y_res)], [0, 0], [hlf(x_res), hlf(y_res)]],
[[hlf(x_res), hlf(y_res)], [0, 0], [hlf(x_res), 0], [qrt(x_res), hlf(y_res)]],
[[hlf(x_res), hlf(y_res)], [0, 0], [hlf(x_res), 0], [0, hlf(y_res)], [hlf(x_res), hlf(y_res)]],
[[hlf(x_res), hlf(y_res)], [0, 0], [hlf(x_res), 0], [0, hlf(y_res)], [hlf(x_res), hlf(y_res)],
[qrt(x_res), qrt(y_res)]],
[[thrd(x_res), thrd(y_res)], [0, sxth(y_res)], [thrd(x_res), sxth(y_res)], [thrd(x_res)*2, sxth(y_res)],
[0, hlf(y_res)], [thrd(x_res), hlf(y_res)], [thrd(x_res)*2, hlf(y_res)]],
[[thrd(x_res), thrd(y_res)], [0, 0], [thrd(x_res), 0], [thrd(x_res)*2, 0], [0, thrd(y_res)],
[thrd(x_res), thrd(y_res)], [thrd(x_res)*2, thrd(y_res)], [thrd(x_res), thrd(y_res)*2]],
[[thrd(x_res), thrd(y_res)], [0, 0], [thrd(x_res), 0], [thrd(x_res)*2, 0], [0, thrd(y_res)],
[thrd(x_res), thrd(y_res)], [thrd(x_res)*2, thrd(y_res)], [sxth(x_res), thrd(y_res)*2],
[hlf(x_res), thrd(y_res)*2]],
[[thrd(x_res), thrd(y_res)], [0, 0], [thrd(x_res), 0], [thrd(x_res)*2, 0], [0, thrd(y_res)],
[thrd(x_res), thrd(y_res)], [thrd(x_res)*2, thrd(y_res)], [0, thrd(y_res)*2], [thrd(x_res), thrd(y_res)*2],
[thrd(x_res)*2, thrd(y_res)*2]]]
return layout_positions
    # handles messages from the pipeline bus
    # most of this is standard GStreamer boilerplate
def cb_message(self, bus, msg, data):
t = msg.type
if t == Gst.MessageType.ERROR:
err, debug = msg.parse_error()
print(err)
self.pipeline.set_state(Gst.State.READY)
self.pipeline.set_state(Gst.State.NULL)
Gst.debug_bin_to_dot_file(
self.pipeline,
Gst.DebugGraphDetails.ALL,
'supersimple-debug-graph')
self.main_loop.quit()
return
if t == Gst.MessageType.EOS:
# end-of-stream
self.pipeline.set_state(Gst.State.READY)
self.main_loop.quit()
return
if t == Gst.MessageType.BUFFERING:
# If the stream is live, we do not care about buffering.
if self.is_live:
return
percent = msg.parse_buffering()
print('Buffering {0}%'.format(percent))
if percent < 100:
self.pipeline.set_state(Gst.State.PAUSED)
else:
self.pipeline.set_state(Gst.State.PLAYING)
return
if t == Gst.MessageType.CLOCK_LOST:
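            # bouncing through PAUSED makes the pipeline distribute a new clock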
self.pipeline.set_state(Gst.State.PAUSED)
self.pipeline.set_state(Gst.State.PLAYING)
return
streams = ['https://videos3.earthcam.com/fecnetwork/4717.flv/chunklist_w1361202835.m3u8',
'https://videos3.earthcam.com/fecnetwork/13220.flv/chunklist_w2099683048.m3u8',
'https://videos3.earthcam.com/fecnetwork/windjammerHD2.flv/chunklist_w374377947.m3u8']
Multiview(streams, 1024, 600)