我正在尝试使用 Twisted 的 web.Resource 实现一个 MJPEG 服务器。它从上游的 gstreamer 进程读取数据,该进程将 MJPEG 数据写入 TCP 端口 localhost:9999。我现在的代码大致如下:
from twisted.internet import reactor, protocol, defer
from twisted.web import server, resource
class MJpegResource(resource.Resource):
    """Streams MJPEG data to every connected HTTP client.

    Each GET request registers a ``[DeferredQueue, Request]`` pair on the
    shared ``queues`` list; the actual writing is done by JpegStreamReader,
    which iterates that list for every chunk it receives.
    """

    def __init__(self, queues):
        # Shared list of [DeferredQueue, Request] pairs, also visible to
        # the JpegStreamReader factory.
        self.queues = queues

    @defer.inlineCallbacks
    def deferredRenderer(self, request):
        # Park this request on a queue; the queue is only used to keep the
        # coroutine (and therefore the request) alive between chunks.
        q = defer.DeferredQueue()
        entry = [q, request]
        self.queues.append(entry)
        # BUGFIX: deregister when the client disconnects.  Without this the
        # entry leaks and JpegStreamReader keeps writing to a dead request
        # forever.  (Same notifyFinish pattern the final solution uses.)
        request.notifyFinish().addErrback(self._responseFailed, entry)
        while True:
            yield q.get()

    def _responseFailed(self, err, entry):
        # Client went away: stop feeding it.
        if entry in self.queues:
            self.queues.remove(entry)

    def render_GET(self, request):
        request.setHeader("content-type", 'multipart/x-mixed-replace; boundary=--spionisto')
        self.deferredRenderer(request)
        # Tell twisted.web we will finish the response asynchronously.
        return server.NOT_DONE_YET
class JpegStreamReader(protocol.Protocol):
    # Receives the raw MJPEG byte stream from gstreamer (localhost:9999)
    # and fans it out to every registered HTTP request.
    #
    # NOTE(review): there is no backpressure here -- data is written to
    # every request regardless of whether its TCP send buffer is full,
    # which is the suspected cause of the 30-40 s lag described in the
    # question.
    def dataReceived(self, data):
        # self.factory.queues is the shared list of [DeferredQueue, Request]
        # pairs registered by MJpegResource.render_GET.
        for (q, req) in self.factory.queues:
            req.write(data)
            q.put('')  # wake the renderer coroutine so it stays alive
# --- wiring -------------------------------------------------------------
# BUGFIX: `File` was used but never imported, and `queues` was never
# defined; both are required for this script to run at all.
from twisted.web.static import File

# Shared between the HTTP resource (writers' registry) and the TCP reader.
queues = []

root = File('web')
root.putChild('stream.mjpeg', MJpegResource(queues))

# Listen for the gstreamer MJPEG stream on localhost:9999.
factory = protocol.Factory()
factory.protocol = JpegStreamReader
factory.queues = queues
reactor.listenTCP(9999, factory)

# Serve the web UI (and the MJPEG stream) over HTTP.
site = server.Site(root)
reactor.listenTCP(80, site)

# spawn gstreamer process which writes to port 9999.
# The gstreamer process is launched using:
# gst-launch-1.0 -v \
#    v4l2src device=/dev/video0 \
#    ! video/x-raw,framerate=15/1, width=640, height=480 \
#    ! jpegenc \
#    ! multipartmux boundary=spionisto \
#    ! tcpclientsink host=127.0.0.1 port=9999 \
reactor.run()
类似于:
gstreamer --> JpegStreamReader --> MJpegResource
这个方案基本可以工作,但我偶尔发现浏览器中的视频远远落后于“实时”画面(有时多达 30-40 秒)。刷新浏览器后,MJPEG 流又会跳回到“实时”。因此我怀疑 JpegStreamReader 没有尽可能快地写入对应于 web.http.Request 的 TCP 套接字;gstreamer 不断向 TCP 端口 9999 填充数据,数据便在 JpegStreamReader 的输入队列上越积越多。
由于这个流本应是“实时”的,我可以通过丢帧让视频追回到直播进度。但我不确定如何检测 JpegStreamReader 是否已经落后。关于如何让这个管道更接近实时流,有什么建议吗?
如果有根本上不同的更好架构,也欢迎提出建议,我们将非常感激。
答案 0 :(得分:2)
这是按照 Jean-Paul Calderone 的建议实现的最终解决方案。注意现在我们有一个实现了 IPushProducer 接口的 JpegProducer 类。当请求暂停时,它会设置一个标志,这样当某个生产者被阻塞时,TCP 流读取器(JpegStreamReader)就不会再向它推送帧。按照 Jean-Paul 的建议,我还必须将 multipart MJPEG 流切分成块,以便我们总能在不破坏 MJPEG 输出格式的前提下丢帧。
from datetime import datetime

from twisted.internet import reactor, protocol, defer, interfaces
from twisted.web import server, resource
from zope.interface import implementer
class MJpegResource(resource.Resource):
    """Serves the live MJPEG stream, with backpressure via IPushProducer."""

    def __init__(self, queues):
        # Shared list of JpegProducer instances, iterated by the TCP reader.
        self.queues = queues

    def setupProducer(self, request):
        # Register a push producer on the request so Twisted notifies us
        # (pauseProducing/resumeProducing) when the client's TCP send
        # buffer fills up or drains.
        producer = JpegProducer(request)
        request.notifyFinish().addErrback(self._responseFailed, producer)
        request.registerProducer(producer, True)
        self.queues.append(producer)

    def _responseFailed(self, err, producer):
        producer.stopProducing()
        # BUGFIX: also drop the producer from the shared list.  Otherwise
        # every client that ever connected stays in `queues` forever and
        # JpegStreamReader keeps scanning dead producers on each chunk.
        if producer in self.queues:
            self.queues.remove(producer)

    def render_GET(self, request):
        request.setHeader("content-type", 'multipart/x-mixed-replace; boundary=--spionisto')
        self.setupProducer(request)
        # Response is completed asynchronously by the producer machinery.
        return server.NOT_DONE_YET
@implementer(interfaces.IPushProducer)
class JpegProducer(object):
    """Push producer attached to one HTTP request.

    JpegStreamReader consults ``isPaused`` before writing a chunk to
    ``request``; Twisted toggles that flag through the IPushProducer
    callbacks below as the client's write buffer fills and drains.

    NOTE(review): ``log`` is not defined in this excerpt -- presumably a
    helper from the author's full file; confirm it exists.
    """

    def __init__(self, request):
        self.request = request
        self.isPaused = False
        self.isStopped = False
        # Pending reactor.callLater handle for the delayed un-pause, or None.
        self.pendingResume = None

    def cancelCall(self):
        # Drop any pending un-pause timer; safe to call when none exists.
        if self.pendingResume is None:
            return
        self.pendingResume.cancel()
        self.pendingResume = None

    def pauseProducing(self):
        # Client can't keep up: stop sending and abandon any pending resume.
        self.isPaused = True
        self.cancelCall()

    def resetPausedFlag(self):
        # Timer fired: actually start sending again.
        self.isPaused = False
        self.pendingResume = None

    def resumeProducing(self):
        # Cancelling first is purely defensive -- Twisted should not issue
        # two resumeProducing calls without a pauseProducing in between.
        self.cancelCall()
        # Delay the un-pause by a second so the next thing sent is a fresh
        # MJPEG chunk rather than a stale backlog.
        self.pendingResume = reactor.callLater(1, self.resetPausedFlag)
        log('producer is requesting to be resumed')

    def stopProducing(self):
        # Terminal state: never write to this request again.
        self.isPaused = True
        self.isStopped = True
        log('producer is requesting to be stopped')
MJPEG_SEP = '--spionisto\r\n'
class JpegStreamReader(protocol.Protocol):
    """Reads the multipart MJPEG stream from gstreamer and fans complete
    chunks out to every producer that is not currently paused.
    """

    def __init__(self):
        # Connection start time; assigned in connectionMade but never read
        # in this excerpt -- presumably used for logging elsewhere. TODO confirm.
        self.tnow = None

    def connectionMade(self):
        # Buffer for bytes received since the last complete chunk.
        # NOTE(review): assumes `datetime` is imported in the full file
        # (e.g. `from datetime import datetime`) -- it is not imported in
        # this excerpt.
        self.data = ''
        self.tnow = datetime.now()

    def dataReceived(self, data):
        # Accumulate until we hold at least one complete multipart chunk.
        # NOTE(review): str-based buffering is Python 2 style; on Python 3
        # `data` arrives as bytes and this concatenation would fail.
        self.data += data
        # Split on the LAST separator: everything before it is whole
        # chunks (safe to send); the tail is an incomplete chunk we keep.
        chunks = self.data.rsplit(MJPEG_SEP, 1)
        dataToSend = ''
        if len(chunks) == 2:
            dataToSend = chunks[0] + MJPEG_SEP
            self.data = chunks[-1]
        # Only write to clients that can keep up; a paused producer simply
        # skips (drops) this chunk, which is what keeps the stream "live".
        for producer in self.factory.queues:
            if (not producer.isPaused):
                producer.request.write(dataToSend)
答案 1 :(得分:1)
您可以在 Request 对象上注册一个生产者。当 Request 的写缓冲区已满时,它会调用生产者的 pauseProducing 方法;当缓冲区重新有空间时,它会调用 resumeProducing 方法。
您可以利用这一信息丢弃那些可能无法及时送达的帧。不过,您必须先在服务器端真正识别出帧的边界(目前您只有一个 dataReceived 方法,它把数据当作字节流原样传递,并不知道一帧从哪里开始、到哪里结束)。此外还有一个问题:缓冲区的充满程度可能是流延迟的一个非常滞后的指标。如果系统的瓶颈不在“从 gstreamer 读取数据并写入请求”这一段,那么仅在程序的这一部分增加背压感知并不会有帮助。