I'm relatively new to asking questions on Stack Overflow, but I'll do my best to explain this problem thoroughly.
I'm currently using an Axis IP camera to provide live video to a CARMA board. GStreamer then takes these frames with an RTSP client, performs an RTP depayload, and decodes the h.264 images being sent from the camera. When I run this process on my computer (currently equipped with an i7 processor), there is no lag and the stream is output to the screen in real time, updating at 30 Hz. The problem appears when I switch to the CARMA board I'm working on: instead of displaying in real time, the appsink receives buffers at a rate much slower than real time. More specifically, instead of receiving buffers at 30 Hz, the appsink receives them at only about 10 Hz, even when no other processing is occurring on the CARMA board. It should also be noted that no frames are dropped: the appsink is receiving all of the buffers, just not in real time. Any insight into why this is happening is greatly appreciated. I have already checked that timestamps are not the issue (e.g., the rate at which the appsink receives buffers does not change whether or not I use GST timestamps). The CARMA board is currently running Ubuntu 11.04 and compiling with GCC. Below are some code snippets and their respective explanations.
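For reference, in gst-launch notation the pipeline described above is roughly:

rtspsrc location=RTSP_URI latency=0 ! rtph264depay ! nv_omx_h264dec ! appsink

(just a sketch of the topology; the actual elements and their properties are created and set in the code below).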
Some definitions:
#define APPSINK_CAPS "video/x-raw-yuv,format=(fourcc)I420"
#define RTSP_URI "rtsp://(ipaddress)/axis-media/media.amp?videocodec=h264"
#define RTSP_LATENCY 0           /* jitterbuffer latency, in ms */
#define RTSP_BUFFER_MODE 0       /* buffer-mode "none": no timestamp smoothing */
#define RTSP_RTP_BLOCKSIZE 65536 /* RTP block size to suggest to the server */
GStreamer pipeline setup code:
/* Initialize GStreamer */
gst_init (&argc, &argv);

/* Create the elements */
data.rtspsrc = gst_element_factory_make ("rtspsrc", NULL);
data.rtph264depay = gst_element_factory_make ("rtph264depay", NULL);
data.nv_omx_h264dec = gst_element_factory_make ("nv_omx_h264dec", NULL);
data.appsink = gst_element_factory_make ("appsink", NULL);
if (!data.rtspsrc || !data.rtph264depay || !data.nv_omx_h264dec || !data.appsink) {
  g_printerr ("Not all elements could be created.\n");
  return -1;
}
/* Set element properties */
g_object_set (data.rtspsrc, "location", RTSP_URI,
              "latency", RTSP_LATENCY,
              "buffer-mode", RTSP_BUFFER_MODE,
              "rtp-blocksize", RTSP_RTP_BLOCKSIZE,
              NULL);
g_object_set (data.rtph264depay, "byte-stream", FALSE, NULL);
g_object_set (data.nv_omx_h264dec, "use-timestamps", TRUE, NULL);

/* Configure appsink. This plugin will allow us to access buffer data */
GstCaps *appsink_caps;
appsink_caps = gst_caps_from_string (APPSINK_CAPS);
g_object_set (data.appsink, "emit-signals", TRUE,
              "caps", appsink_caps,
              NULL);
g_signal_connect (data.appsink, "new-buffer", G_CALLBACK (appsink_new_buffer), &data);
gst_caps_unref (appsink_caps);
/* Create the empty pipeline */
data.pipeline = gst_pipeline_new ("test-pipeline");
if (!data.pipeline) {
  g_printerr ("Pipeline could not be created.\n");
  return -1;
}
/* Build the pipeline */
/* Note that we are NOT linking the source at this point. We will do it later. */
gst_bin_add_many (GST_BIN (data.pipeline),
                  data.rtspsrc,
                  data.rtph264depay,
                  data.nv_omx_h264dec,
                  data.appsink,
                  NULL);
if (gst_element_link (data.rtph264depay, data.nv_omx_h264dec) != TRUE) {
  g_printerr ("rtph264depay and nv_omx_h264dec could not be linked.\n");
  gst_object_unref (data.pipeline);
  return -1;
}
if (gst_element_link (data.nv_omx_h264dec, data.appsink) != TRUE) {
  g_printerr ("nv_omx_h264dec and appsink could not be linked.\n");
  gst_object_unref (data.pipeline);
  return -1;
}
/* Connect to the pad-added signal (CALLBACK!) */
g_signal_connect (data.rtspsrc, "pad-added", G_CALLBACK (pad_added_handler), &data);

/* Add a probe to perform hashing on the H.264 bytestream */
GstPad *rtph264depay_src_pad = gst_element_get_static_pad (data.rtph264depay, "src");
gst_pad_add_buffer_probe (rtph264depay_src_pad, G_CALLBACK (hash_and_report), (gpointer)(&data));
gst_object_unref (rtph264depay_src_pad); // unreference the source pad
/* Start playing */
ret = gst_element_set_state (data.pipeline, GST_STATE_PLAYING);
if (ret == GST_STATE_CHANGE_FAILURE) {
  g_printerr ("Unable to set the pipeline to the playing state.\n");
  gst_object_unref (data.pipeline);
  return -1;
}
/* Wait until error or EOS */
bus = gst_element_get_bus (data.pipeline);
do {
  msg = gst_bus_timed_pop_filtered (bus, GST_CLOCK_TIME_NONE,
      (GstMessageType)(GST_MESSAGE_STATE_CHANGED | GST_MESSAGE_ERROR | GST_MESSAGE_EOS));

  /* Parse message */
  if (msg != NULL) {
    GError *err;
    gchar *debug_info;
    switch (GST_MESSAGE_TYPE (msg)) {
      case GST_MESSAGE_ERROR:
        gst_message_parse_error (msg, &err, &debug_info);
        g_printerr ("Error received from element %s: %s\n", GST_OBJECT_NAME (msg->src), err->message);
        g_printerr ("Debugging information: %s\n", debug_info ? debug_info : "none");
        g_clear_error (&err);
        g_free (debug_info);
        terminate = TRUE;
        break;
      case GST_MESSAGE_EOS:
        g_print ("End-Of-Stream reached.\n");
        terminate = TRUE;  /* without this the loop would never exit on EOS */
        break;
      case GST_MESSAGE_STATE_CHANGED:
        /* We are only interested in state-changed messages from the pipeline */
        if (GST_MESSAGE_SRC (msg) == GST_OBJECT (data.pipeline)) {
          GstState old_state, new_state, pending_state;
          gst_message_parse_state_changed (msg, &old_state, &new_state, &pending_state);
          g_print ("Pipeline state changed from %s to %s:\n",
              gst_element_state_get_name (old_state), gst_element_state_get_name (new_state));
        }
        break;
      default:
        /* We should not reach here because we only asked for ERRORs, EOS, and state changes */
        g_printerr ("Unexpected message received.\n");
        break;
    }
    gst_message_unref (msg);
  }
} while (!terminate);
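For completeness, the usual teardown after the bus loop looks like the following (omitted from the snippet above; this is standard GStreamer cleanup, nothing specific to the problem):

/* Free resources */
gst_object_unref (bus);
gst_element_set_state (data.pipeline, GST_STATE_NULL);
gst_object_unref (data.pipeline);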
And now the pad_added_handler:
/* This function will be called by the pad-added signal */
// Thread 1
static void pad_added_handler (GstElement *src, GstPad *new_pad, CustomData *data) {
  GstPad *sink_pad = gst_element_get_static_pad (data->rtph264depay, "sink");
  GstPadLinkReturn ret;
  GstCaps *new_pad_caps = NULL;
  GstStructure *new_pad_struct = NULL;
  const gchar *new_pad_type = NULL;

  g_print ("Received new pad '%s' from '%s':\n", GST_PAD_NAME (new_pad), GST_ELEMENT_NAME (src));

  /* Check the new pad's type */
  new_pad_caps = gst_pad_get_caps (new_pad);
  new_pad_struct = gst_caps_get_structure (new_pad_caps, 0);
  new_pad_type = gst_structure_get_name (new_pad_struct);
  if (!g_str_has_prefix (new_pad_type, "application/x-rtp")) {
    g_print ("  It has type '%s' which is not RTP. Ignoring.\n", new_pad_type);
    goto exit;
  }

  /* If our converter is already linked, we have nothing to do here */
  if (gst_pad_is_linked (sink_pad)) {
    g_print ("  We are already linked. Ignoring.\n");
    goto exit;
  }

  /* Attempt the link */
  ret = gst_pad_link (new_pad, sink_pad);
  if (GST_PAD_LINK_FAILED (ret)) {
    g_print ("  Type is '%s' but link failed.\n", new_pad_type);
  } else {
    g_print ("  Link succeeded (type '%s').\n", new_pad_type);
  }

exit:
  /* Unreference the new pad's caps, if we got them */
  if (new_pad_caps != NULL)
    gst_caps_unref (new_pad_caps);

  /* Unreference the sink pad */
  gst_object_unref (sink_pad);
}
Now, appsink_new_buffer is called every time the appsink receives a buffer. This is the function where I believe (though I'm not sure) the buffers stop being received in real time, which leads me to believe that some sort of processing I'm doing takes too much time before another buffer can be handled:
// Called when appsink receives a buffer: Thread 1
void appsink_new_buffer (GstElement *sink, CustomData *data) {
  GstBuffer *buffer;

  /* Retrieve the buffer */
  g_signal_emit_by_name (sink, "pull-buffer", &buffer);
  if (buffer) {
    data->appsink_buffer_count++;
    // push buffer onto queue, to be processed in a different thread
    if (GstBufferQueue->size() > GSTBUFFERQUEUE_SIZE) {
      // error message
      printf ("GstBufferQueue is full!\n");
      // release buffer
      gst_buffer_unref (buffer);
    } else {
      // push onto queue
      GstBufferQueue->push(buffer);
      // activate thread
      connectionDataAvailable_GstBufferQueue.notify_all();
    }
  }
}
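As a side note, this is how one could instrument the callback to confirm the ~10 Hz arrival rate. This is a minimal sketch, not my production code; the function name is hypothetical, and g_get_monotonic_time() requires GLib >= 2.28 (which Ubuntu 11.04 ships):

/* Sketch: print the interval between consecutive buffer arrivals. */
void appsink_new_buffer_timed (GstElement *sink, CustomData *data) {
  static gint64 last_arrival_us = 0;  /* time of previous arrival, in microseconds */
  GstBuffer *buffer;
  g_signal_emit_by_name (sink, "pull-buffer", &buffer);
  if (buffer) {
    gint64 now_us = g_get_monotonic_time ();
    if (last_arrival_us != 0)
      g_print ("inter-buffer interval: %.1f ms\n", (now_us - last_arrival_us) / 1000.0);
    last_arrival_us = now_us;
    gst_buffer_unref (buffer);  /* measurement only: discard the buffer */
  }
}

At 30 Hz this should print roughly 33 ms per buffer; given the behavior described above, I would expect roughly 100 ms on the CARMA board.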
A link to the camera I'm using:
http://www.axis.com/products/cam_p1357/index.htm
Hopefully this helps. I'll continue to investigate this problem myself and provide updates as I have them. If you need any additional information, please let me know; I look forward to your responses!
Thanks
Answer 0 (score 0):
As it turns out, the problem was not with the program (i.e., the software design) but with the hardware on the CARMA board being unable to keep up with the amount of processing I was doing. In other words, the Tegra 3 processor on the CARMA is insufficient for this workload. Possible solutions are to reduce the amount of processing I do on the CARMA board or to upgrade to a different board. I hope this helps people understand the limited processing available on these small devices, and also that processors (particularly in the Tegra 3 category, which implements the system-on-a-chip model) may not currently have the computing power needed to keep up with projects or systems that require large amounts of real-time computation.
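For anyone debugging something similar: one way to confirm that the ceiling is in the decode path rather than in your own processing is to count buffers per second on the decoder's src pad while the appsink does no work. This is a sketch (untested here) using the same buffer-probe mechanism as the hashing probe in the question; the function name is hypothetical, and g_get_monotonic_time() requires GLib >= 2.28:

/* Sketch: report the decoder's output rate once per second. */
static gboolean count_decoded_buffers (GstPad *pad, GstBuffer *buffer, gpointer user_data)
{
  static guint count = 0;
  static gint64 window_start_us = 0;
  gint64 now_us = g_get_monotonic_time ();

  if (window_start_us == 0)
    window_start_us = now_us;
  count++;
  if (now_us - window_start_us >= G_USEC_PER_SEC) {
    g_print ("decoder output rate: %u buffers/s\n", count);
    count = 0;
    window_start_us = now_us;
  }
  return TRUE;  /* let the buffer continue downstream */
}

/* Attached the same way as the hashing probe: */
GstPad *dec_src_pad = gst_element_get_static_pad (data.nv_omx_h264dec, "src");
gst_pad_add_buffer_probe (dec_src_pad, G_CALLBACK (count_decoded_buffers), NULL);
gst_object_unref (dec_src_pad);

If this also tops out around 10 buffers/s with the appsink doing no work, the decode path itself is the limit, consistent with the conclusion above.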
In short, be careful what you buy! Do your best to make sure the product you purchase fits the project! That said, don't be afraid to try new devices. Despite not being able to do what I wanted, I learned more than I expected. After all, computer science is just continuous learning :p