c ++ - 使用FFmpeg编码和UDP与网络摄像头

时间:2018-02-01 13:34:42

标签: c++ opencv video ffmpeg libavcodec

我正在尝试使用OpenCV从网络摄像头获取帧,使用FFmpeg对其进行编码并使用UDP发送它们。

我之前做过一个类似的项目，不是用UDP发送数据包，而是将它们保存在视频文件中。

我的代码如下：

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/opt.h>
#include <libavutil/imgutils.h>
#include <libavutil/mathematics.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>
}

#include <opencv2/opencv.hpp>

using namespace std;
using namespace cv;

// Capture/encode geometry and output format settings.
#define WIDTH 640
#define HEIGHT 480
#define CODEC_ID AV_CODEC_ID_H264            // target encoder (resolved by avcodec_find_encoder)
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P    // pixel format fed to the encoder

// frame: encoder input (YUV420P); pFrameBGR: zero-copy wrapper around the OpenCV BGR buffer.
static AVFrame *frame, *pFrameBGR;

int main(int argc, char **argv)
{
VideoCapture cap(0);
const char *url = "udp://127.0.0.1:8080";

AVFormatContext *formatContext;
AVStream *stream;
AVCodec *codec;
AVCodecContext *c;
AVDictionary *opts = NULL;

int ret, got_packet;

if (!cap.isOpened())
{
    return -1;
}

av_log_set_level(AV_LOG_TRACE);

av_register_all();
avformat_network_init();

avformat_alloc_output_context2(&formatContext, NULL, "h264", url);
if (!formatContext)
{
    av_log(NULL, AV_LOG_FATAL, "Could not allocate an output context for '%s'.\n", url);
}

codec = avcodec_find_encoder(CODEC_ID);
if (!codec)
{
    av_log(NULL, AV_LOG_ERROR, "Could not find encoder.\n");
}

stream = avformat_new_stream(formatContext, codec);

c = avcodec_alloc_context3(codec);

stream->id = formatContext->nb_streams - 1;
stream->time_base = (AVRational){1, 25};

c->codec_id = CODEC_ID;
c->bit_rate = 400000;
c->width = WIDTH;
c->height = HEIGHT;
c->time_base = stream->time_base;
c->gop_size = 12;
c->pix_fmt = STREAM_PIX_FMT;

if (formatContext->flags & AVFMT_GLOBALHEADER)
    c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

av_dict_set(&opts, "preset", "fast", 0);

av_dict_set(&opts, "tune", "zerolatency", 0);

ret = avcodec_open2(c, codec, NULL);
if (ret < 0)
{
    av_log(NULL, AV_LOG_ERROR, "Could not open video codec.\n");
}

pFrameBGR = av_frame_alloc();
if (!pFrameBGR)
{
    av_log(NULL, AV_LOG_ERROR, "Could not allocate video frame.\n");
}

frame = av_frame_alloc();
if (!frame)
{
    av_log(NULL, AV_LOG_ERROR, "Could not allocate video frame.\n");
}

frame->format = c->pix_fmt;
frame->width = c->width;
frame->height = c->height;

ret = avcodec_parameters_from_context(stream->codecpar, c);
if (ret < 0)
{
    av_log(NULL, AV_LOG_ERROR, "Could not open video codec.\n");
}

av_dump_format(formatContext, 0, url, 1);

ret = avformat_write_header(formatContext, NULL);
if (ret != 0)
{
    av_log(NULL, AV_LOG_ERROR, "Failed to connect to '%s'.\n", url);
}

Mat image(Size(HEIGHT, WIDTH), CV_8UC3);
SwsContext *swsctx = sws_getContext(WIDTH, HEIGHT, AV_PIX_FMT_BGR24, WIDTH, HEIGHT, AV_PIX_FMT_YUV420P, SWS_BILINEAR, NULL, NULL, NULL);
int frame_pts = 0;

while (1)
{
    cap >> image;

    int numBytesYUV = av_image_get_buffer_size(STREAM_PIX_FMT, WIDTH, HEIGHT, 1);
    uint8_t *bufferYUV = (uint8_t *)av_malloc(numBytesYUV * sizeof(uint8_t));

    avpicture_fill((AVPicture *)pFrameBGR, image.data, AV_PIX_FMT_BGR24, WIDTH, HEIGHT);
    avpicture_fill((AVPicture *)frame, bufferYUV, STREAM_PIX_FMT, WIDTH, HEIGHT);

    sws_scale(swsctx, (uint8_t const *const *)pFrameBGR->data, pFrameBGR->linesize, 0, HEIGHT, frame->data, frame->linesize);

    AVPacket pkt = {0};
    av_init_packet(&pkt);

    frame->pts = frame_pts;

    ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
    if (ret < 0)
    {
        av_log(NULL, AV_LOG_ERROR, "Error encoding frame\n");
    }

    if (got_packet)
    {
        pkt.pts = av_rescale_q_rnd(pkt.pts, c->time_base, stream->time_base, AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        pkt.dts = av_rescale_q_rnd(pkt.dts, c->time_base, stream->time_base, AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        pkt.duration = av_rescale_q(pkt.duration, c->time_base, stream->time_base);
        pkt.stream_index = stream->index;

        return av_interleaved_write_frame(formatContext, &pkt);

        cout << "Seguro que si" << endl;
    }
    frame_pts++;
}

avcodec_free_context(&c);
av_frame_free(&frame);
avformat_free_context(formatContext);

return 0;
}

代码编译但在函数 av_interleaved_write_frame()中返回分段错误。我尝试了几个实现或几个编解码器(在这种情况下我使用libopenh264,但使用mpeg2video返回相同的分段错误)。我也尝试使用 av_write_frame(),但它返回相同的错误。

正如我之前所说,我只想从通过USB连接的网络摄像头中抓取帧,将它们编码为H264并通过UDP将数据包发送到另一台PC。

我运行可执行文件时的控制台日志如下：

[100%] Built target display
[OpenH264] this = 0x0x244b4f0, Info:CWelsH264SVCEncoder::SetOption():ENCODER_OPTION_TRACE_CALLBACK callback = 0x7f0c302a87c0.
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Info:CWelsH264SVCEncoder::InitEncoder(), openh264 codec version = 5a5c4f1
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Info:iUsageType = 0,iPicWidth= 640;iPicHeight= 480;iTargetBitrate= 400000;iMaxBitrate= 400000;iRCMode= 0;iPaddingFlag= 0;iTemporalLayerNum= 1;iSpatialLayerNum= 1;fFrameRate= 25.000000f;uiIntraPeriod= 12;eSpsPpsIdStrategy = 0;bPrefixNalAddingCtrl = 0;bSimulcastAVC=0;bEnableDenoise= 0;bEnableBackgroundDetection= 1;bEnableSceneChangeDetect = 1;bEnableAdaptiveQuant= 1;bEnableFrameSkip= 0;bEnableLongTermReference= 0;iLtrMarkPeriod= 30, bIsLosslessLink=0;iComplexityMode = 0;iNumRefFrame = 1;iEntropyCodingModeFlag = 0;uiMaxNalSize = 0;iLTRRefNum = 0;iMultipleThreadIdc = 1;iLoopFilterDisableIdc = 0 (offset(alpha/beta): 0,0;iComplexityMode = 0,iMaxQp = 51;iMinQp = 0)
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Info:sSpatialLayers[0]: .iVideoWidth= 640; .iVideoHeight= 480; .fFrameRate= 25.000000f; .iSpatialBitrate= 400000; .iMaxSpatialBitrate= 400000; .sSliceArgument.uiSliceMode= 1; .sSliceArgument.iSliceNum= 0; .sSliceArgument.uiSliceSizeConstraint= 1500;uiProfileIdc = 66;uiLevelIdc = 41
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Warning:SliceArgumentValidationFixedSliceMode(), unsupported setting with Resolution and uiSliceNum combination under RC on! So uiSliceNum is changed to 6!
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Info:Setting MaxSpatialBitrate (400000) the same at SpatialBitrate (400000) will make the    actual bit rate lower than SpatialBitrate
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Warning:bEnableFrameSkip = 0,bitrate can't be controlled for RC_QUALITY_MODE,RC_BITRATE_MODE and RC_TIMESTAMP_MODE without enabling skip frame.
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Warning:Change QP Range from(0,51) to (12,42)
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Info:WELS CPU features/capacities (0x4007fe3f) detected:   HTT:      Y, MMX:      Y, MMXEX:    Y, SSE:      Y, SSE2:     Y, SSE3:     Y, SSSE3:    Y, SSE4.1:   Y, SSE4.2:   Y, AVX:      Y, FMA:      Y, X87-FPU:  Y, 3DNOW:    N, 3DNOWEX:  N, ALTIVEC:  N, CMOV:     Y, MOVBE:    Y, AES:      Y, NUMBER OF LOGIC PROCESSORS ON CHIP: 8, CPU CACHE LINE SIZE (BYTES):        64
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Info:WelsInitEncoderExt() exit, overall memory usage: 4542878 bytes
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Info:WelsInitEncoderExt(), pCtx= 0x0x245a400.
Output #0, h264, to 'udp://192.168.100.39:8080':
Stream #0:0, 0, 1/25: Video: h264 (libopenh264), 1 reference frame, yuv420p, 640x480 (0x0), 0/1, q=2-31, 400 kb/s, 25 tbn
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Debug:RcUpdateIntraComplexity iFrameDqBits = 385808,iQStep= 2016,iIntraCmplx = 777788928
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Debug:[Rc]Layer 0: Frame timestamp = 0, Frame type = 2, encoding_qp = 30, average qp = 30, max qp = 33, min qp = 27, index = 0, iTid = 0, used = 385808, bitsperframe = 16000, target = 64000, remainingbits = -257808, skipbuffersize = 200000
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Debug:WelsEncoderEncodeExt() OutputInfo iLayerNum = 2,iFrameSize = 48252
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Debug:WelsEncoderEncodeExt() OutputInfo iLayerId = 0,iNalType = 0,iNalCount = 2, first Nal Length=18,uiSpatialId = 0,uiTemporalId = 0,iSubSeqId = 0
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Debug:WelsEncoderEncodeExt() OutputInfo iLayerId = 1,iNalType = 1,iNalCount = 6, first Nal Length=6057,uiSpatialId = 0,uiTemporalId = 0,iSubSeqId = 0
[libopenh264 @ 0x244aa00] 6 slices
./scriptBuild.sh: line 20: 10625 Segmentation fault      (core dumped) ./display

如您所见,FFmpeg使用libopenh264并正确配置它。但是,无论如何。它总是返回相同的分段错误错误...

我使用过这样的命令：

ffmpeg -s 640x480 -f video4linux2 -i /dev/video0 -r 30 -vcodec libopenh264 -an -f h264 udp://127.0.0.1:8080

它完美无缺,但我需要在发送之前处理帧。这就是为什么我要尝试使用这些库。

我的FFmpeg版本是：

ffmpeg version 3.3.6 Copyright (c) 2000-2017 the FFmpeg developers
built with gcc 4.8 (Ubuntu 4.8.4-2ubuntu1~14.04.3)
configuration: --disable-yasm --enable-shared --enable-libopenh264 --cc='gcc -fPIC'
libavutil      55. 58.100 / 55. 58.100
libavcodec     57. 89.100 / 57. 89.100
libavformat    57. 71.100 / 57. 71.100
libavdevice    57.  6.100 / 57.  6.100
libavfilter     6. 82.100 /  6. 82.100
libswscale      4.  6.100 /  4.  6.100
libswresample   2.  7.100 /  2.  7.100

我尝试使用gbd获取有关错误的更多信息,但它没有给我调试信息。

我该如何解决这个问题?我不知道还能尝试什么...

谢谢!

1 个答案:

答案 0 :(得分:0)

avpicture_fill已弃用。我认为这是错误的来源。试试av_image_fill_arrays()

示例行应该是:

av_image_fill_arrays(pFrameBGR->data,     /* destination */
                     pFrameBGR->linesize, /* destination */
                     image.data,          /* source      */
                     AV_PIX_FMT_BGR24,    /* source      */
                     WIDTH, HEIGHT, 1);   /* source w+h & alignment */

希望有所帮助。