使用avcodec_encode_video2()进行随机分段故障

时间:2015-08-06 22:50:37

标签: c++ video ffmpeg libavcodec libav

这是我的第一个问题,所以我希望我做得正确。如果没有,请让我知道解决它。

我试图使用ffmpeg库将短暂的(10秒)mp4视频文件转换为gif(我很高兴使用ffmpeg)。该程序很好地转换为gif,但有时会随机崩溃。

这是我使用的ffmpeg库的版本:

libavutil      54. 27.100
libavcodec     56. 41.100
libavformat    56. 36.100
libavdevice    56.  4.100
libavfilter     5. 16.101
libavresample   2.  1.  0
libswscale      3.  1.101
libswresample   1.  2.100
libpostproc    53.  3.100

我使用的是1920x1080p的视频,所以为了生成gif我正在进行像素格式转换,从AV_PIX_FMT_YUV420PAV_PIX_FMT_RGB8,从初始分辨率调整大小至432x240p。

以下是代码:

int VideoManager::loadVideo(QString filename, bool showInfo)
{
    if(avformat_open_input(&iFmtCtx, filename.toStdString().c_str(), 0, 0) < 0)
    {
        qDebug() << "Could not open input file " << filename;
        closeInput();
        return -1;
    }
    if (avformat_find_stream_info(iFmtCtx, 0) < 0)
    {
        qDebug() << "Failed to retrieve input stream information";
        closeInput();
        return -2;
    }

    videoStreamIndex = -1;
    for(unsigned int i = 0; i < iFmtCtx->nb_streams; ++i)
        if(iFmtCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            videoStreamIndex = i;
            break;
        }

    if(videoStreamIndex == -1)
    {
        qDebug() << "Didn't find any video stream!";
        closeInput();
        return -3;
    }
    iCodecCtx = iFmtCtx->streams[videoStreamIndex]->codec;

    iCodec = avcodec_find_decoder(iCodecCtx->codec_id);
    if(iCodec == NULL) // Codec not found
    {
        qDebug() << "Codec not found!";
        closeInput();
        return -4;
    }
    if(avcodec_open2(iCodecCtx, iCodec, NULL) < 0)
    {
        qDebug() << "Could not open codec!";
        closeInput();
        return -1;
    }

    if(showInfo)
        av_dump_format(iFmtCtx, 0, filename.toStdString().c_str(), 0);

    return 0;
}

void VideoManager::generateGif(QString filename)
{
    int ret, frameCount = 0;
    AVPacket packet;
    packet.data = NULL;
    packet.size = 0;
    AVFrame *frame = NULL;
    unsigned int stream_index;
    int got_frame;

    gifHeight = iFmtCtx->streams[videoStreamIndex]->codec->height;
    gifWidth  = iFmtCtx->streams[videoStreamIndex]->codec->width;

    if(gifHeight > MAX_GIF_HEIGHT || gifWidth > MAX_GIF_WIDTH)
    {
        if(gifHeight > gifWidth)
        {
            gifWidth  = (float)gifWidth * ( (float)MAX_GIF_HEIGHT / (float)gifHeight );
            gifHeight = MAX_GIF_HEIGHT;
        }
        else
        {
            gifHeight = (float)gifHeight * ( (float)MAX_GIF_WIDTH / (float)gifWidth );
            gifWidth  = MAX_GIF_WIDTH;
        }
    }


    if(openOutputFile(filename.toStdString().c_str()) < 0)
    {
        qDebug() << "Error openning output file: " << filename;
        return;
    }

    while (1) {
        int ret = av_read_frame(iFmtCtx, &packet);
        if (ret < 0)
        {
            if(ret != AVERROR_EOF)
                qDebug() << "Error reading frame: " << ret;
            break;
        }
        stream_index = packet.stream_index;

        if(stream_index == videoStreamIndex)
        {
            frame = av_frame_alloc();
            if (!frame) {
                qDebug() << "Error allocating frame";
                break;
            }
            av_packet_rescale_ts(&packet,
                                 iFmtCtx->streams[stream_index]->time_base,
                                 iFmtCtx->streams[stream_index]->codec->time_base);

            ret = avcodec_decode_video2(iFmtCtx->streams[stream_index]->codec, frame,
                    &got_frame, &packet);
            if (ret < 0) {
                qDebug() << "Decoding failed";
                break;
            }

            if(got_frame)
            {
                qDebug() << ++frameCount;
                nframes++;
                frame->pts = av_frame_get_best_effort_timestamp(frame);

                ////////////////////////////////////////////////////////////////////////////////
                /// Pixel format convertion and resize
                ////////////////////////////////////////////////////////////////////////////////
                uint8_t *out_buffer = NULL;
                SwsContext *img_convert_ctx = NULL;
                AVFrame *pFrameRGB = av_frame_alloc();

                if(pFrameRGB == NULL)
                {
                    qDebug() << "Error allocating frameRGB";
                    break;
                }

                AVPixelFormat pixFmt;
                switch (iFmtCtx->streams[stream_index]->codec->pix_fmt)
                {
                case AV_PIX_FMT_YUVJ420P : pixFmt = AV_PIX_FMT_YUV420P; break;
                case AV_PIX_FMT_YUVJ422P : pixFmt = AV_PIX_FMT_YUV422P; break;
                case AV_PIX_FMT_YUVJ444P : pixFmt = AV_PIX_FMT_YUV444P; break;
                case AV_PIX_FMT_YUVJ440P : pixFmt = AV_PIX_FMT_YUV440P; break;
                default:
                    pixFmt = iFmtCtx->streams[stream_index]->codec->pix_fmt;
                }

                out_buffer = (uint8_t*)av_malloc( avpicture_get_size( AV_PIX_FMT_RGB8,
                                                  gifWidth,
                                                  gifHeight ));
                if(!out_buffer)
                {
                    qDebug() << "Error alocatting out_buffer!";
                }
                avpicture_fill((AVPicture *)pFrameRGB, out_buffer, AV_PIX_FMT_RGB8,
                               gifWidth,
                               gifHeight);
                img_convert_ctx = sws_getContext( iFmtCtx->streams[stream_index]->codec->width,
                                                  iFmtCtx->streams[stream_index]->codec->height,
                                                  pixFmt,
                                                  gifWidth,
                                                  gifHeight,
                                                  AV_PIX_FMT_RGB8,
                                                  SWS_ERROR_DIFFUSION, NULL, NULL, NULL );

                if(!img_convert_ctx)
                {
                    qDebug() << "error getting sws context";
                }

                sws_scale( img_convert_ctx, (const uint8_t* const*)frame->data,
                           frame->linesize, 0,
                           iFmtCtx->streams[stream_index]->codec->height,
                           pFrameRGB->data,
                           pFrameRGB->linesize );

                pFrameRGB->format = AV_PIX_FMT_RGB8;
                pFrameRGB->pts = frame->pts;
                pFrameRGB->best_effort_timestamp = frame->best_effort_timestamp;
                pFrameRGB->width = gifWidth;
                pFrameRGB->height = gifHeight;
                pFrameRGB->pkt_dts = frame->pkt_dts;
                pFrameRGB->pkt_pts = frame->pkt_pts;
                pFrameRGB->pkt_duration = frame->pkt_duration;
                pFrameRGB->pkt_pos = frame->pkt_pos;
                pFrameRGB->pkt_size = frame->pkt_size;
                pFrameRGB->interlaced_frame = frame->interlaced_frame;
                ////////////////////////////////////////////////////////////////////////////////
                ret = encodeAndWriteFrame(pFrameRGB, stream_index, NULL);
                //av_frame_free(&frame);
                //av_free(out_buffer);
                //sws_freeContext(img_convert_ctx);
                if (ret < 0)
                {
                    qDebug() << "Error encoding and writting frame";
                    //av_free_packet(&packet);
                    closeOutput();
                }
            } 
            else {
                //av_frame_free(&frame);
            }
        }
        av_free_packet(&packet);
    }

    ret = flushEncoder(videoStreamIndex);
    if (ret < 0)
    {
        qDebug() << "Flushing encoder failed";
    }

    av_write_trailer(oFmtCtx);

    //av_free_packet(&packet);
    //av_frame_free(&frame);
    closeOutput();
}


void VideoManager::closeOutput()
{
    if (oFmtCtx && oFmtCtx->nb_streams > 0 && oFmtCtx->streams[0] && oFmtCtx->streams[0]->codec)
        avcodec_close(oFmtCtx->streams[0]->codec);
    if (oFmtCtx && oFmt && !(oFmt->flags & AVFMT_NOFILE))
        avio_closep(&oFmtCtx->pb);
    avformat_free_context(oFmtCtx);
}

int VideoManager::openOutputFile(const char *filename)
{
    AVStream *out_stream;
    AVStream *in_stream;
    AVCodecContext *dec_ctx, *enc_ctx;
    AVCodec *encoder;
    int ret;

    oFmtCtx = NULL;
    avformat_alloc_output_context2(&oFmtCtx, NULL, NULL, filename);
    if (!oFmtCtx) {
        qDebug() << "Could not create output context";
        return AVERROR_UNKNOWN;
    }

    oFmt = oFmtCtx->oformat;

    out_stream = avformat_new_stream(oFmtCtx, NULL);
    if (!out_stream) {
        qDebug() << "Failed allocating output stream";
        return AVERROR_UNKNOWN;
    }

    in_stream = iFmtCtx->streams[videoStreamIndex];
    dec_ctx = in_stream->codec;
    enc_ctx = out_stream->codec;

    encoder = avcodec_find_encoder(AV_CODEC_ID_GIF);
    if (!encoder) {
        qDebug() << "FATAL!: Necessary encoder not found";
        return AVERROR_INVALIDDATA;
    }

    enc_ctx->height = gifHeight;    
    enc_ctx->width = gifWidth;      
    enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
    enc_ctx->pix_fmt = AV_PIX_FMT_RGB8;
    enc_ctx->time_base = dec_ctx->time_base;
    ret = avcodec_open2(enc_ctx, encoder, NULL);
    if (ret < 0) {
        qDebug() << "Cannot open video encoder for gif";
        return ret;
    }

    if (oFmt->flags & AVFMT_GLOBALHEADER)
        enc_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;

    if (!(oFmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&oFmtCtx->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            qDebug() << "Could not open output file " << filename;
            return ret;
        }
    }

    ret = avformat_write_header(oFmtCtx, NULL);
    if (ret < 0) {
        qDebug() << "Error occurred when opening output file";
        return ret;
    }

    return 0;
}


int VideoManager::encodeAndWriteFrame(AVFrame *frame, unsigned int stream_index, int *got_frame) {
    int ret;
    int got_frame_local;
    AVPacket enc_pkt;

    if (!got_frame)
        got_frame = &got_frame_local;

    enc_pkt.data = NULL;
    enc_pkt.size = 0;
    av_init_packet(&enc_pkt);
    ret = avcodec_encode_video2(oFmtCtx->streams[stream_index]->codec, &enc_pkt,
            frame, got_frame);
    //av_frame_free(&frame);
    if (ret < 0)
        return ret;
    if (!(*got_frame))
        return 0;

    enc_pkt.stream_index = stream_index;
    av_packet_rescale_ts(&enc_pkt,
                         oFmtCtx->streams[stream_index]->codec->time_base,
                         oFmtCtx->streams[stream_index]->time_base);

    ret = av_interleaved_write_frame(oFmtCtx, &enc_pkt);
    return ret;
}


int VideoManager::flushEncoder(unsigned int stream_index)
{
    int ret;
    int got_frame;

    if (!(oFmtCtx->streams[stream_index]->codec->codec->capabilities &
                CODEC_CAP_DELAY))
        return 0;

    while (1) {
        ret = encodeAndWriteFrame(NULL, stream_index, &got_frame);
        if (ret < 0)
            break;
        if (!got_frame)
            return 0;
    }
    return ret;
}

我知道有很多内存泄漏。我删除/评论了大部分免费功能的意图,因为我认为这是问题所在。

我使用Qtcreator,所以当我调试程序时,这是输出:

Level Function                            Line
0     av_image_copy                       303
1     frame_copy_video                    650     
2     av_frame_copy                       687     
3     av_frame_ref                        384     
4     gif_encode_frame                    307     
5     avcodec_encode_video2               2191     
6     VideoManager::encodeAndWriteFrame   813     
7     VideoManager::generateGif           375     
8     qMain                               31     
9     WinMain*16                          112     
10    main

我已检查过程序崩溃是否存在特定帧,但它也是一个随机帧。

知道我做错了什么?任何帮助都将非常感激。

修改

经过几天的痛苦,痛苦和挫折之后,我决定从头开始编写整个代码。两次我都是从this示例开始并修改它以便像我之前描述的那样工作。它现在完美地运作:D!我在旧代码(之前发布)中找到的唯一错误是当我尝试访问我使用videoStreamIndex的输出文件中的视频流时,但该索引来自输入文件中的视频流。有时它可能是相同的索引,有时则不是。但它并没有解释为什么它会随机崩溃。如果这是崩溃的原因,每次我使用相同的视频运行代码时它都应该崩溃。可能,该代码中存在更多错误。 请注意,如果在上面的代码中修复该错误实际上解决了崩溃问题,我还没有测试过。

1 个答案:

答案 0 :(得分:0)

我认为你的参数可能会混淆。 According to what I'm reading from the documentation avcodec_decode_video2的原型如下:

int avcodec_decode_video2 (AVCodecContext * avctx,
                           AVFrame * picture,
                           int * got_picture_ptr,
                           const AVPacket * avpkt)  

并且被称为:

ret = avcodec_encode_video2(oFmtCtx->streams[stream_index]->codec, // Dunno.
                            &enc_pkt, //AVPacket * should be AVFrame *
                            frame, //AVFrame * Should be int *
                            got_frame); // int * should be AVPacket *