Rapid TS fragment decoding with ffmpeg - memory leak

Date: 2017-08-08 18:08:39

Tags: c++ ffmpeg

Environment:

  • Ubuntu 16.04 (x64)
  • C++
  • ffmpeg

Use case

  • Decode many MPEG-TS fragments quickly (many per second)
  • The format of the TS fragments is dynamic and cannot be known in advance
  • The first A/V frame of each fragment needs to be extracted

Problem statement

  • The code below decodes the A/V successfully, but there is a large memory leak (MBytes/sec)
  • According to the documentation it seems that all memory should be released (is it...?)

Why am I getting this huge memory leak, and what am I overlooking in the code below?

    struct MEDIA_TYPE {
        ffmpeg::AVMediaType eType;
        union {
            struct {
                ffmpeg::AVPixelFormat   colorspace;
                int                     width, height;
                float                   fFPS;
            } video;
            struct : WAVEFORMATEX {
                short sSampleFormat;
            } audio;
        } format;
    };

    struct FRAME {
        enum { MAX_PLANES = 3 + 1 };
        int         iStrmId;
        int64_t     pts;    // Duration in 90Khz clock resolution

        uint8_t**   ppData; // Null terminated
        int32_t*    pStride;// Zero terminated
    };

    HRESULT ProcessTS(IN Operation op, IN uint8_t* pTS, IN uint32_t uiBytes, bool(*cb)(IN const MEDIA_TYPE& mt, IN FRAME& frame, IN PVOID pCtx), IN PVOID pCbCtx) 
    {
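        // Demuxes the in-memory TS buffer through a custom AVIOContext, decodes A/V packets and
        // hands decoded frames to 'cb'; with DECODE_FIRST_FRAME_OF_EACH_STREAM only the first
        // frame of every stream is delivered.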
        uiBytes -= uiBytes % 188;// align to 188 packet size

        struct CONTEXT {
            uint8_t* pTS;
            uint32_t uiBytes;
            int32_t  iPos;
        } ctx = { pTS, uiBytes, 0 };

        LOGTRACE(TSDecoder, "ProcessTS(%d, 0x%.8x, %d, 0x%.8x, 0x%.8x), this=0x%.8x\r\n", (int)op, pTS, uiBytes, cb, pCbCtx, this);

        ffmpeg::AVFormatContext* pFmtCtx = 0;
        if (0 == (pFmtCtx = ffmpeg::avformat_alloc_context()))
            return E_OUTOFMEMORY;

        // Custom I/O: the read lambda below copies TS bytes out of the in-memory buffer,
        // and the seek lambda repositions within it (AVSEEK_SIZE reports the total size).
        ffmpeg::AVIOContext* pIoCtx = ffmpeg::avio_alloc_context(pTS, uiBytes, 0, &ctx
                                                            , [](void *opaque, uint8_t *buf, int buf_size)->int {
                                                                    auto pCtx = (CONTEXT*)opaque;
                                                                    int size = pCtx->uiBytes;
                                                                    if (pCtx->uiBytes - pCtx->iPos < buf_size)
                                                                        size = pCtx->uiBytes - pCtx->iPos;
                                                                    if (size > 0) {
                                                                        memcpy(buf, pCtx->pTS + pCtx->iPos, size);
                                                                        pCtx->iPos += size;
                                                                    }
                                                                    return size;
                                                                }
                                                            , 0 
                                                            , [](void* opaque, int64_t offset, int whence)->int64_t { 
                                                                auto pCtx = (CONTEXT*)opaque;
                                                                switch (whence)
                                                                    {
                                                                    case SEEK_SET:
                                                                    pCtx->iPos = offset;
                                                                    break;
                                                                case SEEK_CUR:
                                                                    pCtx->iPos += offset;
                                                                    break;
                                                                case SEEK_END:
                                                                    pCtx->iPos = pCtx->uiBytes - offset;
                                                                    break;
                                                                case AVSEEK_SIZE:
                                                                    return pCtx->uiBytes;
                                                                }
                                                                return pCtx->iPos;
                                                            });

    pFmtCtx->pb = pIoCtx;

    int iRet = ffmpeg::avformat_open_input(&pFmtCtx, "fakevideo.ts", m_pInputFmt, 0);
    if (ERROR_SUCCESS != iRet) {
        assert(false);
        pFmtCtx = 0;// a user-supplied AVFormatContext will be freed on failure.
        return E_FAIL;
    }

    struct DecodeContext {
        ffmpeg::AVStream*   pStream;
        ffmpeg::AVCodec*    pDecoder;
        int                 iFramesProcessed;
    };

    HRESULT hr                  = S_OK;
    int     iStreamsProcessed   = 0;

    bool    bVideoFound         = false;
    int64_t ptsLast             = 0;
    int64_t dtsLast             = 0;

    auto pContext = (DecodeContext*)alloca(sizeof(DecodeContext) * pFmtCtx->nb_streams);
    for (unsigned int i = 0; i < pFmtCtx->nb_streams; i++) {
        assert(pFmtCtx->streams[i]->index == i);
        pContext[i].pStream         = pFmtCtx->streams[i];
        pContext[i].pDecoder        = ffmpeg::avcodec_find_decoder(pFmtCtx->streams[i]->codec->codec_id);
        pContext[i].iFramesProcessed= 0;
        if (0 == pContext[i].pDecoder)
            continue;
        if ((iRet = ffmpeg::avcodec_open2(pFmtCtx->streams[i]->codec, pContext[i].pDecoder, NULL)) < 0) {
            _ASSERT(FALSE);
            hr = E_FAIL;
            goto ErrExit;
        }
    }

    while (S_OK == hr) {
        ffmpeg::AVFrame* pFrame = 0;
        ffmpeg::AVPacket pkt;
        ffmpeg::av_init_packet(&pkt);
        if (ERROR_SUCCESS != (iRet = ffmpeg::av_read_frame(pFmtCtx, &pkt))) {
            hr = E_FAIL;
            break;
        }
        if ((0 == dtsLast) && (0 != pkt.dts))
            dtsLast = pkt.dts;
        if ((0 == ptsLast) && (0 != pkt.pts))
            ptsLast = pkt.pts;
        DecodeContext& ctx = pContext[pkt.stream_index];
        if (Operation::DECODE_FIRST_FRAME_OF_EACH_STREAM == op) {
            if (iStreamsProcessed == pFmtCtx->nb_streams) {
                hr = S_FALSE;
                goto Next;
            }
            if (ctx.iFramesProcessed > 0)
                goto Next;
            iStreamsProcessed++;
        }
        if (0 == ctx.pDecoder)
            goto Next;

        if (0 == (pFrame = ffmpeg::av_frame_alloc())) {
            hr = E_OUTOFMEMORY;
            goto Next;
        }

        LOGTRACE(TSDecoder, "ProcessTS(%d, 0x%.8x, %d, 0x%.8x, 0x%.8x), this=0x%.8x, decode, S:%d, T:%d\r\n", (int)op, pTS, uiBytes, cb, pCbCtx, this, pkt.stream_index, ctx.pStream->codec->codec_type);
        int         bGotFrame   = false;
        int         iBytesUsed  =   0;
        MEDIA_TYPE  mt;
        memset(&mt, 0, sizeof(mt));
        mt.eType = ctx.pStream->codec->codec_type;
        switch (mt.eType) {
        case ffmpeg::AVMediaType::AVMEDIA_TYPE_AUDIO:
            ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
            if((iRet = ffmpeg::avcodec_decode_audio4(ctx.pStream->codec, pFrame, &bGotFrame, &pkt)) < 0) {
                hr = E_FAIL;
                goto Next;
            }
            _ASSERT(pkt.size == iRet);
            // FFMPEG AAC decoder oddity: the first call to 'avcodec_decode_audio4' results in muted audio, while the second returns the expected audio
            bGotFrame = false;
            if ((iRet = ffmpeg::avcodec_decode_audio4(ctx.pStream->codec, pFrame, &bGotFrame, &pkt)) < 0) {
                hr = E_FAIL;
                goto Next;
            }
            _ASSERT(pkt.size == iRet);
            ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
            if (false == bGotFrame)
                goto Next;

            iBytesUsed                      = ctx.pStream->codec->frame_size;
            mt.format.audio.nChannels       = ctx.pStream->codec->channels;
            mt.format.audio.nSamplesPerSec  = ctx.pStream->codec->sample_rate;
            mt.format.audio.wBitsPerSample  = ffmpeg::av_get_bytes_per_sample(ctx.pStream->codec->sample_fmt) * 8;
            mt.format.audio.nBlockAlign     = mt.format.audio.nChannels * mt.format.audio.wBitsPerSample / 8;
            mt.format.audio.sSampleFormat   = (short)pFrame->format;
            break;
        case ffmpeg::AVMediaType::AVMEDIA_TYPE_VIDEO:
            if ((iRet = ffmpeg::avcodec_decode_video2(ctx.pStream->codec, pFrame, &bGotFrame, &pkt)) < 0) {
                hr = E_FAIL;
                break;
            }
            if (false == bGotFrame)
                goto Next;

            assert(ffmpeg::AVPixelFormat::AV_PIX_FMT_YUV420P == ctx.pStream->codec->pix_fmt);// That is the only color space currently supported
            iBytesUsed                      = (ctx.pStream->codec->width * ctx.pStream->codec->height * 3) / 2;
            mt.format.video.width           = ctx.pStream->codec->width;
            mt.format.video.height          = ctx.pStream->codec->height;
            mt.format.video.colorspace      = ctx.pStream->codec->pix_fmt;
            mt.format.video.fFPS            = (float)ctx.pStream->codec->framerate.num / ctx.pStream->codec->framerate.den;
            bVideoFound                     = true;
            break;
        default:
            goto Next;
        }

        ctx.iFramesProcessed++;

        {
            FRAME f = { ctx.pStream->index, ((0 == ptsLast) ? dtsLast : ptsLast), (uint8_t**)pFrame->data, (int32_t*)pFrame->linesize };
            if ((iRet > 0) && (false == cb(mt, f, pCbCtx)))
                hr = S_FALSE;// Breaks the loop
        }
    Next:
        ffmpeg::av_free_packet(&pkt);
        if (0 != pFrame) {
            //ffmpeg::av_frame_unref(pFrame);
            ffmpeg::av_frame_free(&pFrame);
            pFrame = 0;
        }
    }

ErrExit:
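    // Teardown: close each stream's codec, detach the caller-owned TS buffer so avio does not free it,
    // then release the I/O context and the demuxer context.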
    for (unsigned int i = 0; i < pFmtCtx->nb_streams; i++)
        ffmpeg::avcodec_close(pFmtCtx->streams[i]->codec);

    pIoCtx->buffer = 0;// We allocated the buffer ourselves, no need for ffmpeg to free it for us
    pFmtCtx->pb = 0;
    ffmpeg::av_free(pIoCtx);
    ffmpeg::avformat_close_input(&pFmtCtx);
    ffmpeg::avformat_free_context(pFmtCtx);
    return hr;
}

1 Answer:

Answer 0: (score 0)

You need to unref the packets before reusing them. And there is no need to allocate and deallocate them all the time.

Here is how I do it, which might be useful to you:

    // Initialise a packet queue

    std::list<AVPacket *> packets;
    ...
    for (int c = 0; c < MAX_PACKETS; c++) {
        ff->packets.push_back(av_packet_alloc());
    }

    while (!quit) {

        ... get packet from queue

        int err = av_read_frame(ff->context, packet);

        ... process packet (audio, video, etc)

        av_packet_unref(packet); // add back to queue for reuse

    }

    // Release packets

    while (ff->packets.size()) { // free packets
        AVPacket *packet = ff->packets.front();
        av_packet_free(&packet);
        ff->packets.pop_front();
    }

In your code, you are freeing a packet that was never allocated in the first place.
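
For reference, a minimal self-contained sketch of the same reuse pattern with a single packet instead of a queue (the function name read_all_packets, the placeholder path and the bare-bones error handling are only illustrative, not taken from the code above):

    extern "C" {
    #include <libavformat/avformat.h>
    }

    // Read every packet of a TS file, reusing one AVPacket for the whole run.
    int read_all_packets(const char* path /* e.g. "input.ts" (placeholder) */)
    {
        av_register_all();                      // still required on the ffmpeg releases current in 2017

        AVFormatContext* fmt = nullptr;
        if (avformat_open_input(&fmt, path, nullptr, nullptr) < 0)
            return -1;

        AVPacket* pkt = av_packet_alloc();      // allocate the packet once
        while (pkt && av_read_frame(fmt, pkt) >= 0) {
            // ... decode / inspect pkt here ...
            av_packet_unref(pkt);               // release the payload av_read_frame attached, keep the struct
        }
        av_packet_free(&pkt);                   // free the struct itself once, at the end
        avformat_close_input(&fmt);
        return 0;
    }

The key point is that av_packet_unref() is what releases the buffers av_read_frame() attaches to the packet; the packet structure itself can be allocated once and reused.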