在MPEG-TS上使用libavcodec来转码视频和复制音频

时间:2017-08-15 16:46:24

标签: audio video ffmpeg libavcodec libavformat

我尝试使用libavcodec来降低MPEG传输流的视频比特率,同时保持音频流不变。等效的ffmpeg命令行将是:

  

ffmpeg -i SPTS_HD_2min.prog.ts -b:v 800k -s cif -map 0:0 -map 0:1 -map 0:2 -c:a copy ./transcodeCLI.ts

输入文件包含:

Input #0, mpegts, from 'SPTS_HD_2min.program8.ts': Duration: 00:02:01.56 bitrate: 10579 kb/s
Program 1 
  Stream #0:0[0x21]: Video: h264 (High) ([27][0][0][0] / 0x001B), yuv420p(tv, bt709, top first), 1920x1080 [SAR 1:1 DAR 16:9], 25 fps, 25 tbr, 90k tbn, 50 tbc
  Stream #0:1[0x61](eng): Audio: ac3 (AC-3 / 0x332D4341), 48000 Hz, 5.1(side), fltp, 384 kb/s
  Stream #0:2[0x63](eng): Audio: mp2 ([3][0][0][0] / 0x0003), 48000 Hz, stereo, s16p, 192 kb/s

使用transcoding.c example,程序生成有效的传输流。但是,该文件不播放。此外,分析工具显示文件持续时间几乎是两倍。有些事情不对。

程序很简单:读取,编码,循环写入。视频流的编解码器上下文被设置为较低的比特率和宽度/高度。

我很长时间没有成功,但是我没有设定任何预期的行为...

非常概括的来源直接在下面。 (为了简洁起见,我删除了变量defs,错误检查和调试消息)。

修改

在此之后,我已经包含了完整的,可编译的源代码。

编辑II

可以从this sharing server

下载MPEG-TS测试文件
typedef struct StreamContext
{
    AVCodecContext *decodeCtx;
    AVCodecContext *encodeCtx;
} StreamContext;
static StreamContext *streamCtx;


static int
openInputFile(const char *filename)
{
    inFormatCtx = NULL;
    ret = avformat_open_input(&inFormatCtx, filename, NULL, NULL);
    ret = avformat_find_stream_info(inFormatCtx, NULL)
    streamCtx = av_mallocz_array(inFormatCtx->nb_streams, sizeof(*streamCtx));

    for (i = 0; i < inFormatCtx->nb_streams; i++)
    {
        AVStream *stream = inFormatCtx->streams[i];
        AVCodec     *dec = avcodec_find_decoder(stream->codecpar->codec_id);
        AVCodecContext *pCodecCtx = avcodec_alloc_context3(dec);

        ret = avcodec_parameters_to_context(pCodecCtx, stream->codecpar);

        if (pCodecCtx->codec_type == AVMEDIA_TYPE_VIDEO || pCodecCtx->codec_type == AVMEDIA_TYPE_AUDIO)
        {
            if (pCodecCtx->codec_type == AVMEDIA_TYPE_VIDEO)
                pCodecCtx->framerate = av_guess_frame_rate(inFormatCtx, stream, NULL);

            /* Open decoder */
            ret = avcodec_open2(pCodecCtx, dec, NULL);
        }
        streamCtx[i].decodeCtx = pCodecCtx;
    }
    return 0;
}

static int
openOutputFile(const char *filename)
{
    outFormatCtx = NULL;
    avformat_alloc_output_context2(&outFormatCtx, NULL, NULL, filename);

    for (i = 0; i < inFormatCtx->nb_streams; i++)
    {
        out_stream = avformat_new_stream(outFormatCtx, NULL);
        in_stream  = inFormatCtx->streams[i];
        decodeCtx  = streamCtx[i].decodeCtx;

        if (decodeCtx->codec_type == AVMEDIA_TYPE_VIDEO || decodeCtx->codec_type == AVMEDIA_TYPE_AUDIO)
        {
            encoder = avcodec_find_encoder(decodeCtx->codec_id);
            pEncodeCtx = avcodec_alloc_context3(encoder);

            if (decodeCtx->codec_type == AVMEDIA_TYPE_VIDEO)
            {
                // MAKE IT SMALLER!
                pEncodeCtx->height = decodeCtx->height / 4;
                pEncodeCtx->width  = decodeCtx->width  / 4;
                pEncodeCtx->sample_aspect_ratio = decodeCtx->sample_aspect_ratio;

                // perhaps set these too?
                pEncodeCtx->bit_rate = 700000;
                pEncodeCtx->bit_rate_tolerance = 0;
                pEncodeCtx->framerate = decodeCtx->framerate;
                pEncodeCtx->time_base = decodeCtx->time_base;

                /* take first format from list of supported formats */
                if (encoder->pix_fmts)
                    pEncodeCtx->pix_fmt = encoder->pix_fmts[0];
                else
                    pEncodeCtx->pix_fmt = decodeCtx->pix_fmt;

                /* video time_base can be set to whatever is handy and supported by encoder */
                pEncodeCtx->time_base = av_inv_q(decodeCtx->framerate);
            }
            else
            {
                pEncodeCtx->sample_rate    = decodeCtx->sample_rate;
                pEncodeCtx->channel_layout = decodeCtx->channel_layout;
                pEncodeCtx->channels = av_get_channel_layout_nb_channels(pEncodeCtx->channel_layout);
                /* take first format from list of supported formats */
                pEncodeCtx->sample_fmt = encoder->sample_fmts[0];
                pEncodeCtx->time_base  = (AVRational) { 1, pEncodeCtx->sample_rate };
            }

            ret = avcodec_open2(pEncodeCtx, encoder, NULL);
            ret = avcodec_parameters_from_context(out_stream->codecpar, pEncodeCtx);

            if (outFormatCtx->oformat->flags & AVFMT_GLOBALHEADER)
                pEncodeCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

            out_stream->time_base = pEncodeCtx->time_base;
            streamCtx[i].encodeCtx = pEncodeCtx;
        }
        else if (decodeCtx->codec_type == AVMEDIA_TYPE_UNKNOWN)
            return AVERROR_INVALIDDATA;
        else
        {
            /* if this stream must be remuxed */
            ret = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);
            out_stream->time_base = in_stream->time_base;
        }

    }
    av_dump_format(outFormatCtx, 0, filename, 1);

    if (!(outFormatCtx->oformat->flags & AVFMT_NOFILE))
        ret = avio_open(&outFormatCtx->pb, filename, AVIO_FLAG_WRITE);

    /* init muxer, write output file header */
    ret = avformat_write_header(outFormatCtx, NULL);

    return 0;
}

static int
encodeAndWriteFrame(AVFrame *inFrame, unsigned int streamIndex, int *pGotFrame)
{
    encodedPkt.data = NULL;
    encodedPkt.size = 0;
    av_init_packet(&encodedPkt);

    int codecType = inFormatCtx->streams[streamIndex]->codecpar->codec_type;

    if (codecType == AVMEDIA_TYPE_VIDEO)
        ret = avcodec_encode_video2(streamCtx[streamIndex].encodeCtx, &encodedPkt, inFrame, pGotFrame);
    else
        ret = avcodec_encode_audio2(streamCtx[streamIndex].encodeCtx, &encodedPkt, inFrame, pGotFrame);

    if (*pGotFrame == 0)
        return 0;

    /* prepare packet for muxing */
    encodedPkt.stream_index = streamIndex;
    av_packet_rescale_ts(&encodedPkt, streamCtx[streamIndex].encodeCtx->time_base, outFormatCtx->streams[streamIndex]->time_base);

    /* mux encoded frame */
    ret = av_interleaved_write_frame(outFormatCtx, &encodedPkt);
    return ret;
}

int
main(int argc, char **argv)
{
    av_register_all();
    avfilter_register_all();

    if ((ret = openInputFile(argv[1])) < 0)
        goto end;
    if ((ret = openOutputFile(argv[2])) < 0)
        goto end;

    /* read all packets */
    while (1)
    {
        if ((ret = av_read_frame(inFormatCtx, &packet)) < 0)
            break;
        readPktNum++;

        streamIndex = packet.stream_index;
        type = inFormatCtx->streams[packet.stream_index]->codecpar->codec_type;
        av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n", streamIndex);

        if (type == AVMEDIA_TYPE_VIDEO || type == AVMEDIA_TYPE_AUDIO)
        {
            pDecFrame = av_frame_alloc();

            av_packet_rescale_ts(&packet, inFormatCtx->streams[streamIndex]->time_base, streamCtx[streamIndex].decodeCtx->time_base);

            if (type == AVMEDIA_TYPE_VIDEO)
                ret = avcodec_decode_video2(streamCtx[streamIndex].decodeCtx, pDecFrame, &gotDecFrame, &packet);
            else
                ret = avcodec_decode_audio4(streamCtx[streamIndex].decodeCtx, pDecFrame, &gotDecFrame, &packet);

            if (gotDecFrame)
            {
                pDecFrame->pts = av_frame_get_best_effort_timestamp(pDecFrame);

                ret = encodeAndWriteFrame(pDecFrame, streamIndex, 0);
                av_frame_free(&pDecFrame);
                if (ret < 0)
                    goto end;
            }
            else
                av_frame_free(&pDecFrame);
        }
        else
        {
            /* remux this frame without reencoding */
            av_packet_rescale_ts(&packet, inFormatCtx->streams[streamIndex]->time_base, outFormatCtx->streams[streamIndex]->time_base);

            ret = av_interleaved_write_frame(outFormatCtx, &packet);
            if (ret < 0)
                goto end;
        }
        av_packet_unref(&packet);
    }

    /* flush encoders */
    for (i = 0; i < inFormatCtx->nb_streams; i++)
    {
        /* flush encoder */
        ret = flushEncoder(i);
    }

    av_write_trailer(outFormatCtx);

end:
    av_packet_unref(&packet);
    av_frame_free(&pDecFrame);
    for (i = 0; i < inFormatCtx->nb_streams; i++)
    {
        avcodec_free_context(&streamCtx[i].decodeCtx);
        if (outFormatCtx && outFormatCtx->nb_streams > i && outFormatCtx->streams[i] && streamCtx[i].encodeCtx)
            avcodec_free_context(&streamCtx[i].encodeCtx);
    }

    av_free(streamCtx);
    avformat_close_input(&inFormatCtx);

    if (outFormatCtx && !(outFormatCtx->oformat->flags & AVFMT_NOFILE))
        avio_closep(&outFormatCtx->pb);

    avformat_free_context(outFormatCtx);

    return ret ? 1 : 0;
}

**编辑 - 完整来源**

/**
 * @file
 * API example for demuxing, decoding, filtering, encoding and muxing
 * @example transcoding.c
 */

#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libavfilter/avfiltergraph.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"

#define ANSI_COLOR_RED     "\x1b[31m"
#define ANSI_COLOR_PINK    "\x1b[31;1m"
#define ANSI_COLOR_GREEN   "\x1b[32m"
#define ANSI_COLOR_LIME    "\x1b[32;1m"
#define ANSI_COLOR_YELLOW  "\x1b[33;1m"
#define ANSI_COLOR_BLUE    "\x1b[34;1m"
#define ANSI_COLOR_MAGENTA "\x1b[35m"
#define ANSI_COLOR_CYAN    "\x1b[36;1m"
#define ANSI_COLOR_RESET   "\x1b[0m"

#define true 1
#define false 0

static int readPktNum  = 0;
static int decFrameNum = 0;
static int encFrameNum = 0;

static AVFormatContext *inFormatCtx;
static AVFormatContext *outFormatCtx;

typedef struct StreamContext
{
    AVCodecContext *decodeCtx;
    AVCodecContext *encodeCtx;
} StreamContext;
static StreamContext *streamCtx;

void
writeAVFrameAsYUVFile(const char *filePath, AVFrame *pFrame)
{
    printf("Writing YUV file: %d x %d\n", pFrame->width, pFrame->height);
    FILE *pFile = fopen(filePath, "wb");
    if (!pFile)
        return;

    int y;

    // Writing Y
    for (y=0; y < pFrame->height; y++)
        fwrite(&pFrame->data[0][pFrame->linesize[0] * y], pFrame->width, 1, pFile);

    // Writing U
    for (y=0; y < pFrame->height/2; y++)
        fwrite(&pFrame->data[1][pFrame->linesize[1] * y], pFrame->width/2, 1, pFile);

    // Writing V
    for (y=0; y < pFrame->height/2; y++)
        fwrite(&pFrame->data[2][pFrame->linesize[2] * y], pFrame->width/2, 1, pFile);

    fclose(pFile);
    printf("Wrote %s: %d x %d\n", filePath, pFrame->width, pFrame->height);
}

static void
dumpCodecContext(const AVCodecContext *pCodecContext)
{
    printf("Codec Context:\n");
    printf(" bit rate           %d\n",      (int)pCodecContext->bit_rate);
    printf(" bit rate tolerance %d\n",      pCodecContext->bit_rate_tolerance);
    printf(" size               %d x %d\n", pCodecContext->width, pCodecContext->height);
    printf(" GOP Size           %d\n",      pCodecContext->gop_size);
    printf(" Max B Frames       %d\n",      pCodecContext->max_b_frames);

    printf(" Sample Aspect      %d:%d (%.3f)\n",
        pCodecContext->sample_aspect_ratio.num, pCodecContext->sample_aspect_ratio.den,
        1.0 * pCodecContext->sample_aspect_ratio.num / pCodecContext->sample_aspect_ratio.den);

    printf(" framerate          %d / %d (%.3f fps)\n",
        pCodecContext->framerate.num, pCodecContext->framerate.den,
        1.0 * pCodecContext->framerate.den / pCodecContext->framerate.num);

    printf(" time_base          %d / %d (%.3f fps)\n",
        pCodecContext->time_base.num, pCodecContext->time_base.den,
        1.0 * pCodecContext->time_base.den / pCodecContext->time_base.num);
}

static int
openInputFile(const char *filename)
{
    int ret;
    unsigned int i;

    inFormatCtx = NULL;
    if ((ret = avformat_open_input(&inFormatCtx, filename, NULL, NULL)) < 0)
    {
        av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
        return ret;
    }

    if ((ret = avformat_find_stream_info(inFormatCtx, NULL)) < 0)
    {
        av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
        return ret;
    }

    streamCtx = av_mallocz_array(inFormatCtx->nb_streams, sizeof(*streamCtx));
    if (!streamCtx)
        return AVERROR(ENOMEM);

    for (i = 0; i < inFormatCtx->nb_streams; i++)
    {
        AVStream *stream = inFormatCtx->streams[i];
        AVCodec *dec = avcodec_find_decoder(stream->codecpar->codec_id);
        AVCodecContext *pCodecCtx;
        if (!dec)
        {
            av_log(NULL, AV_LOG_ERROR, "Failed to find decoder for stream #%u\n", i);
            return AVERROR_DECODER_NOT_FOUND;
        }

        pCodecCtx = avcodec_alloc_context3(dec);
        if (!pCodecCtx)
        {
            av_log(NULL, AV_LOG_ERROR, "Failed to allocate the decoder context for stream #%u\n", i);
            return AVERROR(ENOMEM);
        }

        ret = avcodec_parameters_to_context(pCodecCtx, stream->codecpar);
        if (ret < 0)
        {
            av_log(NULL, AV_LOG_ERROR, "Failed to copy decoder parameters to input decoder context for stream #%u\n", i);
            return ret;
        }

        /* Reencode video & audio and remux subtitles etc. */
        if (pCodecCtx->codec_type == AVMEDIA_TYPE_VIDEO || pCodecCtx->codec_type == AVMEDIA_TYPE_AUDIO)
        {
            if (pCodecCtx->codec_type == AVMEDIA_TYPE_VIDEO)
                pCodecCtx->framerate = av_guess_frame_rate(inFormatCtx, stream, NULL);

            /* Open decoder */
            ret = avcodec_open2(pCodecCtx, dec, NULL);
            if (ret < 0)
            {
                av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
                return ret;
            }
        }
        streamCtx[i].decodeCtx = pCodecCtx;
    }

    av_dump_format(inFormatCtx, 0, filename, 0);
    return 0;
}

static int
openOutputFile(const char *filename)
{
    AVStream *out_stream;
    AVStream *in_stream;
    AVCodecContext *decodeCtx, *pEncodeCtx;
    AVCodec *encoder;
    int ret;
    unsigned int i;

    outFormatCtx = NULL;
    avformat_alloc_output_context2(&outFormatCtx, NULL, NULL, filename);
    if (!outFormatCtx)
    {
        av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
        return AVERROR_UNKNOWN;
    }

    for (i = 0; i < inFormatCtx->nb_streams; i++)
    {
        out_stream = avformat_new_stream(outFormatCtx, NULL);
        if (!out_stream)
        {
            av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
            return AVERROR_UNKNOWN;
        }

        in_stream = inFormatCtx->streams[i];
        decodeCtx = streamCtx[i].decodeCtx;

        if (decodeCtx->codec_type == AVMEDIA_TYPE_VIDEO || decodeCtx->codec_type == AVMEDIA_TYPE_AUDIO)
        {
            /* in this example, we choose transcoding to same codec */
            encoder = avcodec_find_encoder(decodeCtx->codec_id);
            if (!encoder)
            {
                av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
                return AVERROR_INVALIDDATA;
            }
            pEncodeCtx = avcodec_alloc_context3(encoder);
            if (!pEncodeCtx)
            {
                av_log(NULL, AV_LOG_FATAL, "Failed to allocate the encoder context\n");
                return AVERROR(ENOMEM);
            }

            /* In this example, we transcode to same properties (picture size,
             * sample rate etc.). These properties can be changed for output
             * streams easily using filters */
            if (decodeCtx->codec_type == AVMEDIA_TYPE_VIDEO)
            {
                printf("DECODE CONTEXT "); dumpCodecContext(decodeCtx);

                // MAKE IT SMALLER!
                pEncodeCtx->height = decodeCtx->height / 4;
                pEncodeCtx->width  = decodeCtx->width  / 4;
                pEncodeCtx->sample_aspect_ratio = decodeCtx->sample_aspect_ratio;

                // perhaps set these too?
                pEncodeCtx->bit_rate = 700000;
                pEncodeCtx->bit_rate_tolerance = 0;
                pEncodeCtx->framerate = decodeCtx->framerate;
                pEncodeCtx->time_base = decodeCtx->time_base;

                printf("ENCODE CONTEXT "); dumpCodecContext(pEncodeCtx);

                /* take first format from list of supported formats */
                if (encoder->pix_fmts)
                    pEncodeCtx->pix_fmt = encoder->pix_fmts[0];
                else
                    pEncodeCtx->pix_fmt = decodeCtx->pix_fmt;

                /* video time_base can be set to whatever is handy and supported by encoder */
                pEncodeCtx->time_base = av_inv_q(decodeCtx->framerate);
            }
            else
            {
                pEncodeCtx->sample_rate    = decodeCtx->sample_rate;
                pEncodeCtx->channel_layout = decodeCtx->channel_layout;
                pEncodeCtx->channels = av_get_channel_layout_nb_channels(pEncodeCtx->channel_layout);
                /* take first format from list of supported formats */
                pEncodeCtx->sample_fmt = encoder->sample_fmts[0];
                pEncodeCtx->time_base  = (AVRational) { 1, pEncodeCtx->sample_rate };
            }

            /* Third parameter can be used to pass settings to encoder */
            ret = avcodec_open2(pEncodeCtx, encoder, NULL);
            if (ret < 0)
            {
                av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
                return ret;
            }

            ret = avcodec_parameters_from_context(out_stream->codecpar, pEncodeCtx);
            if (ret < 0)
            {
                av_log(NULL, AV_LOG_ERROR, "Failed to copy encoder parameters to output stream #%u\n", i);
                return ret;
            }

            if (outFormatCtx->oformat->flags & AVFMT_GLOBALHEADER)
                pEncodeCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

            out_stream->time_base = pEncodeCtx->time_base;
            streamCtx[i].encodeCtx = pEncodeCtx;
        }
        else if (decodeCtx->codec_type == AVMEDIA_TYPE_UNKNOWN)
        {
            av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i);
            return AVERROR_INVALIDDATA;
        }
        else
        {
            printf("STREAM %d is not video or audio\n", i);

            /* if this stream must be remuxed */
            ret = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);
            if (ret < 0)
            {
                av_log(NULL, AV_LOG_ERROR, "Copying parameters for stream #%u failed\n", i);
                return ret;
            }
            out_stream->time_base = in_stream->time_base;
        }

    }
    av_dump_format(outFormatCtx, 0, filename, 1);

    if (!(outFormatCtx->oformat->flags & AVFMT_NOFILE))
    {
        ret = avio_open(&outFormatCtx->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0)
        {
            av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);
            return ret;
        }
    }

    /* init muxer, write output file header */
    ret = avformat_write_header(outFormatCtx, NULL);
    if (ret < 0)
    {
        av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
        return ret;
    }

    return 0;
}

static int
encodeAndWriteFrame(AVFrame *inFrame, unsigned int streamIndex, int *pGotFrame)
{
    int      ret;
    int      got_frame_local;
    AVPacket encodedPkt;

    if (pGotFrame == 0)
        pGotFrame = &got_frame_local;

    encodedPkt.data = NULL;
    encodedPkt.size = 0;
    av_init_packet(&encodedPkt);

    int codecType = inFormatCtx->streams[streamIndex]->codecpar->codec_type;

    if (codecType == AVMEDIA_TYPE_VIDEO)
        ret = avcodec_encode_video2(streamCtx[streamIndex].encodeCtx, &encodedPkt, inFrame, pGotFrame);
    else
        ret = avcodec_encode_audio2(streamCtx[streamIndex].encodeCtx, &encodedPkt, inFrame, pGotFrame);

    if (ret < 0)
        return ret;

    if (*pGotFrame == 0)
        return 0;

    if (encFrameNum++ % 10 == 0)
        printf("Encoded %s frame #%d\n", (codecType == AVMEDIA_TYPE_VIDEO) ? "Video" : "Audio", encFrameNum);

    /* prepare packet for muxing */
    encodedPkt.stream_index = streamIndex;
    av_packet_rescale_ts(&encodedPkt, streamCtx[streamIndex].encodeCtx->time_base, outFormatCtx->streams[streamIndex]->time_base);

    av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");

    /* mux encoded frame */
    ret = av_interleaved_write_frame(outFormatCtx, &encodedPkt);
    return ret;
}


static int
flushEncoder(unsigned int streamIndex)
{
    int ret;
    int got_frame;

    if (!(streamCtx[streamIndex].encodeCtx->codec->capabilities & AV_CODEC_CAP_DELAY))
        return 0;

    while (1)
    {
        av_log(NULL, AV_LOG_INFO, "Flushing stream #%u encoder\n", streamIndex);
        ret = encodeAndWriteFrame(NULL, streamIndex, &got_frame);
        if (ret < 0)
            break;
        if (!got_frame)
            return 0;
    }
    return ret;
}

int
main(int argc, char **argv)
{
    int      ret;
    AVPacket packet = { .data = NULL, .size = 0 };
    AVFrame          *pDecFrame = NULL;
    enum AVMediaType type;
    unsigned int     streamIndex;
    unsigned int     i;
    int              gotDecFrame;

    if (argc != 3)
    {
        av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file> <output file>\n", argv[0]);
        return 1;
    }

    av_register_all();
    avfilter_register_all();

    if ((ret = openInputFile(argv[1])) < 0)
        goto end;
    if ((ret = openOutputFile(argv[2])) < 0)
        goto end;

    /* read all packets */
    while (1)
    {
        if ((ret = av_read_frame(inFormatCtx, &packet)) < 0)
        {
            printf(ANSI_COLOR_YELLOW "READ PACKET RETURNED %d\n" ANSI_COLOR_RESET, ret);
            break;
        }
        readPktNum++;

        streamIndex = packet.stream_index;
        type = inFormatCtx->streams[packet.stream_index]->codecpar->codec_type;
        av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n", streamIndex);

        if (type == AVMEDIA_TYPE_VIDEO || type == AVMEDIA_TYPE_AUDIO)
        {
            pDecFrame = av_frame_alloc();
            if (!pDecFrame)
            {
                ret = AVERROR(ENOMEM);
                break;
            }

            av_packet_rescale_ts(&packet, inFormatCtx->streams[streamIndex]->time_base, streamCtx[streamIndex].decodeCtx->time_base);

            if (type == AVMEDIA_TYPE_VIDEO)
                ret = avcodec_decode_video2(streamCtx[streamIndex].decodeCtx, pDecFrame, &gotDecFrame, &packet);
            else
                ret = avcodec_decode_audio4(streamCtx[streamIndex].decodeCtx, pDecFrame, &gotDecFrame, &packet);

            if (ret < 0)
            {
                av_frame_free(&pDecFrame);
                av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
                break;
            }

            if (gotDecFrame)
            {
                if (decFrameNum++ % 10 == 0)
                    printf("Decoded %s frame #%d\n", (type == AVMEDIA_TYPE_VIDEO) ? "Video" : "Audio", decFrameNum);

                if (0 && type == AVMEDIA_TYPE_VIDEO)
                {
                    printf("VIDEO width %d height %d\n", pDecFrame->width, pDecFrame->height);
                    writeAVFrameAsYUVFile("/mnt/swdevel/DVStor/decodedYUV.yuv", pDecFrame);
                }

                pDecFrame->pts = av_frame_get_best_effort_timestamp(pDecFrame);

                ret = encodeAndWriteFrame(pDecFrame, streamIndex, 0);
                av_frame_free(&pDecFrame);
                if (ret < 0)
                    goto end;
            }
            else
                av_frame_free(&pDecFrame);
        }
        else
        {
            /* remux this frame without reencoding */
            av_packet_rescale_ts(&packet, inFormatCtx->streams[streamIndex]->time_base, outFormatCtx->streams[streamIndex]->time_base);

            ret = av_interleaved_write_frame(outFormatCtx, &packet);
            if (ret < 0)
                goto end;
        }
        av_packet_unref(&packet);
    }

    printf(ANSI_COLOR_YELLOW "EXIT MAIN WHILE(1) - FLUSHING\n" ANSI_COLOR_RESET);

    /* flush encoders */
    for (i = 0; i < inFormatCtx->nb_streams; i++)
    {
        /* flush encoder */
        ret = flushEncoder(i);
        if (ret < 0)
        {
            av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
            goto end;
        }
    }

    av_write_trailer(outFormatCtx);

end:
    av_packet_unref(&packet);
    av_frame_free(&pDecFrame);
    for (i = 0; i < inFormatCtx->nb_streams; i++)
    {
        avcodec_free_context(&streamCtx[i].decodeCtx);
        if (outFormatCtx && outFormatCtx->nb_streams > i && outFormatCtx->streams[i] && streamCtx[i].encodeCtx)
            avcodec_free_context(&streamCtx[i].encodeCtx);
    }

    av_free(streamCtx);
    avformat_close_input(&inFormatCtx);

    if (outFormatCtx && !(outFormatCtx->oformat->flags & AVFMT_NOFILE))
        avio_closep(&outFormatCtx->pb);

    avformat_free_context(outFormatCtx);

    if (ret < 0)
        av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret));

    return ret ? 1 : 0;
}

0 个答案:

没有答案