FFmpeg: muxing video with libavformat/avcodec, but the output is not playable

Date: 2017-08-02 13:52:07

Tags: android c ffmpeg

I'm trying to write an app that takes an input video, crops it to a square, and drops the audio stream. Invoking the ffmpeg command line performs poorly, so I'm trying to do this with libavcodec and libavformat directly. But the output cannot be played by any video player and its duration is 0, even though I write all the frames. Here is my code.

void convert_video(char* input) {
    AVFormatContext *pFormatCtx = NULL;
    int             i, videoStreamIndex;
    AVCodecContext  *pCodecCtx = NULL;
    AVCodec         *pCodec = NULL;
    AVFrame         *pFrame = NULL;
    AVFrame         *pFrameSquare = NULL;
    AVPacket        packet, outPacket;
    int             frameFinished;
    int             numBytes;
    uint8_t         *buffer = NULL;
    AVCodec         *pEncodec = NULL;
    AVFormatContext *poFormatCxt = NULL;
    MuxOutputStream    videoStream = {0}, audioStream = {0};
    int tar_w, tar_h;

    const enum AVPixelFormat pic_format = AV_PIX_FMT_YUV420P;
    const enum AVCodecID codec_id = AV_CODEC_ID_H264;
    AVDictionary    *optionsDict = NULL;
    char output[50];
    sprintf(output, "%soutput.mp4", ANDROID_SDCARD);

    // Register all formats and codecs
    av_register_all();

    // Open video file
    if(avformat_open_input(&pFormatCtx, input, NULL, NULL)!=0)
        return; // Couldn't open file
    avformat_alloc_output_context2(&poFormatCxt, NULL, NULL, output);

    // Retrieve stream information
    if(avformat_find_stream_info(pFormatCtx, NULL)<0)
        return; // Couldn't find stream information

    // Find the first video stream
    videoStreamIndex=-1;
    for(i=0; i<pFormatCtx->nb_streams; i++)
        if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
            videoStreamIndex=i;
            break;
        }
    if(videoStreamIndex==-1)
        return; // Didn't find a video stream

    // Get a pointer to the codec context for the video stream
    pCodecCtx = pFormatCtx->streams[videoStreamIndex]->codec;
    tar_w = pCodecCtx->width > pCodecCtx->height ? pCodecCtx->height : pCodecCtx->width;
    tar_h = tar_w;

    // Find the decoder for the video stream
    pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
    pEncodec = avcodec_find_encoder(codec_id);

    add_stream_mux(&videoStream, poFormatCxt, &pEncodec, codec_id, tar_w, tar_h);
    videoStream.st->time_base = pFormatCtx->streams[videoStreamIndex]->time_base;
    videoStream.st->codec->time_base = videoStream.st->time_base;
    videoStream.st->codec->time_base.den *= videoStream.st->codec->ticks_per_frame;
//    add_stream(&audioStream, poFormatCxt, &)
    open_video(poFormatCxt, pEncodec, &videoStream, optionsDict);
    int ret = avio_open(&poFormatCxt->pb, output, AVIO_FLAG_WRITE);

    // Open codec
    if(avcodec_open2(pCodecCtx, pCodec, &optionsDict) < 0)
        return; // Could not open codec

    ret = avformat_write_header(poFormatCxt, &optionsDict);
    if (ret != 0) {
        ANDROID_LOG("Died");
    }

    // Allocate video frame
    pFrame=av_frame_alloc();
    pFrame->format = videoStream.st->codec->pix_fmt;
    pFrame->width = pCodecCtx->width;
    pFrame->height = pCodecCtx->height;
    av_frame_get_buffer(pFrame, 32);

    // Allocate an AVFrame structure
    pFrameSquare=av_frame_alloc();
    if(pFrameSquare==NULL)
        return;

    // Determine required buffer size and allocate buffer
    numBytes=avpicture_get_size(pic_format, tar_w,
                                tar_h);
    buffer = (uint8_t *)av_malloc(numBytes*sizeof(uint8_t));

    // Assign appropriate parts of buffer to image planes in pFrameSquare
    // Note that pFrameSquare is an AVFrame, but AVFrame is a superset
    // of AVPicture
    ret = avpicture_fill((AVPicture *)pFrameSquare, buffer, pic_format,
                   tar_w, tar_h);
    if (ret < 0) {
        ANDROID_LOG("Can't fill picture");
        return;
    }

    // Read, decode, crop, re-encode and mux each video frame
    i=0;
    ret = av_read_frame(pFormatCtx, &packet);
    while(ret >= 0) {
        // Is this a packet from the video stream?
        if(packet.stream_index == videoStreamIndex) {
            // Decode video frame
//            av_packet_rescale_ts(&packet, videoStream.st->time_base, videoStream.st->codec->time_base);
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished,
                                  &packet);
//            while (!frameFinished) {
//                avcodec_decode_video2(videoStream.st->codec, pFrame, &frameFinished, NULL);
//            }
            ANDROID_LOG("Trying to decode frame %d with result %d", i, frameFinished);
            ret = av_picture_crop((AVPicture*) pFrameSquare, (AVPicture*) pFrame, pic_format, 0, 0);
            if (ret < 0) {
                ANDROID_LOG("Can't crop image");
            }
//            av_frame_get_best_effort_timestamp(pFrame);
//            av_rescale_q()

            if(frameFinished) {

                // Encode the cropped frame
                av_init_packet(&outPacket);
//                av_packet_rescale_ts(&outPacket, videoStream.st->codec->time_base, videoStream.st->time_base);
                pFrameSquare->width = tar_w;
                pFrameSquare->height = tar_h;
                pFrameSquare->format = pic_format;
                pFrameSquare->pts = ++videoStream.next_pts;
                ret = avcodec_encode_video2(videoStream.st->codec, &outPacket, pFrameSquare, &frameFinished);

//                int count = 0;
//                while (!frameFinished && count++ < 6) {
//                    ret = avcodec_encode_video2(videoStream.st->codec, &outPacket, NULL, &frameFinished);
//                }
                if (frameFinished) {
                    ANDROID_LOG("Writing frame %d", i);
                    outPacket.stream_index = videoStream.st->index; // index in the *output* file, not the input
                    av_interleaved_write_frame(poFormatCxt, &outPacket);
                }
                av_free_packet(&outPacket);
            }
        }

        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
        ret = av_read_frame(pFormatCtx, &packet);
    }

    ret = av_write_trailer(poFormatCxt);
    if (ret < 0) {
        ANDROID_LOG("Couldn't write trailer");
    } else {
        ANDROID_LOG("Video convert finished");
    }

    // Free the pixel buffer and the frames
    av_free(buffer);
    av_frame_free(&pFrameSquare);
    av_frame_free(&pFrame);

    // Close the codec
    avcodec_close(pCodecCtx);
//    avcodec_close(pEncodecCtx);

    // Close the video file
    avformat_close_input(&pFormatCtx);

    return;
}
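
One thing to note in the loop above: av_picture_crop() is called with top_band = 0 and left_band = 0, so the square is cut from the top-left corner rather than centered. A minimal centered-crop sketch, assuming the same pCodecCtx/pFrame/pFrameSquare variables as in convert_video():

// Center-crop the decoded frame to a square (FFmpeg 3.x AVPicture API).
// The offsets are rounded down to even values so the YUV420P chroma
// planes stay aligned with the luma plane.
int top_band  = ((pCodecCtx->height - tar_h) / 2) & ~1;
int left_band = ((pCodecCtx->width  - tar_w) / 2) & ~1;
ret = av_picture_crop((AVPicture *) pFrameSquare, (AVPicture *) pFrame,
                      pic_format, top_band, left_band);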

Helpers:

#define STREAM_DURATION   10.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_PIX_FMT    AV_PIX_FMT_YUV420P /* default pix_fmt */

/* Add an output stream. */
void add_stream_mux(MuxOutputStream *ost, AVFormatContext *oc,
                       AVCodec **codec,
                       enum AVCodecID codec_id, int width, int height)
{
    AVCodecContext *codecCtx;
    int i;
    /* find the encoder */
    *codec = avcodec_find_encoder(codec_id);
    if (!(*codec)) {
        fprintf(stderr, "Could not find encoder for '%s'\n",
                avcodec_get_name(codec_id));
        exit(1);
    }
    ost->st = avformat_new_stream(oc, *codec);
    if (!ost->st) {
        fprintf(stderr, "Could not allocate stream\n");
        exit(1);
    }
    ost->st->id = oc->nb_streams-1;
    codecCtx = ost->st->codec;
    switch ((*codec)->type) {
        case AVMEDIA_TYPE_AUDIO:
            codecCtx->sample_fmt  = (*codec)->sample_fmts ?
                             (*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
            codecCtx->bit_rate    = 64000;
            codecCtx->sample_rate = 44100;
            if ((*codec)->supported_samplerates) {
                codecCtx->sample_rate = (*codec)->supported_samplerates[0];
                for (i = 0; (*codec)->supported_samplerates[i]; i++) {
                    if ((*codec)->supported_samplerates[i] == 44100)
                        codecCtx->sample_rate = 44100;
                }
            }
            codecCtx->channel_layout = AV_CH_LAYOUT_STEREO;
            if ((*codec)->channel_layouts) {
                codecCtx->channel_layout = (*codec)->channel_layouts[0];
                for (i = 0; (*codec)->channel_layouts[i]; i++) {
                    if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
                        codecCtx->channel_layout = AV_CH_LAYOUT_STEREO;
                }
            }
            codecCtx->channels        = av_get_channel_layout_nb_channels(codecCtx->channel_layout);
            ost->st->time_base = (AVRational){ 1, codecCtx->sample_rate };
            break;
        case AVMEDIA_TYPE_VIDEO:
            codecCtx->codec_id = codec_id;
            codecCtx->bit_rate = 400000;
            /* Resolution must be a multiple of two. */
            codecCtx->width    = width;
            codecCtx->height   = height;
            /* timebase: This is the fundamental unit of time (in seconds) in terms
             * of which frame timestamps are represented. For fixed-fps content,
             * timebase should be 1/framerate and timestamp increments should be
             * identical to 1. */
            ost->st->time_base = (AVRational){ 1, STREAM_FRAME_RATE };
            codecCtx->time_base       = ost->st->time_base;
            codecCtx->gop_size      = 12; /* emit one intra frame every twelve frames at most */
            codecCtx->pix_fmt       = STREAM_PIX_FMT;
            if (codecCtx->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
                /* just for testing, we also add B frames */
                codecCtx->max_b_frames = 2;
            }
            if (codecCtx->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
                /* Needed to avoid using macroblocks in which some coeffs overflow.
                 * This does not happen with normal video, it just happens here as
                 * the motion of the chroma plane does not match the luma plane. */
                codecCtx->mb_decision = 2;
            }
            break;
        default:
            break;
    }
    /* Some formats want stream headers to be separate. */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        codecCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
static void open_video(AVFormatContext *oc, AVCodec *codec, MuxOutputStream *ost, AVDictionary *opt_arg)
{
    int ret;
    AVCodecContext *c = ost->st->codec;
    AVDictionary *opt = NULL;
    av_dict_copy(&opt, opt_arg, 0);
    /* open the codec */
    ret = avcodec_open2(c, codec, &opt);
    av_dict_free(&opt);
    if (ret < 0) {
        fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
        exit(1);
    }
    /* allocate and init a re-usable frame */
    ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!ost->frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }
    /* If the output format is not YUV420P, then a temporary YUV420P
     * picture is needed too. It is then converted to the required
     * output format. */
    ost->tmp_frame = NULL;
    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
        if (!ost->tmp_frame) {
            fprintf(stderr, "Could not allocate temporary picture\n");
            exit(1);
        }
    }
}
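
open_video() calls alloc_picture(), which is not shown in the post; a sketch of it, following the muxing.c example that this helper code appears to be based on:

static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
{
    AVFrame *picture = av_frame_alloc();
    if (!picture)
        return NULL;
    picture->format = pix_fmt;
    picture->width  = width;
    picture->height = height;
    /* allocate the buffers for the frame data */
    if (av_frame_get_buffer(picture, 32) < 0) {
        fprintf(stderr, "Could not allocate frame data.\n");
        exit(1);
    }
    return picture;
}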

I suspect I'm setting the wrong pts or time_base on the frames. Also, during decoding and encoding the first frames are lost: frameFinished stays 0. I found a post saying I should flush the decoder with avcodec_decode_video2(videoStream.st->codec, pFrame, &frameFinished, NULL), but after several tries frameFinished is still 0, and avcodec_encode_video2(videoStream.st->codec, &outPacket, NULL, &frameFinished) makes the next encode call throw an error. So how can I recover all the delayed frames? I'm using FFmpeg version 3.0.1.
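
With the 3.0.x API the delayed frames come out when you drain exactly once, after av_read_frame() has returned EOF; flushing mid-stream is what makes the next encode call fail. A sketch of the drain step, assuming the same variables as in convert_video() and placed just before av_write_trailer() (error handling and the timestamp rescaling from the answer below omitted):

// Drain the decoder: an empty packet returns the buffered frames one by one.
AVPacket flushPacket;
av_init_packet(&flushPacket);
flushPacket.data = NULL;
flushPacket.size = 0;
do {
    avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &flushPacket);
    if (frameFinished) {
        // crop and encode the frame exactly as in the main loop
    }
} while (frameFinished);

// Drain the encoder: a NULL frame returns the buffered packets one by one.
int gotPacket;
do {
    av_init_packet(&outPacket);
    outPacket.data = NULL; // let the encoder allocate the buffer
    outPacket.size = 0;
    avcodec_encode_video2(videoStream.st->codec, &outPacket, NULL, &gotPacket);
    if (gotPacket)
        av_interleaved_write_frame(poFormatCxt, &outPacket);
    av_free_packet(&outPacket);
} while (gotPacket);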

1 Answer:

Answer 0 (score: 1)

The video became playable after setting the timestamp fields on the output packet:

outPacket.pts = av_rescale_q(packet.pts,
                             pFormatCtx->streams[videoStreamIndex]->time_base,
                             videoStream.st->time_base);
outPacket.dts = av_rescale_q(packet.dts,
                             pFormatCtx->streams[videoStreamIndex]->time_base,
                             videoStream.st->time_base);
outPacket.duration = av_rescale_q(packet.duration,
                                  pFormatCtx->streams[videoStreamIndex]->time_base,
                                  videoStream.st->time_base);
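
These assignments belong in the encode branch, after avcodec_encode_video2() reports a packet and before av_interleaved_write_frame(). Note that they copy the input packet's timestamps onto the output packet, which lines up only while the encoder emits packets in the same order the input packets arrive; H.264 encoders typically buffer and reorder frames, so the more general fix is to set pFrameSquare->pts before encoding and then rescale the timestamps the encoder itself writes into outPacket, e.g.:

av_packet_rescale_ts(&outPacket, videoStream.st->codec->time_base, videoStream.st->time_base);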