如何在Android上使用FFMPEG API以30 fps提取帧?

时间:2016-09-06 10:41:13

标签: android performance video ffmpeg frame-rate

我们正在开发一个消费FFMPEG库以在Android平台上提取视频帧的项目。

在Windows上,我们观察到:

  • 使用CLI,ffmpeg能够使用命令ffmpeg -i input.flv -vf fps=1 out%d.png以30 fps提取帧。
  • 使用Xuggler,我们能够以30 fps的速度提取帧。
  • 直接在代码中使用FFMPEG API,我们得到的帧数为30 fps。

但是当我们直接在Android(See Hardware Details)上使用FFMPEG API时,我们会得到以下结果:

  • 720p视频(1280 x 720) - 16 fps(约60 ms /帧)
  • 1080p视频(1920 x 1080) - 7 fps(约140 ms /帧)

我们尚未在Android上测试过Xuggler / CLI。

理想情况下,我们应该能够以恒定的时间(约30毫秒/帧)获取数据。

我们如何在Android上获得30 fps?

在Android上使用的代码:

/*
 * Open the input file, locate the first video stream, set up a decoder
 * context and an RGB24 swscale context, then decode every video packet.
 * Error policy: iError stays 0 on success; each failure sets a distinct
 * negative code and later stages are skipped via the `if (!iError)` chain.
 * NOTE(review): pFormatCtx, pcVideoFile, iError, pCodecCtx, pCodec, packet,
 * pFrame, etc. are declared outside this excerpt — presumably function-scope
 * variables of the surrounding extraction routine.
 */
if (avformat_open_input(&pFormatCtx, pcVideoFile, NULL, NULL)) {
    iError = -1;  //Couldn't open file
}

if (!iError) {
    //Retrieve stream information
    if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
        iError = -2; //Couldn't find stream information
}

//Find the first video stream
if (!iError) {

    for (i = 0; i < pFormatCtx->nb_streams; i++) {
        if (AVMEDIA_TYPE_VIDEO
                == pFormatCtx->streams[i]->codec->codec_type) {
            iFramesInVideo = pFormatCtx->streams[i]->nb_index_entries;
            duration = pFormatCtx->streams[i]->duration;
            begin = pFormatCtx->streams[i]->start_time;
            time_base = (pFormatCtx->streams[i]->time_base.num * 1.0f)
                    / pFormatCtx->streams[i]->time_base.den;

            // Own decoder context instead of the deprecated per-stream
            // AVCodecContext (pFormatCtx->streams[i]->codec).
            pCodecCtx = avcodec_alloc_context3(NULL);
            if (!pCodecCtx) {
                iError = -6;
                break;
            }

            // BUGFIX: the original used a stack AVCodecParameters struct;
            // avcodec_parameters_from_context() heap-allocates extradata
            // inside it, which was never released -> memory leak.  Use the
            // alloc/free pair so the copy is cleaned up properly.
            AVCodecParameters *params = avcodec_parameters_alloc();
            if (!params) {
                iError = -6;
                break;
            }

            iReturn = avcodec_parameters_from_context(params,
                    pFormatCtx->streams[i]->codec);
            if (iReturn >= 0) {
                iReturn = avcodec_parameters_to_context(pCodecCtx, params);
            }
            avcodec_parameters_free(&params);
            if (iReturn < 0) {
                iError = -7;
                break;
            }

            iVideoStreamIndex = i;
            break;
        }
    }
}

if (!iError) {
    if (iVideoStreamIndex == -1) {
        iError = -3; // Didn't find a video stream
    }
}

if (!iError) {
    // Find the decoder for the video stream
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec == NULL) {
        iError = -4;
    }
}

if (!iError) {
    // Open codec
    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
        iError = -5;
}

if (!iError) {
    iNumBytes = av_image_get_buffer_size(AV_PIX_FMT_RGB24, pCodecCtx->width,
            pCodecCtx->height, 1);

    // initialize SWS context for software scaling (decoded pix_fmt -> RGB24)
    sws_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
            pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
            AV_PIX_FMT_RGB24,
            SWS_BILINEAR,
            NULL,
            NULL,
            NULL);
    if (!sws_ctx) {
        iError = -7;
    }
}
clock_gettime(CLOCK_MONOTONIC_RAW, &end);
delta_us = (end.tv_sec - start.tv_sec) * 1000000
        + (end.tv_nsec - start.tv_nsec) / 1000;
start = end;
//LOGI("Starting_Frame_Extraction: %lld", delta_us);
if (!iError) {
    // PERF FIX: allocate the frame ONCE outside the loop.  The original
    // called av_frame_alloc()/av_frame_free() for every packet, adding a
    // heap round-trip per frame to the hot decode path.
    pFrame = av_frame_alloc();
    if (NULL == pFrame) {
        iError = -8;
    }
}
if (!iError) {
    while (av_read_frame(pFormatCtx, &packet) == 0) {
        // Is this a packet from the video stream?
        if (packet.stream_index == iVideoStreamIndex) {
            // Decode video frame
            avcodec_decode_video2(pCodecCtx, pFrame, &iFrameFinished,
                    &packet);
            if (iFrameFinished) {
                //OUR CODE
            }
            // Drop the frame's buffer refs but keep the AVFrame for reuse.
            av_frame_unref(pFrame);
        }
        av_packet_unref(&packet);
    }
    av_frame_free(&pFrame);
    pFrame = NULL;
}

2 个答案:

答案 0 :(得分:1)

您需要libavfilter中的一些结构和功能。

vf 选项表示"视频过滤器"。无论原始视频的 fps 是多少，命令行 ffmpeg -i input -vf fps=30 out%d.png 都会输出 video_length_in_seconds * 30 帧。这意味着如果视频是 25 fps，您将得到一些重复的帧；如果视频超过 30 fps，则会丢弃一些帧。

要实现这一点,您必须初始化一些过滤器上下文。请参阅ffmpeg source的filtering_video.c示例。

/*
 * Build a libavfilter graph equivalent to `-vf fps=30`:
 *   buffer (source, fed with decoded frames) -> fps filter -> buffersink.
 * Mirrors ffmpeg's doc/examples/filtering_video.c.
 */
AVFilter* buffersrc  = avfilter_get_by_name("buffer");
AVFilter* buffersink = avfilter_get_by_name("buffersink");
AVFilterInOut* outputs = avfilter_inout_alloc();
AVFilterInOut* inputs  = avfilter_inout_alloc();
AVRational time_base = p_format_ctx->streams[video_stream]->time_base;
enum AVPixelFormat pix_fmts[] = { p_codec_ctx->pix_fmt, AV_PIX_FMT_NONE };

filter_graph = avfilter_graph_alloc();
if (!outputs || !inputs || !filter_graph) {
    // failed, goto cleanup
}

// The buffer source must know the geometry/format/timing of the frames
// it will be fed; these are taken from the decoder context.
char args[512];
snprintf(args, sizeof(args),
         "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
         p_codec_ctx->width, p_codec_ctx->height, p_codec_ctx->pix_fmt,
         time_base.num, time_base.den,
         p_codec_ctx->sample_aspect_ratio.num, p_codec_ctx->sample_aspect_ratio.den);

int ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                                       args, NULL, filter_graph);

if (ret < 0) {
    LOG(ERROR) << "Cannot create buffer source";
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);
    return false;
}

ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                                   NULL, NULL, filter_graph);
if (ret < 0) {
    // failed... handle error
}

// Constrain the sink to the decoder's pixel format so no implicit
// format conversion is inserted.
ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts,
                          AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
if (ret < 0) {
    // failed... handle error
}

// Endpoints of the parsed graph: the source's output pad feeds the parsed
// chain ("in"), whose result is consumed by the sink ("out").
outputs->name       = av_strdup("in");
outputs->filter_ctx = buffersrc_ctx;
outputs->pad_idx    = 0;
outputs->next       = NULL;

inputs->name        = av_strdup("out");
inputs->filter_ctx  = buffersink_ctx;
inputs->pad_idx     = 0;
inputs->next        = NULL;

// BUGFIX: the original declared `const char* filter_description[256]` —
// an array of 256 pointers initialized from one string literal, which is
// invalid C — and then passed a nonexistent C++ `filters_descr.c_str()`.
// A plain C string is what avfilter_graph_parse_ptr() expects.
const char *filter_description = "fps=fps=30";

if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_description,
                                    &inputs, &outputs, NULL)) < 0) {
    // failed... handle error
}

if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0) {
    // failed... handle error
}

好的,这都需要初始化。

在解码部分添加一些代码:

// Decode one packet, push the decoded frame into the filter graph, then
// drain every frame the fps filter emits (it may output 0..N frames per
// input frame, duplicating or dropping to hit the target rate).
avcodec_decode_video2(p_codec_ctx, p_frame, &got_frame, &packet);
if (got_frame) {  // BUGFIX: got_frame is a plain int; `*got_frame` was an invalid dereference
    p_frame->pts = av_frame_get_best_effort_timestamp(p_frame);
    if (av_buffersrc_add_frame_flags(buffersrc_ctx, p_frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
        // failed... handle error
    }
    while (1) {
        // p_frame_stage is an AVFrame that must be allocated beforehand.
        int ret = av_buffersink_get_frame(buffersink_ctx, p_frame_stage);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            break;
        if (ret < 0) {
            // BUGFIX: a genuine filter error must leave the loop; the
            // original fell through and spun forever on the same error.
            break;
        }
        // Do something with p_frame_stage here.
        // BUGFIX: release the sink's buffer references before the next
        // av_buffersink_get_frame() call, otherwise every filtered frame
        // is leaked.
        av_frame_unref(p_frame_stage);
    }
}

答案 1 :(得分:0)

请看一下https://gitter.im/mobile-ffmpeg/Lobby?at=5c5bb384f04ef00644f1bb4e,在下面几行中,它们提到了加速进程的选项,例如... -preset ultrafast,-threads 10,-tune zerolatency,-x264-params sliced-threads = 1