如何在c程序中使用libffmpeg连接mp4文件?

时间:2013-07-31 19:30:20

标签: android c ffmpeg

我知道ffmpeg命令行很简单,但是如何以编程方式实现?我不擅长这个,这里有一些来自互联网的代码,它用于将.mp4转换为.ts,我做了一些修改,但是音频流问题仍然存在:

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"
#include "libavutil/avutil.h"
#include "libavutil/rational.h"
#include "libavdevice/avdevice.h"
#include "libavutil/mathematics.h"
#include "libswscale/swscale.h"

/*
 * Create a new stream in output_format_context that stream-copies the codec
 * parameters of input_stream (no transcoding).  Returns the new stream, or
 * NULL on failure.  Uses the FFmpeg 0.8-era API (av_new_stream/stream->codec)
 * consistently with the rest of this file.
 */
static AVStream* add_output_stream(AVFormatContext* output_format_context, AVStream* input_stream)
{

    AVCodecContext* input_codec_context = NULL;
    AVCodecContext* output_codec_context = NULL;

    AVStream* output_stream = NULL;
    output_stream = av_new_stream(output_format_context, 0);
    if (!output_stream)
    {
        printf("Call av_new_stream function failed\n");
        return NULL;
    }

    input_codec_context = input_stream->codec;
    output_codec_context = output_stream->codec;

    output_codec_context->codec_id = input_codec_context->codec_id;
    output_codec_context->codec_type = input_codec_context->codec_type;
    output_codec_context->codec_tag = input_codec_context->codec_tag;
    output_codec_context->bit_rate = input_codec_context->bit_rate;

    /* BUG FIX: the original aliased the extradata pointer, so the input and
     * output codec contexts both "owned" the same buffer; freeing both (as
     * main() does during cleanup) is a double free.  Make a private, padded
     * copy instead, as libavcodec expects. */
    if (input_codec_context->extradata && input_codec_context->extradata_size > 0)
    {
        output_codec_context->extradata =
            av_malloc(input_codec_context->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
        if (!output_codec_context->extradata)
        {
            printf("Failed to allocate extradata copy\n");
            return NULL;
        }
        memcpy(output_codec_context->extradata, input_codec_context->extradata,
               input_codec_context->extradata_size);
        memset(output_codec_context->extradata + input_codec_context->extradata_size,
               0, FF_INPUT_BUFFER_PADDING_SIZE);
        output_codec_context->extradata_size = input_codec_context->extradata_size;
    }

    /* Prefer the codec time base (scaled by ticks_per_frame) when it is
     * coarser than the container's, except for very fine container bases. */
    if (av_q2d(input_codec_context->time_base) * input_codec_context->ticks_per_frame > av_q2d(input_stream->time_base) && av_q2d(input_stream->time_base) < 1.0 / 1000)
    {
        output_codec_context->time_base = input_codec_context->time_base;
        output_codec_context->time_base.num *= input_codec_context->ticks_per_frame;
    }
    else
    {
        output_codec_context->time_base = input_stream->time_base;
    }
    switch (input_codec_context->codec_type)
    {
    case AVMEDIA_TYPE_AUDIO:
        output_codec_context->channel_layout = input_codec_context->channel_layout;
        output_codec_context->sample_rate = input_codec_context->sample_rate;
        output_codec_context->channels = input_codec_context->channels;
        output_codec_context->frame_size = input_codec_context->frame_size;
        /* Some demuxers report a meaningless block_align for MP3/AC3; zero it
         * so the muxer does not propagate a bogus value. */
        if ((input_codec_context->block_align == 1 && input_codec_context->codec_id == CODEC_ID_MP3) || input_codec_context->codec_id == CODEC_ID_AC3)
        {
            output_codec_context->block_align = 0;
        }
        else
        {
            output_codec_context->block_align = input_codec_context->block_align;
        }
        break;
    case AVMEDIA_TYPE_VIDEO:
        output_codec_context->pix_fmt = input_codec_context->pix_fmt;
        output_codec_context->width = input_codec_context->width;
        output_codec_context->height = input_codec_context->height;
        output_codec_context->has_b_frames = input_codec_context->has_b_frames;
        /* Formats requiring global headers (e.g. MP4) must see the flag on
         * the codec context before the header is written. */
        if (output_format_context->oformat->flags & AVFMT_GLOBALHEADER)
        {
            output_codec_context->flags |= CODEC_FLAG_GLOBAL_HEADER;
        }
        break;
    default:
        break;
    }

    return output_stream;
}

//[[** from ffmpeg.c
//[[** from ffmpeg.c
/*
 * Run pkt through the chain of bitstream filters starting at bsfc, then
 * write it to muxer s with av_interleaved_write_frame.
 *
 * Copied from ffmpeg.c (FFmpeg 0.8 era).  Ownership note: when a filter
 * allocates a new payload (return value > 0), the old packet is freed and
 * the new buffer is given a destructor so it is freed later; when the
 * filter returns 0 the data pointer may still be the original buffer.
 * Exits the process if the final write fails.
 */
static void write_frame(AVFormatContext *s, AVPacket *pkt, AVCodecContext *avctx, AVBitStreamFilterContext *bsfc){
    int ret;

    /* Apply each bitstream filter in the chain in order. */
    while(bsfc){
        AVPacket new_pkt= *pkt;
        int a= av_bitstream_filter_filter(bsfc, avctx, NULL,
                                          &new_pkt.data, &new_pkt.size,
                                          pkt->data, pkt->size,
                                          pkt->flags & AV_PKT_FLAG_KEY);
        if(a>0){
            /* Filter allocated a new buffer: release the old packet and
             * make the new one self-owning. */
            av_free_packet(pkt);
            new_pkt.destruct= av_destruct_packet;
        } else if(a<0){
            /* Filter failed: report it but keep going with the unfiltered
             * data (the ffmpeg.c original only aborted with exit_on_error). */
            fprintf(stderr, "%s failed for stream %d, codec %s\n",
                    bsfc->filter->name, pkt->stream_index,
                    avctx->codec ? avctx->codec->name : "copy");
            //print_error("", a);
            //if (exit_on_error)
            //    ffmpeg_exit(1);
        }
        *pkt= new_pkt;

        bsfc= bsfc->next;
    }

    ret= av_interleaved_write_frame(s, pkt);
    if(ret < 0){
        //print_error("av_interleaved_write_frame()", ret);
        fprintf(stderr, "av_interleaved_write_frame(%d)\n", ret);
        exit(1);
    }
}
//]]**
//]]**

/*
 * Remux an input file (e.g. .mp4) into an MPEG-TS file, stream-copying the
 * first video and first audio stream.  Video packets are passed through the
 * h264_mp4toannexb bitstream filter so the H.264 stream carries Annex-B
 * start codes, as MPEG-TS requires.
 *
 * Usage: prog inputfile outputfile
 */
int main(int argc, char* argv[])
{

    const char* input;
    const char* output;
    double prev_segment_time = 0;   /* vestigial HLS-segmenting state */

    AVInputFormat* ifmt = NULL;
    AVOutputFormat* ofmt = NULL;
    AVFormatContext* ic = NULL;     /* input (demuxer) context */
    AVFormatContext* oc = NULL;     /* output (muxer) context */
    AVStream* video_st = NULL;
    AVStream* audio_st = NULL;
    AVCodec* codec = NULL;
    AVDictionary* pAVDictionary = NULL;

    long frame_count = 0;

    if (argc != 3) {
        fprintf(stderr, "Usage: %s inputfile outputfile\n", argv[0]);
        exit(1);
    }

    input = argv[1];
    output = argv[2];

    av_register_all();

    char szError[256] = {0};
    int nRet = avformat_open_input(&ic, input, ifmt, &pAVDictionary);
    if (nRet != 0)
    {
        av_strerror(nRet, szError, sizeof(szError));
        /* BUG FIX: the original called printf(szError) — external data used
         * as the format string (format-string vulnerability). */
        printf("%s\n", szError);
        printf("Call avformat_open_input function failed!\n");
        return 1;
    }

    if (av_find_stream_info(ic) < 0)
    {
        printf("Call av_find_stream_info function failed!\n");
        return 1;
    }

    ofmt = av_guess_format("mpegts", NULL, NULL);
    if (!ofmt)
    {
        printf("Call av_guess_format function failed!\n");
        return 1;
    }

    oc = avformat_alloc_context();
    if (!oc)
    {
        /* BUG FIX: the message previously blamed av_guess_format. */
        printf("Call avformat_alloc_context function failed!\n");
        return 1;
    }
    oc->oformat = ofmt;

    /* Map the first video and the first audio stream into the output;
     * discard everything else. */
    int video_index = -1, audio_index = -1;
    for (unsigned int i = 0; i < ic->nb_streams && (video_index < 0 || audio_index < 0); i++)
    {
        switch (ic->streams[i]->codec->codec_type)
        {
        case AVMEDIA_TYPE_VIDEO:
            video_index = i;
            ic->streams[i]->discard = AVDISCARD_NONE;
            video_st = add_output_stream(oc, ic->streams[i]);
            break;
        case AVMEDIA_TYPE_AUDIO:
            audio_index = i;
            ic->streams[i]->discard = AVDISCARD_NONE;
            audio_st = add_output_stream(oc, ic->streams[i]);
            break;
        default:
            ic->streams[i]->discard = AVDISCARD_ALL;
            break;
        }
    }

    /* BUG FIX: the original dereferenced video_st unconditionally and would
     * crash on an input with no video stream. */
    if (video_st == NULL)
    {
        printf("No video stream found in %s\n", input);
        return 1;
    }

    codec = avcodec_find_decoder(video_st->codec->codec_id);
    if (codec == NULL)
    {
        printf("Call avcodec_find_decoder function failed!\n");
        return 1;
    }

    if (avcodec_open(video_st->codec, codec) < 0)
    {
        printf("Call avcodec_open function failed !\n");
        return 1;
    }

    if (avio_open(&oc->pb, output, AVIO_FLAG_WRITE) < 0)
    {
        printf("Call avio_open function failed for %s\n", output);
        return 1;
    }

    if (avformat_write_header(oc, &pAVDictionary))
    {
        printf("Call avformat_write_header function failed.\n");
        return 1;
    }

    //[[++
    /* MP4 stores H.264 in length-prefixed (AVCC) form; MPEG-TS needs
     * Annex-B start codes, hence the bitstream filter. */
    AVBitStreamFilterContext *bsfc = av_bitstream_filter_init("h264_mp4toannexb");
    //AVBitStreamFilterContext *absfc = av_bitstream_filter_init("aac_adtstoasc");
    if (!bsfc) {
        fprintf(stderr, "bsf init error!\n");
        return -1;
    }
    //]]++

    int decode_done = 0;
    do
    {
        double segment_time = 0;
        AVPacket packet;

        decode_done = av_read_frame(ic, &packet);
        if (decode_done < 0)
            break;

        /* Make the packet self-contained before handing it to the muxer. */
        if (av_dup_packet(&packet) < 0)
        {
            printf("Call av_dup_packet function failed\n");
            av_free_packet(&packet);
            break;
        }

        /* BUG FIX: nRet must be reset each iteration; the video path goes
         * through write_frame() and does not set it, so the checks below
         * previously re-tested a stale audio result. */
        nRet = 0;

        //[[**
        if (packet.stream_index == audio_index) {
            segment_time = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
            /* NOTE(review): AAC from MP4 carries no ADTS headers; if the
             * audio comes out noisy/out of sync, check whether this muxer
             * version needs ADTS-framed AAC — likely cause of the reported
             * problem. TODO confirm against the mpegts muxer in use. */
            nRet = av_interleaved_write_frame(oc, &packet);
        } else if (packet.stream_index == video_index) {
            if (packet.flags & AV_PKT_FLAG_KEY) {
                segment_time = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
            } else {
                segment_time = prev_segment_time;
            }
            //nRet = av_interleaved_write_frame(oc, &packet);
            write_frame(oc, &packet, video_st->codec, bsfc);
        }
        //]]**
        (void)segment_time; /* segmenting logic is vestigial in this version */

        if (nRet < 0)
        {
            printf("Call av_interleaved_write_frame function failed: %d\n", nRet);
        }
        else if (nRet > 0)
        {
            printf("End of stream requested\n");
            av_free_packet(&packet);
            break;
        }
        av_free_packet(&packet);
        frame_count++;
    }while(!decode_done);

    av_write_trailer(oc);
    /* BUG FIX: frame_count is a long; %d was undefined behavior. */
    printf("frame_count = %ld\n", frame_count);

    av_bitstream_filter_close(bsfc);
    avcodec_close(video_st->codec);
    for(unsigned int k = 0; k < oc->nb_streams; k++)
    {
        av_freep(&oc->streams[k]->codec);
        av_freep(&oc->streams[k]);
    }
    /* BUG FIX: close the output AVIOContext and the input file; both were
     * previously leaked. */
    avio_close(oc->pb);
    av_free(oc);
    av_close_input_file(ic);
    //getchar();
    return 0;
}

编译此代码,获得名为muxts的可执行文件,然后:

$ ./muxts vid1.mp4 vid1.ts

没有打印任何错误消息,但生成文件中的音频流不同步且有噪音。用ffmpeg检查该.ts文件,输出如下:

$ ffmpeg -i vid1.ts
ffmpeg version 0.8.14-tessus, Copyright (c) 2000-2013 the FFmpeg developers
  built on Jul 29 2013 17:05:18 with llvm_gcc 4.2.1 (Based on Apple Inc. build 5658) (LLVM build 2336.1.00)
  configuration: --prefix=/usr/local --arch=x86_64 --as=yasm --extra-version=tessus --enable-gpl --enable-nonfree --enable-version3 --disable-ffplay --enable-libvorbis --enable-libmp3lame --enable-libx264 --enable-libxvid --enable-bzlib --enable-zlib --enable-postproc --enable-filters --enable-runtime-cpudetect --enable-debug=3 --disable-optimizations
  libavutil    51.  9. 1 / 51.  9. 1
  libavcodec   53.  8. 0 / 53.  8. 0
  libavformat  53.  5. 0 / 53.  5. 0
  libavdevice  53.  1. 1 / 53.  1. 1
  libavfilter   2. 23. 0 /  2. 23. 0
  libswscale    2.  0. 0 /  2.  0. 0
  libpostproc  51.  2. 0 / 51.  2. 0

Seems stream 0 codec frame rate differs from container frame rate: 180000.00 (180000/1) -> 90000.00 (180000/2)
Input #0, mpegts, from 'vid1.ts':
  Duration: 00:00:03.75, start: 0.000000, bitrate: 3656 kb/s
  Program 1
    Metadata:
      service_name    : Service01
      service_provider: FFmpeg
    Stream #0.0[0x100]: Video: h264 (Baseline), yuv420p, 640x480, 90k tbr, 90k tbn, 180k tbc
    Stream #0.1[0x101]: Audio: aac, 48000 Hz, mono, s16, 190 kb/s
At least one output file must be specified

我该怎么办?

如果此问题已修复,我如何将多个.ts文件连接到单个.mp4文件?

0 个答案:

没有答案