Recording an RTSP stream with ffmpeg in iOS

Asked: 2015-12-10 04:02:18

Tags: objective-c ffmpeg video-streaming rtsp

I followed iFrameExtractor and successfully streamed RTSP in my Swift project. The project also has a recording function: it mainly uses avformat_write_header, av_interleaved_write_frame and av_write_trailer to save the RTSP source into an MP4 file.
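
For reference, the usual call order for muxing compressed packets to a file with these APIs looks roughly like this. This is a minimal sketch rather than the project's actual code: error handling is omitted, out_path is a placeholder, and copying the stream parameters from the RTSP source is left as a comment.

// Minimal sketch of the mux-to-file call order (FFmpeg 2.x-era API); error handling omitted
AVFormatContext *oc = NULL;
avformat_alloc_output_context2(&oc, NULL, NULL, out_path);  // out_path: e.g. ".../Documents/test.mp4" (placeholder)

AVStream *st = avformat_new_stream(oc, NULL);               // one output stream per recorded input stream;
                                                            // codec parameters are copied from the RTSP source here

if (!(oc->oformat->flags & AVFMT_NOFILE))
    avio_open(&oc->pb, out_path, AVIO_FLAG_WRITE);          // open the output file

avformat_write_header(oc, NULL);                            // write the container header

// For every compressed packet read from the RTSP source:
//   rescale pkt.pts/pkt.dts into the output stream's time_base, then
//   av_interleaved_write_frame(oc, &pkt);

av_write_trailer(oc);                                       // finalize the trailer/index
if (!(oc->oformat->flags & AVFMT_NOFILE))
    avio_closep(&oc->pb);
avformat_free_context(oc);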

When I run this project on my device, the RTSP streaming works fine, but the recording function always produces an empty MP4 file with no video and no audio.

Can anyone tell me which step I am missing?

I am using an iPhone 5 with iOS 9.1 and Xcode 7.1.1. ffmpeg is version 2.8.3, built following the compilation instructions in CompilationGuide – FFmpeg.

Below is the sample code from this project.

The function that produces each frame:

-(BOOL)stepFrame {
// AVPacket packet;
int frameFinished=0;
static bool bFirstIFrame=false;
static int64_t vPTS=0, vDTS=0, vAudioPTS=0, vAudioDTS=0;

while(!frameFinished && av_read_frame(pFormatCtx, &packet)>=0) {
    // Is this a packet from the video stream?
    if(packet.stream_index==videoStream) {

        // 20130525 albert.liao modified start

        // Initialize a new format context for writing file
        if(veVideoRecordState!=eH264RecIdle)
        {
            switch(veVideoRecordState)
            {
                case eH264RecInit:
                {                        
                    if ( !pFormatCtx_Record )
                    {
                        int bFlag = 0;
                        //NSString *videoPath = [NSHomeDirectory() stringByAppendingPathComponent:@"Documents/test.mp4"];
                        NSString *videoPath = @"/Users/liaokuohsun/iFrameTest.mp4";

                        const char *file = [videoPath UTF8String];
                        pFormatCtx_Record = avformat_alloc_context();
                        bFlag = h264_file_create(file, pFormatCtx_Record, pCodecCtx, pAudioCodecCtx,/*fps*/0.0, packet.data, packet.size );

                        if(bFlag==true)
                        {
                            veVideoRecordState = eH264RecActive;
                            fprintf(stderr, "h264_file_create success\n");                                
                        }
                        else
                        {
                            veVideoRecordState = eH264RecIdle;
                            fprintf(stderr, "h264_file_create error\n");
                        }
                    }
                }
                //break;

                case eH264RecActive:
                {
                    if((bFirstIFrame==false) &&(packet.flags&AV_PKT_FLAG_KEY)==AV_PKT_FLAG_KEY)
                    {
                        bFirstIFrame=true;
                        vPTS = packet.pts ;
                        vDTS = packet.dts ;
#if 0
                        NSRunLoop *pRunLoop = [NSRunLoop currentRunLoop];
                        [pRunLoop addTimer:RecordingTimer forMode:NSDefaultRunLoopMode];
#else
                        [NSTimer scheduledTimerWithTimeInterval:5.0//2.0
                                                         target:self
                                                       selector:@selector(StopRecording:)
                                                       userInfo:nil
                                                        repeats:NO];
#endif
                    }

                    // Record audio when 1st i-Frame is obtained
                    if(bFirstIFrame==true)
                    {
                        if ( pFormatCtx_Record )
                        {
#if PTS_DTS_IS_CORRECT==1
                            packet.pts = packet.pts - vPTS;
                            packet.dts = packet.dts - vDTS;

#endif
                                h264_file_write_frame( pFormatCtx_Record, packet.stream_index, packet.data, packet.size, packet.dts, packet.pts);

                        }
                        else
                        {
                            NSLog(@"pFormatCtx_Record no exist");
                        }
                    }
                }
                break;

                case eH264RecClose:
                {
                    if ( pFormatCtx_Record )
                    {
                        h264_file_close(pFormatCtx_Record);
#if 0
                        // 20130607 Test
                        dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^(void)
                        {
                            ALAssetsLibrary *library = [[ALAssetsLibrary alloc]init];
                            NSString *filePathString = [NSHomeDirectory() stringByAppendingPathComponent:@"Documents/test.mp4"];
                            NSURL *filePathURL = [NSURL fileURLWithPath:filePathString isDirectory:NO];
                            if(1)// ([library videoAtPathIsCompatibleWithSavedPhotosAlbum:filePathURL])
                            {
                                [library writeVideoAtPathToSavedPhotosAlbum:filePathURL completionBlock:^(NSURL *assetURL, NSError *error){
                                    if (error) {
                                        // TODO: error handling
                                        NSLog(@"writeVideoAtPathToSavedPhotosAlbum error");
                                    } else {
                                        // TODO: success handling
                                        NSLog(@"writeVideoAtPathToSavedPhotosAlbum success");
                                    }
                                }];
                            }
                            [library release];
                        });
#endif
                        vPTS = 0;
                        vDTS = 0;
                        vAudioPTS = 0;
                        vAudioDTS = 0;
                        pFormatCtx_Record = NULL;
                        NSLog(@"h264_file_close() is finished");
                    }
                    else
                    {
                        NSLog(@"fc no exist");
                    }
                    bFirstIFrame = false;
                    veVideoRecordState = eH264RecIdle;

                }
                break;

                default:
                    if ( pFormatCtx_Record )
                    {
                        h264_file_close(pFormatCtx_Record);
                        pFormatCtx_Record = NULL;
                    }
                    NSLog(@"[ERROR] unexpected veVideoRecordState!!");
                    veVideoRecordState = eH264RecIdle;
                    break;
            }
        }

        // Decode video frame
        avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
    }
    else if(packet.stream_index==audioStream)
    {
        // 20131024 albert.liao modified start
        static int vPktCount=0;
        BOOL bIsAACADTS = FALSE;
        int ret = 0;

        if(aPlayer.vAACType == eAAC_UNDEFINED)
        {
            tAACADTSHeaderInfo vxAACADTSHeaderInfo = {0};
            bIsAACADTS = [AudioUtilities parseAACADTSHeader:(uint8_t *)packet.data ToHeader:&vxAACADTSHeaderInfo];
        }

        @synchronized(aPlayer)
        {
            if(aPlayer==nil)
            {
                aPlayer = [[AudioPlayer alloc]initAudio:nil withCodecCtx:(AVCodecContext *) pAudioCodecCtx];
                NSLog(@"aPlayer initAudio");

                if(bIsAACADTS)
                {
                    aPlayer.vAACType = eAAC_ADTS;
                    //NSLog(@"is ADTS AAC");
                }
            }
            else
            {
                if(vPktCount<5) // Audio starts playing only after the first few frames have been rendered
                {
                    vPktCount++;
                }
                else
                {
                    if([aPlayer getStatus]!=eAudioRunning)
                    {
                        dispatch_async(dispatch_get_main_queue(), ^(void) {
                            @synchronized(aPlayer)
                            {
                                NSLog(@"aPlayer start play");
                                [aPlayer Play];
                            }

                        });
                    }
                }
            }
        };

        @synchronized(aPlayer)
        {
            int ret = 0;

            ret = [aPlayer putAVPacket:&packet];
            if(ret <= 0)
                NSLog(@"Put Audio Packet Error!!");

        }

        // 20131024 albert.liao modified end

        if(bFirstIFrame==true)
        {
            switch(veVideoRecordState)
            {
                case eH264RecActive:
                {
                    if ( pFormatCtx_Record )
                    {
                        h264_file_write_audio_frame(pFormatCtx_Record, pAudioCodecCtx, packet.stream_index, packet.data, packet.size, packet.dts, packet.pts);

                    }
                    else
                    {
                        NSLog(@"pFormatCtx_Record no exist");
                    }
                }
            }
        }
    }
    else
    {
        //fprintf(stderr, "packet len=%d, Byte=%02X%02X%02X%02X%02X\n",
        //        packet.size, packet.data[0],packet.data[1],packet.data[2],packet.data[3], packet.data[4]);
    }
    // 20130525 albert.liao modified end
}
return frameFinished!=0;
}

avformat_write_header:

int h264_file_create(const char *pFilePath, AVFormatContext *fc, AVCodecContext *pCodecCtx,AVCodecContext *pAudioCodecCtx, double fps, void *p, int len )
{
int vRet=0;
AVOutputFormat *of=NULL;
AVStream *pst=NULL;
AVCodecContext *pcc=NULL, *pAudioOutputCodecContext=NULL;

avcodec_register_all();
av_register_all();
av_log_set_level(AV_LOG_VERBOSE);

if(!pFilePath)
{
    fprintf(stderr, "FilePath no exist");
    return -1;
}

if(!fc)
{
    fprintf(stderr, "AVFormatContext no exist");
    return -1;
}
fprintf(stderr, "file=%s\n",pFilePath);

// Create container
of = av_guess_format( 0, pFilePath, 0 );
fc->oformat = of;
strcpy( fc->filename, pFilePath );

// Add video stream
pst = avformat_new_stream( fc, 0 );
vVideoStreamIdx = pst->index;
fprintf(stderr,"Video Stream:%d",vVideoStreamIdx);

pcc = pst->codec;
avcodec_get_context_defaults3( pcc, AVMEDIA_TYPE_VIDEO );

// Save the stream as origin setting without convert
pcc->codec_type = pCodecCtx->codec_type;
pcc->codec_id = pCodecCtx->codec_id;
pcc->bit_rate = pCodecCtx->bit_rate;
pcc->width = pCodecCtx->width;
pcc->height = pCodecCtx->height;

if(fps==0)
{
    double fps=0.0;
    AVRational pTimeBase;
    pTimeBase.num = pCodecCtx->time_base.num;
    pTimeBase.den = pCodecCtx->time_base.den;
    fps = 1.0/ av_q2d(pCodecCtx->time_base)/ FFMAX(pCodecCtx->ticks_per_frame, 1);
    fprintf(stderr,"fps_method(tbc): 1/av_q2d()=%g",fps);
    pcc->time_base.num = 1;
    pcc->time_base.den = fps;
}
else
{
    pcc->time_base.num = 1;
    pcc->time_base.den = fps;
}
// reference ffmpeg\libavformat\utils.c

// For SPS and PPS in avcC container
pcc->extradata = malloc(sizeof(uint8_t)*pCodecCtx->extradata_size);
memcpy(pcc->extradata, pCodecCtx->extradata, pCodecCtx->extradata_size);
pcc->extradata_size = pCodecCtx->extradata_size;

// For Audio stream
if(pAudioCodecCtx)
{
    AVCodec *pAudioCodec=NULL;
    AVStream *pst2=NULL;
    pAudioCodec = avcodec_find_encoder(AV_CODEC_ID_AAC);

    // Add audio stream
    pst2 = avformat_new_stream( fc, pAudioCodec );
    vAudioStreamIdx = pst2->index;
    pAudioOutputCodecContext = pst2->codec;
    avcodec_get_context_defaults3( pAudioOutputCodecContext, pAudioCodec );
    fprintf(stderr,"Audio Stream:%d",vAudioStreamIdx);
    fprintf(stderr,"pAudioCodecCtx->bits_per_coded_sample=%d",pAudioCodecCtx->bits_per_coded_sample);

    pAudioOutputCodecContext->codec_type = AVMEDIA_TYPE_AUDIO;
    pAudioOutputCodecContext->codec_id = AV_CODEC_ID_AAC;

    // Copy the codec attributes
    pAudioOutputCodecContext->channels = pAudioCodecCtx->channels;
    pAudioOutputCodecContext->channel_layout = pAudioCodecCtx->channel_layout;
    pAudioOutputCodecContext->sample_rate = pAudioCodecCtx->sample_rate;
    pAudioOutputCodecContext->bit_rate = 12000;//pAudioCodecCtx->sample_rate * pAudioCodecCtx->bits_per_coded_sample;
    pAudioOutputCodecContext->bits_per_coded_sample = pAudioCodecCtx->bits_per_coded_sample;
    pAudioOutputCodecContext->profile = pAudioCodecCtx->profile;
    //FF_PROFILE_AAC_LOW;
    // pAudioCodecCtx->bit_rate;

    // AV_SAMPLE_FMT_U8P, AV_SAMPLE_FMT_S16P
    //pAudioOutputCodecContext->sample_fmt = AV_SAMPLE_FMT_FLTP;//pAudioCodecCtx->sample_fmt;
    pAudioOutputCodecContext->sample_fmt = pAudioCodecCtx->sample_fmt;
    //pAudioOutputCodecContext->sample_fmt = AV_SAMPLE_FMT_U8;

    pAudioOutputCodecContext->sample_aspect_ratio = pAudioCodecCtx->sample_aspect_ratio;

    pAudioOutputCodecContext->time_base.num = pAudioCodecCtx->time_base.num;
    pAudioOutputCodecContext->time_base.den = pAudioCodecCtx->time_base.den;
    pAudioOutputCodecContext->ticks_per_frame = pAudioCodecCtx->ticks_per_frame;
    pAudioOutputCodecContext->frame_size = 1024;

    fprintf(stderr,"profile:%d, sample_rate:%d, channles:%d", pAudioOutputCodecContext->profile, pAudioOutputCodecContext->sample_rate, pAudioOutputCodecContext->channels);
    AVDictionary *opts = NULL;
    av_dict_set(&opts, "strict", "experimental", 0);

    if (avcodec_open2(pAudioOutputCodecContext, pAudioCodec, &opts) < 0) {
        fprintf(stderr, "\ncould not open codec\n");
    }

    av_dict_free(&opts);

#if 0
    // For Audio, this part is no need
    if(pAudioCodecCtx->extradata_size!=0)
    {
        NSLog(@"extradata_size !=0");
        pAudioOutputCodecContext->extradata = malloc(sizeof(uint8_t)*pAudioCodecCtx->extradata_size);
        memcpy(pAudioOutputCodecContext->extradata, pAudioCodecCtx->extradata, pAudioCodecCtx->extradata_size);
        pAudioOutputCodecContext->extradata_size = pAudioCodecCtx->extradata_size;
    }
    else
    {
        // For WMA test only
        pAudioOutputCodecContext->extradata_size = 0;
        NSLog(@"extradata_size ==0");
    }
#endif
}

if(fc->oformat->flags & AVFMT_GLOBALHEADER)
{
    pcc->flags |= CODEC_FLAG_GLOBAL_HEADER;
    pAudioOutputCodecContext->flags |= CODEC_FLAG_GLOBAL_HEADER;
}

if ( !( fc->oformat->flags & AVFMT_NOFILE ) )
{
    vRet = avio_open( &fc->pb, fc->filename, AVIO_FLAG_WRITE );
    if(vRet!=0)
    {
        fprintf(stderr,"avio_open(%s) error", fc->filename);
    }
}

// dump format in console
av_dump_format(fc, 0, pFilePath, 1);

vRet = avformat_write_header( fc, NULL );
if(vRet==0)
    return 1;
else
    return 0;
}

av_interleaved_write_frame:

void h264_file_write_frame(AVFormatContext *fc, int vStreamIdx, const void* p, int len, int64_t dts, int64_t pts )
{
    AVStream *pst = NULL;
    AVPacket pkt;

if ( 0 > vVideoStreamIdx )
    return;

// may be audio or video
pst = fc->streams[ vStreamIdx ];

// Init packet
av_init_packet( &pkt );

if(vStreamIdx ==vVideoStreamIdx)
{
    pkt.flags |= ( 0 >= getVopType( p, len ) ) ? AV_PKT_FLAG_KEY : 0;
    //pkt.flags |= AV_PKT_FLAG_KEY;
    pkt.stream_index = pst->index;
    pkt.data = (uint8_t*)p;
    pkt.size = len;


    pkt.dts = AV_NOPTS_VALUE;
    pkt.pts = AV_NOPTS_VALUE;

    // TODO: mark or unmark the log
    //fprintf(stderr, "dts=%lld, pts=%lld\n",dts,pts);
    // av_write_frame( fc, &pkt );
}
av_interleaved_write_frame( fc, &pkt );
}

av_write_trailer:

void h264_file_close(AVFormatContext *fc)
{
if ( !fc )
    return;

av_write_trailer( fc );


if ( fc->oformat && !( fc->oformat->flags & AVFMT_NOFILE ) && fc->pb )
    avio_close( fc->pb );

av_free( fc );
}

Thanks.

1 Answer:

Answer 0 (score: 0)

It looks like you are using the same AVFormatContext for both the input and the output?

On the line

pst = fc->streams[ vStreamIdx ];

you assign an AVStream * from the AVFormatContext associated with the input (the RTSP stream), but afterwards you try to write the packet back to that same context with av_interleaved_write_frame( fc, &pkt );. I find it helps to think of a context as one file, which makes this kind of thing easier to keep straight. I do something similar to what you are doing (not on iOS), and I use a separate AVFormatContext for each of the input (the RTSP stream) and the output (the mp4 file). If I am right, I think what you need to do is initialize a separate output AVFormatContext properly.
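
For example, the input side would then get its own context, completely separate from the output context allocated below. This is only an illustrative sketch (not from the original project); rtsp_url is a placeholder:

// Input side only: the RTSP source lives in its own AVFormatContext
AVFormatContext *input_format_context = NULL;
av_register_all();
avformat_network_init();                                                    // required for network protocols such as rtsp
if (avformat_open_input(&input_format_context, rtsp_url, NULL, NULL) < 0)   // rtsp_url: placeholder, e.g. "rtsp://host/stream"
    return;                                                                  // handle the error properly in real code
if (avformat_find_stream_info(input_format_context, NULL) < 0)
    return;
// av_read_frame(input_format_context, &packet) then feeds the separate output context shown below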

The following code (without error checking on everything) shows how I take the AVFormatContext * output_format_context = NULL and the AVFormatContext * input_format_context I associated with the RTSP stream, and write from one to the other. This happens after I have already fetched a packet, etc. In your case it looks like you are populating the packet yourself, whereas I simply take the packet from av_read_frame and repackage it.

Here is the code that would likely go in your write-frame function (though it also includes writing the header).

AVFormatContext * output_format_context;
AVOutputFormat * output_format;
AVStream * in_stream_2;
AVStream * out_stream_2;
int i;
// Allocate the context with the output file
avformat_alloc_output_context2(&output_format_context, NULL, NULL, out_filename.c_str());
// Point to AVOutputFormat * output_format for manipulation
output_format = output_format_context->oformat;
// Loop through all streams
for (i = 0; i < input_format_context->nb_streams; i++) {
    // Create a pointer to the input stream that was allocated earlier in the code
    AVStream *in_stream = input_format_context->streams[i];
    // Create a pointer to a new stream that will be part of the output
    AVStream *out_stream = avformat_new_stream(output_format_context, in_stream->codec->codec);
    // Set time_base of the new output stream to equal the input stream one since I'm not changing anything (can avoid but get a deprecation warning)
    out_stream->time_base = in_stream->time_base;
    // This is the non-deprecated way of copying all the parameters from the input stream into the output stream since everything stays the same
    avcodec_parameters_from_context(out_stream->codecpar, in_stream->codec);
    // I don't remember what this is for :)
    out_stream->codec->codec_tag = 0;
    // This just sets a flag from the format context to the stream relating to the header
    if (output_format_context->oformat->flags & AVFMT_GLOBALHEADER)
         out_stream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
// Check NOFILE flag and open the output file context (previously the output file was only associated with the format context, now it is actually opened).
if (!(output_format->flags & AVFMT_NOFILE))
    avio_open(&output_format_context->pb, out_filename.c_str(), AVIO_FLAG_WRITE);
// Write the header (not sure if this is always needed, but for h264 I believe it is).
avformat_write_header(output_format_context,NULL);
// Re-getting the appropriate stream that was populated above (this should allow for both audio/video)
in_stream_2 = input_format_context->streams[packet.stream_index];
out_stream_2 = output_format_context->streams[packet.stream_index];
// Rescaling pts and dts, duration and pos - you would do as you need in your code.
packet.pts = av_rescale_q_rnd(packet.pts, in_stream_2->time_base, out_stream_2->time_base, (AVRounding) (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
packet.dts = av_rescale_q_rnd(packet.dts, in_stream_2->time_base, out_stream_2->time_base, (AVRounding) (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
packet.duration = av_rescale_q(packet.duration, in_stream_2->time_base, out_stream_2->time_base);
packet.pos = -1;
// The first packet of my stream always gives me negative dts/pts so this just protects that first one for my purposes.  You probably don't need.
if (packet.dts < 0) packet.dts = 0;
if (packet.pts < 0) packet.pts = 0;
// Finally write the frame
av_interleaved_write_frame(output_format_context, &packet);
// ....
// Write trailer, close/cleanup... etc
// ....

This code is fairly bare-bones and does not include the setup (which it sounds like you are doing correctly anyway). I also imagine this code could be cleaned up and tweaked for your purposes, but it works for me for re-writing an RTSP stream into a file (actually many files in my case, but that code is not shown).
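
The "Write trailer, close/cleanup" step elided at the end of the code above would, under the same assumptions, typically expand to something like this:

// Finish the file: flush the muxer, write the MP4 trailer, then close and free the output context
av_write_trailer(output_format_context);
if (!(output_format_context->oformat->flags & AVFMT_NOFILE))
    avio_closep(&output_format_context->pb);
avformat_free_context(output_format_context);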

The code is C code, so you may need some minor adjustments to make it Swift-compatible (perhaps for some of the library function calls). I think overall it should be compatible.

Hope this helps point you in the right direction. It was pieced together from several sample code sources (I don't remember where) plus warning hints from the library itself.