"找不到moov原子"使用av_interleaved_write_frame但不使用avio_write时

Asked: 2017-10-08 21:38:55

Tags: ffmpeg libavcodec x264

I'm trying to put together a class that can take arbitrary frames and build a video from them using the ffmpeg 3.3.3 API. I've struggled to find a good example of this, since the examples out there still seem to use deprecated functions, so I've attempted to patch this together using the documentation in the headers and by referring to a few github repos that appear to be using the new API.

If I write the encoded packets to the output using av_interleaved_write_frame, ffprobe outputs the following:

[mov,mp4,m4a,3gp,3g2,mj2 @ 0000000002760120] moov atom not found0
X:\Diagnostics.mp4: Invalid data found when processing input

ffplay cannot play a file generated with this method.

If I instead swap that call out for a call to avio_write, ffprobe outputs:

Input #0, h264, from 'X:\Diagnostics.mp4':
  Duration: N/A, bitrate: N/A
    Stream #0:0: Video: h264 (Main), yuv420p(progressive), 672x380 [SAR 1:1 DAR 168:95], 25 fps, 25 tbr, 1200k tbn, 50 tbc

ffplay can play the file produced this way, up until it gets towards the end, where it outputs:

Input #0, h264, from 'X:\Diagnostics.mp4':    0KB sq=    0B f=0/0
  Duration: N/A, bitrate: N/A
    Stream #0:0: Video: h264 (Main), yuv420p(progressive), 672x380 [SAR 1:1 DAR 168:95], 25 fps, 25 tbr, 1200k tbn, 50 tbc
[h264 @ 000000000254ef80] error while decoding MB 31 22, bytestream -65
[h264 @ 000000000254ef80] concealing 102 DC, 102 AC, 102 MV errors in I frame
    nan M-V:    nan fd=   1 aq=    0KB vq=    0KB sq=    0B f=0/0

VLC cannot play the file from either method. The file from the second method shows a single black frame and then hides the video output; the first shows nothing at all. Neither of them reports a video duration.

Does anyone have any idea what is going on here? I assume my solution is close to working, since I'm getting a large number of valid frames out of it.

Code:

int main()
{
    OutputStream Stream( "Output.mp4", 672, 380, 25, true );
    Stream.Initialize();

    int i = 100;
    while( i-- )
    {
        //... Generate a frame

        Stream.WriteFrame( Frame );
    }
    Stream.CloseFile();
}

OutputStream::OutputStream( const std::string& Path, unsigned int Width, unsigned int Height, int Framerate, bool IsBGR )
: Stream()
, FrameIndex( 0 )
{
    auto& ID = *m_InternalData;

    ID.Path = Path;

    ID.Width = Width;
    ID.Height = Height;
    ID.Framerate.num = Framerate;
    ID.Framerate.den = 1;

    ID.PixelFormat = IsBGR ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_RGB24;
    ID.CodecID = AV_CODEC_ID_H264;
    ID.CodecTag = 0;

    ID.AspectRatio.num = 1;
    ID.AspectRatio.den = 1;
}

CameraStreamError OutputStream::Initialize()
{
    av_log_set_callback( &InputStream::LogCallback );
    av_register_all();
    avformat_network_init();

    auto& ID = *m_InternalData;

    av_init_packet( &ID.Packet );

    int Result = avformat_alloc_output_context2( &ID.FormatContext, nullptr, nullptr, ID.Path.c_str() );
    if( Result < 0 || !ID.FormatContext )
    {
        STREAM_ERROR( UnknownError );
    }

    AVCodec* Encoder = avcodec_find_encoder( ID.CodecID );

    if( !Encoder )
    {
        STREAM_ERROR( NoH264Support );
    }

    AVStream* OutStream = avformat_new_stream( ID.FormatContext, Encoder );
    if( !OutStream )
    {
        STREAM_ERROR( UnknownError );
    }

    ID.CodecContext = avcodec_alloc_context3( Encoder );
    if( !ID.CodecContext )
    {
        STREAM_ERROR( NoH264Support );
    }

    ID.CodecContext->time_base = av_inv_q(ID.Framerate);

    {
        AVCodecParameters* CodecParams = OutStream->codecpar;

        CodecParams->width = ID.Width;
        CodecParams->height = ID.Height;
        CodecParams->format = AV_PIX_FMT_YUV420P;
        CodecParams->codec_id = ID.CodecID;
        CodecParams->codec_type = AVMEDIA_TYPE_VIDEO;
        CodecParams->profile = FF_PROFILE_H264_MAIN;
        CodecParams->level = 40;

        Result = avcodec_parameters_to_context( ID.CodecContext, CodecParams );
        if( Result < 0 )
        {
            STREAM_ERROR( EncoderCreationError );
        }
    }

    if( ID.IsVideo )
    {
        ID.CodecContext->width = ID.Width;
        ID.CodecContext->height = ID.Height;
        ID.CodecContext->sample_aspect_ratio = ID.AspectRatio;
        ID.CodecContext->time_base = av_inv_q(ID.Framerate);

        if( Encoder->pix_fmts )
        {
            ID.CodecContext->pix_fmt = Encoder->pix_fmts[0];
        }
        else
        {
            ID.CodecContext->pix_fmt = ID.PixelFormat;
        }
    }
    //Snip

    Result = avcodec_open2( ID.CodecContext, Encoder, nullptr );
    if( Result < 0 )
    {
        STREAM_ERROR( EncoderCreationError );
    }

    Result = avcodec_parameters_from_context( OutStream->codecpar, ID.CodecContext );
    if( Result < 0 )
    {
        STREAM_ERROR( EncoderCreationError );
    }

    if( ID.FormatContext->oformat->flags & AVFMT_GLOBALHEADER )
    {
        ID.CodecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    }

    OutStream->time_base = ID.CodecContext->time_base;
    OutStream->avg_frame_rate = av_inv_q(OutStream->time_base);

    if( !( ID.FormatContext->oformat->flags & AVFMT_NOFILE ) )
    {
        Result = avio_open( &ID.FormatContext->pb, ID.Path.c_str(), AVIO_FLAG_WRITE );
        if( Result < 0 )
        {
            STREAM_ERROR( FileNotWriteable );
        }
    }

    Result = avformat_write_header( ID.FormatContext, nullptr );
    if( Result < 0 )
    {
        STREAM_ERROR( WriteFailed );
    }

    ID.Output = std::make_unique<FFMPEG::Frame>( ID.CodecContext->width, ID.CodecContext->height, ID.CodecContext->pix_fmt );

    ID.ConversionContext = sws_getCachedContext(
        ID.ConversionContext,
        ID.Width,
        ID.Height,
        ID.PixelFormat,
        ID.CodecContext->width,
        ID.CodecContext->height,
        ID.CodecContext->pix_fmt,
        SWS_BICUBIC,
        NULL,
        NULL,
        NULL );

    return CameraStreamError::Success;
}

CameraStreamError OutputStream::WriteFrame( FFMPEG::Frame* Frame )
{
    auto& ID = *m_InternalData;

    ID.Output->Prepare();

    int OutputSliceSize = sws_scale( m_InternalData->ConversionContext, Frame->GetFrame()->data, Frame->GetFrame()->linesize, 0, Frame->GetHeight(), ID.Output->GetFrame()->data, ID.Output->GetFrame()->linesize );

    ID.Output->GetFrame()->pts = ID.CodecContext->frame_number;

    int Result = avcodec_send_frame( GetData().CodecContext, ID.Output->GetFrame() );
    if( Result == AVERROR(EAGAIN) )
    {
        CameraStreamError ResultErr = SendAll();
        if( ResultErr != CameraStreamError::Success )
        {
            return ResultErr;
        }
        Result = avcodec_send_frame( GetData().CodecContext, ID.Output->GetFrame() );
    }

    if( Result == 0 )
    {
        CameraStreamError ResultErr = SendAll();
        if( ResultErr != CameraStreamError::Success )
        {
            return ResultErr;
        }
    }

    FrameIndex++;

    return CameraStreamError::Success;
}

CameraStreamError OutputStream::SendAll( void )
{
    auto& ID = *m_InternalData;

    int Result;
    do 
    {
        AVPacket TempPacket = {};
        av_init_packet( &TempPacket );

        Result = avcodec_receive_packet( GetData().CodecContext, &TempPacket );
        if( Result == 0 )
        {
            av_packet_rescale_ts( &TempPacket, ID.CodecContext->time_base, ID.FormatContext->streams[0]->time_base );

            TempPacket.stream_index = ID.FormatContext->streams[0]->index;

            //avio_write( ID.FormatContext->pb, TempPacket.data, TempPacket.size );
            Result = av_interleaved_write_frame( ID.FormatContext, &TempPacket );
            if( Result < 0 )
            {
                STREAM_ERROR( WriteFailed );
            }

            av_packet_unref( &TempPacket );
        }
        else if( Result != AVERROR(EAGAIN) )
        {
            continue;
        }
        else if( Result != AVERROR_EOF )
        {
            break;
        }
        else if( Result < 0 )
        {
            STREAM_ERROR( WriteFailed );
        }
    } while ( Result == 0);

    return CameraStreamError::Success;
}

CameraStreamError OutputStream::CloseFile()
{
    auto& ID = *m_InternalData;

    while( true )
    {
        //Flush
        int Result = avcodec_send_frame( ID.CodecContext, nullptr );
        if( Result == 0 )
        {
            CameraStreamError StrError = SendAll();
            if( StrError != CameraStreamError::Success )
            {
                return StrError;
            }
        }
        else if( Result == AVERROR_EOF )
        {
            break;
        }
        else
        {
            STREAM_ERROR( WriteFailed );
        }
    }

    int Result = av_write_trailer( ID.FormatContext );
    if( Result < 0 )
    {
        STREAM_ERROR( WriteFailed );
    }

    if( !( ID.FormatContext->oformat->flags & AVFMT_NOFILE ) )
    {
        Result = avio_close( ID.FormatContext->pb );
        if( Result < 0 )
        {
            STREAM_ERROR( WriteFailed );
        }
    }

    return CameraStreamError::Success;
}

Note that I have simplified a few things and inlined some bits that lived elsewhere. I have also removed all of the shutdown code, since nothing that happens after the file is closed should matter.

There is a full repo here: https://github.com/IanNorris/Witness. If you clone this, the problem is with the 'Diagnostics' output; the normal Output file is fine. There are two hardcoded paths to X:\.

1 Answer:

Answer 0 (score: 5)

The file you get from avio_write() is not an MP4 file; it is just the compressed H.264 packets written one after another, also known as Annex B H.264.

To write a file in a container using av_interleaved_write_frame(), you also need to call avformat_write_header() before, and av_write_trailer() after, writing all of the compressed video/audio packets. Without that, the file will not contain the global headers (such as the moov chunk in MP4) and will not be recognized as a valid file by external applications, which is exactly what the errors here indicate.
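For reference, here is a condensed sketch of the call order described above, modelled on FFmpeg's muxing example. The names (FormatContext, CodecContext, VideoStream) are illustrative placeholders rather than identifiers from the code above, and error handling is omitted:

avformat_write_header( FormatContext, nullptr );   // must run before any packet is written

AVPacket Packet;
av_init_packet( &Packet );
Packet.data = nullptr;
Packet.size = 0;

while( avcodec_receive_packet( CodecContext, &Packet ) == 0 )
{
    // Rescale from the encoder time base to the stream time base before muxing.
    av_packet_rescale_ts( &Packet, CodecContext->time_base, VideoStream->time_base );
    Packet.stream_index = VideoStream->index;

    // The muxer wraps the packet for the container; avio_write() would bypass it entirely.
    av_interleaved_write_frame( FormatContext, &Packet );
    av_packet_unref( &Packet );
}

av_write_trailer( FormatContext );   // finalizes the container, e.g. the moov chunk for MP4
avio_closep( &FormatContext->pb );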

Also see the more detailed explanation in the muxing documentation.
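As an extra sanity check (not part of the original answer), the raw Annex B stream produced by the avio_write() path can be remuxed into MP4 with a stream copy; if the remuxed file plays, the encoded packets themselves are fine and only the containerization step is at fault. A rough sketch under that assumption, using a hypothetical input name Diagnostics.h264 and minimal error handling:

extern "C" {
#include <libavformat/avformat.h>
}

// Remux raw Annex B H.264 into an MP4 container without re-encoding (sketch only).
int RemuxToMp4( const char* InPath, const char* OutPath )
{
    av_register_all();

    AVFormatContext* In = nullptr;
    if( avformat_open_input( &In, InPath, nullptr, nullptr ) < 0 ) return -1;
    if( avformat_find_stream_info( In, nullptr ) < 0 ) return -1;

    AVFormatContext* Out = nullptr;
    avformat_alloc_output_context2( &Out, nullptr, nullptr, OutPath );

    // Copy the codec parameters of the single video stream; no encoder is opened.
    AVStream* OutStream = avformat_new_stream( Out, nullptr );
    avcodec_parameters_copy( OutStream->codecpar, In->streams[0]->codecpar );
    OutStream->codecpar->codec_tag = 0;

    avio_open( &Out->pb, OutPath, AVIO_FLAG_WRITE );
    avformat_write_header( Out, nullptr );

    AVPacket Packet;
    while( av_read_frame( In, &Packet ) >= 0 )
    {
        // Raw Annex B carries no real timestamps, so these come from the
        // demuxer's assumed frame rate (25 fps by default).
        Packet.stream_index = 0;
        av_packet_rescale_ts( &Packet, In->streams[0]->time_base, OutStream->time_base );
        av_interleaved_write_frame( Out, &Packet );
        av_packet_unref( &Packet );
    }

    av_write_trailer( Out );
    avio_closep( &Out->pb );
    avformat_close_input( &In );
    avformat_free_context( Out );
    return 0;
}

Usage would simply be RemuxToMp4( "Diagnostics.h264", "Remuxed.mp4" ).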