Using libav instead of ffmpeg

Date: 2015-01-07 02:45:16

Tags: video ffmpeg gstreamer libav

I want to stream video over HTTP. I am using Ogg (Theora + Vorbis), and I already have a sender and a receiver that I can run from the command line:

Sender:

ffmpeg -f video4linux2 -s 320x240 -i /dev/mycam -codec:v libtheora -qscale:v 5 -f ogg http://127.0.0.1:8080 

Receiver:

sudo gst-launch-0.10 tcpserversrc port=8080 ! oggdemux ! theoradec ! autovideosink

Right now the sender sends both audio and video, but the receiver plays only the video.

It works perfectly, but now I no longer want to use the ffmpeg command-line tool and want to use only the libav* libraries instead.

Here is my streaming class:

class VCORE_LIBRARY_EXPORT VVideoWriter : private boost::noncopyable
{
public:
    VVideoWriter( );
    ~VVideoWriter( );

    bool openFile( const std::string& name,
                   int fps, int videoBitrate, int width, int height,
                   int audioSampleRate, bool stereo, int audioBitrate );
    void close( );

    bool writeVideoFrame( const uint8_t* image, int64_t timestamp );
    bool writeAudioFrame( const int16_t* data, int64_t timestamp  );

    int audioFrameSize( ) const;

private:
    AVFrame *m_videoFrame;
    AVFrame *m_audioFrame;

    AVFormatContext *m_context;
    AVStream *m_videoStream;
    AVStream *m_audioStream;

    int64_t m_startTime;
};

Initialization:

bool VVideoWriter::openFile( const std::string& name,
                             int fps, int videoBitrate, int width, int height,
                             int audioSampleRate, bool stereo, int audioBitrate )
{
         if( ! m_context )
         {
            // initialize the AV context
            m_context = avformat_alloc_context( );
            assert( m_context );

            // get the output format
            m_context->oformat = av_guess_format( "ogg", name.c_str( ), nullptr );
            if( m_context->oformat )
            {
                strcpy( m_context->filename, name.c_str( ) );

                auto codecID = AV_CODEC_ID_THEORA;
                auto codec = avcodec_find_encoder( codecID );

                if( codec )
                {
                    m_videoStream = avformat_new_stream( m_context, codec );
                    assert( m_videoStream );

                    // initialize codec
                    auto codecContext = m_videoStream->codec;
                    bool globalHeader = m_context->oformat->flags & AVFMT_GLOBALHEADER;
                    if( globalHeader )
                        codecContext->flags |= CODEC_FLAG_GLOBAL_HEADER;
                    codecContext->codec_id = codecID;
                    codecContext->codec_type = AVMEDIA_TYPE_VIDEO;
                    codecContext->width = width;
                    codecContext->height = height;
                    codecContext->time_base.den = fps;
                    codecContext->time_base.num = 1;
                    codecContext->bit_rate = videoBitrate;
                    codecContext->pix_fmt = PIX_FMT_YUV420P;
                    codecContext->flags |= CODEC_FLAG_QSCALE;
                    codecContext->global_quality = FF_QP2LAMBDA * 5;

                    int res = avcodec_open2( codecContext, codec, nullptr );

                    if( res >= 0 )
                    {
                        auto codecID = AV_CODEC_ID_VORBIS;
                        auto codec = avcodec_find_encoder( codecID );

                        if( codec )
                        {
                            m_audioStream = avformat_new_stream( m_context, codec );
                            assert( m_audioStream );
                            // initialize codec
                            auto codecContext = m_audioStream->codec;

                            bool globalHeader = m_context->oformat->flags & AVFMT_GLOBALHEADER;
                            if( globalHeader )
                                codecContext->flags |= CODEC_FLAG_GLOBAL_HEADER;
                            codecContext->codec_id = codecID;
                            codecContext->codec_type = AVMEDIA_TYPE_AUDIO;
                            codecContext->sample_fmt = AV_SAMPLE_FMT_FLTP;
                            codecContext->bit_rate = audioBitrate;
                            codecContext->sample_rate = audioSampleRate;
                            codecContext->channels = stereo ? 2 : 1;
                            codecContext->channel_layout = stereo ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;

                            res = avcodec_open2( codecContext, codec, nullptr );

                            if( res >= 0 )
                            {
                                // try to open the file
                                if( avio_open( &m_context->pb, m_context->filename, AVIO_FLAG_WRITE ) >= 0 )
                                {
                                    m_audioFrame->nb_samples = codecContext->frame_size;
                                    m_audioFrame->format = codecContext->sample_fmt;
                                    m_audioFrame->channel_layout = codecContext->channel_layout;

                                    boost::posix_time::ptime time_t_epoch( boost::gregorian::date( 1970, 1, 1 ) );
                                    m_context->start_time_realtime = ( boost::posix_time::microsec_clock::universal_time( ) - time_t_epoch ).total_microseconds( );
                                    m_startTime = -1;

                                    // write the header
                                    if( avformat_write_header( m_context, nullptr ) >= 0 )
                                    {
                                        return true;
                                    }
                                    else std::cerr << "VVideoWriter: failed to write video header" << std::endl;
                                }
                                else std::cerr << "VVideoWriter: failed to open video file " << name << std::endl;
                            }
                            else std::cerr << "VVideoWriter: failed to initialize audio codec" << std::endl;
                        }
                        else std::cerr << "VVideoWriter: requested audio codec is not supported" << std::endl;
                    }
                    else std::cerr << "VVideoWriter: failed to initialize video codec" << std::endl;
                }
                else std::cerr << "VVideoWriter: requested video codec is not supported" << std::endl;
            }
            else std::cerr << "VVideoWriter: requested video format is not supported" << std::endl;

            avformat_free_context( m_context );
            m_context = nullptr;
            m_videoStream = nullptr;
            m_audioStream = nullptr;
        }
        return false;
}

Writing video:

bool VVideoWriter::writeVideoFrame( const uint8_t* image, int64_t timestamp )
{
    if( m_context ) {
        auto codecContext = m_videoStream->codec;
        avpicture_fill( reinterpret_cast<AVPicture*>( m_videoFrame ),
                        const_cast<uint8_t*>( image ),
                        codecContext->pix_fmt, codecContext->width, codecContext->height );

        AVPacket pkt;
        av_init_packet( & pkt );
        pkt.data = nullptr;
        pkt.size = 0;
        int gotPacket = 0;
        if( ! avcodec_encode_video2( codecContext, &pkt, m_videoFrame, & gotPacket ) ) {
            if( gotPacket == 1 ) {
                pkt.stream_index = m_videoStream->index;
                int res;
                {
                    pkt.pts = AV_NOPTS_VALUE;
                    pkt.dts = AV_NOPTS_VALUE;
                    pkt.stream_index = m_videoStream->index;
                    res = av_write_frame( m_context, &pkt );
                }
                av_free_packet( & pkt );
                return res >= 0;
            }
            assert( ! pkt.size );
            return true;
        }
    }
    return false;
}

Writing audio (for now I write dummy test audio):

bool VVideoWriter::writeAudioFrame( const int16_t* data, int64_t timestamp )
{
    if( m_context ) {
        auto codecContext = m_audioStream->codec;

        int buffer_size = av_samples_get_buffer_size(nullptr, codecContext->channels, codecContext->frame_size, codecContext->sample_fmt, 0);

        float *samples = (float*)av_malloc(buffer_size);

        for (int i = 0; i < buffer_size / sizeof(float); i++)
            samples[i] = 1000. * sin((double)i/2.);

        int ret = avcodec_fill_audio_frame( m_audioFrame, codecContext->channels, codecContext->sample_fmt, (const uint8_t*)samples, buffer_size, 0);

        assert( ret >= 0 );
        (void)(ret);

        AVPacket pkt;
        av_init_packet( & pkt );
        pkt.data = nullptr;
        pkt.size = 0;
        int gotPacket = 0;
        if( ! avcodec_encode_audio2( codecContext, &pkt, m_audioFrame, & gotPacket ) ) {
            if( gotPacket == 1 ) {
                pkt.stream_index = m_audioStream->index;
                int res;
                {
                    pkt.pts = AV_NOPTS_VALUE;
                    pkt.dts = AV_NOPTS_VALUE;
                    pkt.stream_index = m_audioStream->index;
                    res = av_write_frame( m_context, &pkt );
                }
                av_free_packet( & pkt );
                return res >= 0;
            }
            assert( ! pkt.size );
            return true;
        }
        return false;
    }
    return false;
}

Here is the test example (I send video from a webcam together with dummy audio):

class TestVVideoWriter : public sigslot::has_slots<>
{
public:
    TestVVideoWriter( ) :
        m_fileOpened( false )
    {
    }

    void onCapturedFrame( cricket::VideoCapturer*, const cricket::CapturedFrame* capturedFrame )
    {
        if( m_fileOpened ) {
            m_writer.writeVideoFrame( reinterpret_cast<const uint8_t*>( capturedFrame->data ),
                                      capturedFrame->time_stamp / 1000 );
            m_writer.writeAudioFrame( nullptr , 0 );


        } else {
              m_fileOpened = m_writer.openFile( "http://127.0.0.1:8080",
                                                15, 40000, capturedFrame->width, capturedFrame->height,
                                                16000, false, 64000 );
        }
    }

public:
    vcore::VVideoWriter m_writer;
    bool m_fileOpened;
};

TestVVideoWriter testWriter;

BOOST_AUTO_TEST_SUITE(TEST_VIDEO_WRITER)

BOOST_AUTO_TEST_CASE(testWritingVideo)
{
    cricket::LinuxDeviceManager deviceManager;
    std::vector<cricket::Device> devs;
    if( deviceManager.GetVideoCaptureDevices( &devs ) ) {
        if( devs.size( ) ) {
            boost::shared_ptr<cricket::VideoCapturer> camera( deviceManager.CreateVideoCapturer( devs[ 0 ] ) );
            if( camera ) {
                cricket::VideoFormat format( 320, 240, cricket::VideoFormat::FpsToInterval( 30 ),
                                             camera->GetSupportedFormats( )->front( ).fourcc );
                cricket::VideoFormat best;
                if( camera->GetBestCaptureFormat( format, &best ) ) {
                    camera->SignalFrameCaptured.connect( &testWriter, &TestVVideoWriter::onCapturedFrame );
                    if( camera->Start( best ) != cricket::CS_FAILED ) {
                        boost::this_thread::sleep( boost::posix_time::seconds( 10 ) );
                        return;
                    }
                }
            }
        }
    }
    std::cerr << "Problem has occured with camera" << std::endl;
}

BOOST_AUTO_TEST_SUITE_END() // TEST_VIDEO_WRITER 

However, in this case gstreamer only starts playing the video once my test program stops running (after 10 seconds in this case). That does not suit me; I want gstreamer to start playing as soon as my test program starts.

Can anyone help me?

P.S. Sorry for my English.

1 Answer:

Answer 0 (score: 0):

Finally, I found the answer: I was not writing pts and dts for the audio frames.
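
A minimal sketch of what writing those timestamps could look like inside writeAudioFrame, assuming a new int64_t member m_audioPts (initialized to 0; it is not part of the class shown above) that counts encoded samples. This is only an illustration of the idea, not the poster's actual fix:

// Sketch: give each audio frame a pts counted in samples before encoding.
m_audioFrame->pts = m_audioPts;            // m_audioPts is an assumed int64_t member
m_audioPts += m_audioFrame->nb_samples;

AVPacket pkt;
av_init_packet( &pkt );
pkt.data = nullptr;
pkt.size = 0;
int gotPacket = 0;
if( ! avcodec_encode_audio2( codecContext, &pkt, m_audioFrame, &gotPacket ) && gotPacket ) {
    // After avcodec_open2 the audio codec time base is 1/sample_rate, so
    // rescale the encoder's timestamps to the stream time base instead of
    // overwriting them with AV_NOPTS_VALUE.
    pkt.pts = av_rescale_q( pkt.pts, codecContext->time_base, m_audioStream->time_base );
    pkt.dts = av_rescale_q( pkt.dts, codecContext->time_base, m_audioStream->time_base );
    pkt.stream_index = m_audioStream->index;
    int res = av_write_frame( m_context, &pkt );
    av_free_packet( &pkt );
    return res >= 0;
}

With packets carrying real, monotonically increasing timestamps, the muxer can interleave audio and video properly and the receiver no longer has to wait for the stream to end before it can play.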