Custom buffer for FFmpeg

Date: 2015-12-03 03:10:43

Tags: c++ windows video ffmpeg buffer

I have a question about buffered reading with FFmpeg. The idea is the following: an external module (which I cannot change) delivers a video stream to me in chunks; it hands me the input data and its size in bytes (the input parameters of the "framfunction" function). I have to copy that input data into a buffer and use FFmpeg (a Zeranoe build) to read it and extract video frames. Every time new data arrives, my function "framfunction" is called again. Any data left unprocessed by the first run is moved to the beginning of the buffer, the new data is appended after it on the second run, and so on. It is basically based on this source and the Dranger tutorial. My current attempt looks like this; just look at the comments in the code (I only kept the ones about the current buffering approach) to get a picture of what I am trying to do. I know it is messy, and it works, sort of, but it skips some frames. Any suggestions about the FFmpeg code and the buffer design are welcome:

#include <iostream>
#include <string>
#include <cstring> // for memcpy/memmove

extern "C"
{
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
#include <libavformat/avio.h>
#include <libavutil/file.h>
}
struct buffer_data {
   uint8_t *ptr;
   size_t size;
};

static int read_packet(void *opaque, uint8_t *buf, int buf_size)
{
   struct buffer_data *bd = (struct buffer_data *)opaque;
   buf_size = FFMIN(buf_size, bd->size);
   memcpy(buf, bd->ptr, buf_size);
   bd->ptr += buf_size;
   bd->size -= buf_size;
   return buf_size;
}
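
// Side note (an assumption about newer FFmpeg, not part of the original attempt):
// current FFmpeg versions expect a custom avio read callback to return
// AVERROR_EOF rather than 0 when no data is left, otherwise avio may keep
// calling it in a loop. A variant of the callback with that behaviour could
// look like this:
static int read_packet_eof(void *opaque, uint8_t *buf, int buf_size)
{
   struct buffer_data *bd = (struct buffer_data *)opaque;
   buf_size = FFMIN(buf_size, (int)bd->size);
   if (buf_size <= 0)
      return AVERROR_EOF; // signal end of stream instead of returning 0
   memcpy(buf, bd->ptr, buf_size);
   bd->ptr += buf_size;
   bd->size -= buf_size;
   return buf_size;
}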

class videoclass
{
private:
   uint8_t* inputdatabuffer;
   size_t offset;

public:
   videoclass();
   ~videoclass();
   int framfunction(uint8_t* inputbytes, int inputbytessize);
};

videoclass::videoclass()
   : inputdatabuffer(nullptr)
   , offset(0)
{
   inputdatabuffer = new uint8_t[8388608]; //buffer where the input data will be stored
}

videoclass::~videoclass()
{
   delete[] inputdatabuffer;
}


int videoclass::framfunction(uint8_t* inputbytes, int inputbytessize)
{
   int i, videoStream, numBytes, frameFinished;
   AVFormatContext *pFormatCtx = NULL;
   AVCodecContext *pCodecCtx = NULL;
   AVIOContext   *avio_ctx = NULL;
   AVCodec   *pCodec = NULL;
   AVFrame   *pFrame = NULL;
   AVFrame   *pFrameRGB = NULL;
   AVPacket packet;
   uint8_t   *buffer = NULL;
   uint8_t   *avio_ctx_buffer = NULL;
   size_t   avio_ctx_buffer_size = 4096;
   size_t   bytes_processed = 0;
   struct buffer_data bd = { 0 };

   //if (av_file_map("sample.ts", &inputbytes, &inputbytessize, 0, NULL) < 0)//
   //   return -1;

   memcpy(inputdatabuffer + offset, inputbytes, inputbytessize);//copy new data to buffer inputdatabuffer with offset calculated at the end of previous function run. In other words - cope new data after unprocessed data from a previous call 
   offset += inputbytessize; //total number of bytes in buffer. Size of an unprocessed data from the last run + size of new data (inputbytessize)

   bd.ptr = inputdatabuffer;
   bd.size = offset;

   if (!(pFormatCtx = avformat_alloc_context()))
      return -1;
   avio_ctx_buffer = (uint8_t *)av_malloc(avio_ctx_buffer_size);
   avio_ctx = avio_alloc_context(avio_ctx_buffer, avio_ctx_buffer_size,0, &bd, &read_packet, NULL, NULL);
   pFormatCtx->pb = avio_ctx;

   av_register_all(); 
   avcodec_register_all();

   pFrame = av_frame_alloc(); 
   pFrameRGB = av_frame_alloc(); 

   if (avformat_open_input(&pFormatCtx, NULL, NULL, NULL) != 0) 
      return -2;
   if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
      return -3;

   videoStream = -1;
   for (i = 0; i < pFormatCtx->nb_streams; i++)
      if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) { 
         videoStream = i;
         break;
      }
   if (videoStream == -1) 
      return -4;

   pCodecCtx = pFormatCtx->streams[videoStream]->codec; 

   pCodec = avcodec_find_decoder(pCodecCtx->codec_id); 
   if (pCodec == NULL){
      std::cout << "Unsupported codec" << std::endl;
      return -5;
   }

   if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
      return -6; 

   numBytes = avpicture_get_size(PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height); 
   buffer = (uint8_t *)av_malloc(numBytes*sizeof(uint8_t));

   avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height); 

   while (av_read_frame(pFormatCtx, &packet) >= 0){
      if (packet.stream_index == videoStream){ 
         avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet); 
         if (frameFinished){ 
            std::cout << "Yaay, frame found" << std::endl;
            }

      }
      av_free_packet(&packet); 

      bytes_processed = (size_t)pFormatCtx->pb->pos; //data which is processed so far (x bytes out of inputbytessize)??????????????????????????????
   }

   offset -= bytes_processed; //size of unprocessed data

   av_free(buffer);
   av_free(pFrameRGB);
   av_free(pFrame);

   avcodec_close(pCodecCtx);

   av_freep(&avio_ctx->buffer);
   av_freep(&avio_ctx);


   avformat_close_input(&pFormatCtx);


   memmove(inputdatabuffer, inputdatabuffer + bytes_processed, offset);//move unprocessed data to begining of the main buffer

   return 0;
}

Calling my function would look something like this:

WHILE(VIDEO_INPUT)
{
    READ VIDEO DATA FROM INPUT BUFFER
    STORE DATA FROM INPUT BUFFER AND SIZE OF THAT DATA TO VARIABLES NEW_DATA AND NEW_DATA_SIZE
    CALL FUNCTION FRAMFUNCTION AND PASS NEW_DATA AND NEW_DATA_SIZE
    DO OTHER THINGS
}
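
In actual C++ the driver loop above might look roughly like this; it is only a minimal sketch, and the external source, its read_video_chunk function and the chunk size are hypothetical placeholders, not part of the real module:

#include <cstdint>
#include <vector>

// Hypothetical interface of the external module that delivers the stream
// (assumed for this sketch only): returns the number of bytes written to dst,
// or 0 when the video input ends.
int read_video_chunk(uint8_t* dst, int max_bytes);

int main()
{
   videoclass decoder;
   std::vector<uint8_t> chunk(65536); // assumed chunk size, purely illustrative

   while (true)
   {
      int new_data_size = read_video_chunk(chunk.data(), (int)chunk.size());
      if (new_data_size <= 0)
         break;                        // no more video input
      decoder.framfunction(chunk.data(), new_data_size);
      // do other things
   }
   return 0;
}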

What I would like to know is the exact size of the unprocessed data. The comments in the code show my attempt, but I do not think it is good enough, so I need some help with this problem.

EDIT: The key question is how to get the correct "bytes_processed" size. I have also made a pdf that explains how my buffer is supposed to work: pdf file. Thanks.
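
To make the intended bookkeeping concrete, here is a bare sketch of the append/consume/compact cycle, without any FFmpeg calls; consumed_bytes stands for whatever the correct "bytes_processed" value turns out to be, and all names are illustrative only:

#include <cstdint>
#include <cstring>

struct framebuffer {
   uint8_t* data;   // fixed-size storage (8388608 bytes in the code above)
   size_t   offset; // number of valid, not yet consumed bytes in data
};

// Step 1: append the newly received chunk after the leftover bytes.
void append_chunk(framebuffer& fb, const uint8_t* input, size_t input_size)
{
   memcpy(fb.data + fb.offset, input, input_size);
   fb.offset += input_size;
}

// Step 2: after the demuxer has consumed some bytes, drop them and move the
// remaining unprocessed bytes back to the start of the buffer.
void compact(framebuffer& fb, size_t consumed_bytes)
{
   fb.offset -= consumed_bytes;
   memmove(fb.data, fb.data + consumed_bytes, fb.offset);
}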

0 Answers:

No answers yet