Displaying ffmpeg frames on an OpenGL texture

Date: 2014-05-09 14:44:05

Tags: opengl ffmpeg textures

I am using Dranger's tutorial01 (ffmpeg) to decode a video and obtain the frames (AVFrames). I want to display the video using OpenGL.

http://dranger.com/ffmpeg/tutorial01.html

The main function is as follows:

int main (int argc, char** argv) {
    // opengl stuff
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_RGBA);
    glutInitWindowSize(800, 600);
    glutCreateWindow("Hello GL");

    glutReshapeFunc(changeViewport);
    glutDisplayFunc(render);

    GLenum err = glewInit();
    if(GLEW_OK != err) {
        fprintf(stderr, "GLEW error");
        return 1;
    }

    glClear(GL_COLOR_BUFFER_BIT);

    glEnable(GL_TEXTURE_2D);
    GLuint texture;
    glGenTextures(1, &texture); // Make room for our texture
    glBindTexture(GL_TEXTURE_2D, texture);

    // ffmpeg stuff
    AVFormatContext *pFormatCtx = NULL;
    int             i, videoStream;
    AVCodecContext  *pCodecCtx = NULL;
    AVCodec         *pCodec = NULL;
    AVFrame         *pFrame = NULL;
    AVFrame         *pFrameRGB = NULL;
    AVPacket        packet;
    int             frameFinished;
    int             numBytes;
    uint8_t         *buffer = NULL;

    AVDictionary    *optionsDict = NULL;

    if(argc < 2) {
        printf("Please provide a movie file\n");
        return -1;
    }

    // Register all formats and codecs
    av_register_all();

    // Open video file
    if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL) != 0)
        return -1; // Couldn't open file

    // Retrieve stream information
    if(avformat_find_stream_info(pFormatCtx, NULL) < 0)
        return -1; // Couldn't find stream information

    // Dump information about file onto standard error
    av_dump_format(pFormatCtx, 0, argv[1], 0);

    // Find the first video stream
    videoStream = -1;
    for(i = 0; i < pFormatCtx->nb_streams; i++)
        if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoStream = i;
            break;
        }
    if(videoStream == -1)
        return -1; // Didn't find a video stream

    // Get a pointer to the codec context for the video stream
    pCodecCtx = pFormatCtx->streams[videoStream]->codec;

    // Find the decoder for the video stream
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if(pCodec == NULL) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1; // Codec not found
    }

    // Open codec
    if(avcodec_open2(pCodecCtx, pCodec, &optionsDict) < 0)
        return -1; // Could not open codec

    // Allocate video frame
    pFrame = av_frame_alloc();

    // Allocate an AVFrame structure
    pFrameRGB = av_frame_alloc();
    if(pFrameRGB == NULL)
        return -1;

    // Determine required buffer size and allocate buffer
    numBytes = avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
                                  pCodecCtx->height);
    buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));

    struct SwsContext *sws_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
                                                pCodecCtx->pix_fmt,
                                                800, 600, PIX_FMT_RGB24,
                                                SWS_BICUBIC, NULL, NULL, NULL);

    // Assign appropriate parts of buffer to image planes in pFrameRGB
    // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
    // of AVPicture
    avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
                   pCodecCtx->width, pCodecCtx->height);

    // Read frames and save first five frames to disk
    i = 0;
    while(av_read_frame(pFormatCtx, &packet) >= 0) {

        // Is this a packet from the video stream?
        if(packet.stream_index == videoStream) {
            // Decode video frame
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

            // Did we get a video frame?
            if(frameFinished) {
                // Convert the image from its native format to RGB
                /*
                sws_scale(sws_ctx,
                          (uint8_t const * const *)pFrame->data,
                          pFrame->linesize, 0, pCodecCtx->height,
                          pFrameRGB->data, pFrameRGB->linesize);
                */
                sws_scale(sws_ctx, pFrame->data, pFrame->linesize, 0,
                          pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);

                // additional opengl
                glBindTexture(GL_TEXTURE_2D, texture);

                //gluBuild2DMipmaps(GL_TEXTURE_2D, 3, pCodecCtx->width, pCodecCtx->height, GL_RGB, GL_UNSIGNED_INT, pFrameRGB->data[0]);
                //glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, 840, 460, GL_RGB, GL_UNSIGNED_BYTE, pFrameRGB->data[0]);

                glTexImage2D(GL_TEXTURE_2D,                       // Always GL_TEXTURE_2D
                             0,                                   // 0 for now
                             GL_RGB,                              // Format OpenGL uses for image
                             pCodecCtx->width, pCodecCtx->height, // Width and height
                             0,                                   // The border of the image
                             GL_RGB,                              // GL_RGB, because pixels are stored in RGB format
                             GL_UNSIGNED_BYTE,                    // GL_UNSIGNED_BYTE, because pixels are stored as unsigned numbers
                             pFrameRGB->data[0]);                 // The actual pixel data
                // additional opengl end

                // Save the frame to disk
                if(++i <= 5)
                    SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height, i);
            }
        }

        glColor3f(1, 1, 1);
        glBindTexture(GL_TEXTURE_2D, texture);
        glBegin(GL_QUADS);
            glTexCoord2f(0, 1);
            glVertex3f(0, 0, 0);

            glTexCoord2f(1, 1);
            glVertex3f(pCodecCtx->width, 0, 0);

            glTexCoord2f(1, 0);
            glVertex3f(pCodecCtx->width, pCodecCtx->height, 0);

            glTexCoord2f(0, 0);
            glVertex3f(0, pCodecCtx->height, 0);
        glEnd();

        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
    }

    // Free the RGB image
    av_free(buffer);
    av_free(pFrameRGB);

    // Free the YUV frame
    av_free(pFrame);

    // Close the codec
    avcodec_close(pCodecCtx);

    // Close the video file
    avformat_close_input(&pFormatCtx);

    return 0;
}

Unfortunately, I could not find my solution in this related question:

ffmpeg video to opengl texture

The program compiles, but it does not display any video on the texture. It only creates an OpenGL window.

2 Answers:

Answer 0 (score: 3):

One problem is that you are using a single-buffered pixel format. Most modern operating systems use window composition, which relies on double-buffered pixel formats. This is easy to change:

--- glutInitDisplayMode(GLUT_RGBA);
+++ glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE);

Call glutSwapBuffers() at the end of the render function.
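In skeleton form, the reworked render function looks like this (a minimal sketch; the full version is shown further below):

    void render(void) {
        /* decode the next frame, upload it to the texture, draw the quad */
        glutSwapBuffers(); // present the finished back buffer
    }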

Another problem is that you never enter glutMainLoop, so no events are processed (such as drawing requests from the operating system). Parts of your code also have to move into the render function.
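For reference, a minimal sketch of the missing end of main(); glutPostRedisplay itself can serve as the idle callback, so that render() keeps being invoked:

    glutDisplayFunc(render);
    glutIdleFunc(glutPostRedisplay); // schedule a redraw whenever GLUT is idle
    glutMainLoop();                  // never returns; dispatches events and draw requests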

The frame decoding and texture upload must go either into an idle handler (you did not create one) that then calls glutPostRedisplay(), or directly into the render function:

void render(void) {
    /* ... */

--- while(av_read_frame(pFormatCtx, &packet)>=0) {
+++ if(av_read_frame(pFormatCtx, &packet)>=0) {

        // Is this a packet from the video stream?
        if(packet.stream_index == videoStream) {
            // Decode video frame
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

            // Did we get a video frame?
            if(frameFinished) {
                // Convert the image from its native format to RGB
                sws_scale(sws_ctx, pFrame->data, pFrame->linesize, 0,
                          pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);

                // additional opengl
                glBindTexture(GL_TEXTURE_2D, texture);

At this point you should use glTexSubImage2D instead of glTexImage2D, because it is much faster. However, the texture must first be created with a glTexImage2D call; do that once before calling glutMainLoop() (a sketch of that one-time setup follows after this listing).

                glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0,
                                pCodecCtx->width, pCodecCtx->height,
                                GL_RGB, GL_UNSIGNED_BYTE, pFrameRGB->data[0]);

                /*
                glTexImage2D(GL_TEXTURE_2D,                       // Always GL_TEXTURE_2D
                             0,                                   // 0 for now
                             GL_RGB,                              // Format OpenGL uses for image
                             pCodecCtx->width, pCodecCtx->height, // Width and height
                             0,                                   // The border of the image
                             GL_RGB,                              // GL_RGB, because pixels are stored in RGB format
                             GL_UNSIGNED_BYTE,                    // GL_UNSIGNED_BYTE, because pixels are stored as unsigned numbers
                             pFrameRGB->data[0]);                 // The actual pixel data
                */
                // additional opengl end
            }
        }

        glColor3f(1, 1, 1);
        glBindTexture(GL_TEXTURE_2D, texture);
        glBegin(GL_QUADS);
            glTexCoord2f(0, 1);
            glVertex3f(0, 0, 0);

            glTexCoord2f(1, 1);
            glVertex3f(pCodecCtx->width, 0, 0);

            glTexCoord2f(1, 0);
            glVertex3f(pCodecCtx->width, pCodecCtx->height, 0);

            glTexCoord2f(0, 0);
            glVertex3f(0, pCodecCtx->height, 0);
        glEnd();

        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
    }

    /* ... */

    glutSwapBuffers();
}
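For completeness, the one-time texture creation before entering the main loop might look like the following sketch (not part of the original answer: passing NULL as the last argument allocates storage without uploading pixels, and the GL_LINEAR filters are an assumption so the texture is complete without mipmaps):

    // One-time setup in main(), before glutMainLoop()
    glBindTexture(GL_TEXTURE_2D, texture);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); // assumed: no mipmaps used
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB,
                 pCodecCtx->width, pCodecCtx->height, 0,
                 GL_RGB, GL_UNSIGNED_BYTE, NULL); // NULL: allocate storage only

    glutMainLoop();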

Answer 1 (score: 0):

I was doing almost exactly the same thing as you, and ran into a problem where what looked like an incorrectly decoded image was showing where my video should have been playing. I found this page while trying to fix that, and as in datenwolf's answer I got video playback by adding the glTexSubImage2D call. However, when I tried to take the glTexImage2D call out, leaving only the single initial call that creates the texture, my previously working video was replaced by a white rectangle. So for some reason it only works for me when I call glTexImage2D followed by glTexSubImage2D for every frame.
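In other words, the per-frame upload that worked amounts to the following (a sketch reconstructed from the description above, not verified code):

    glBindTexture(GL_TEXTURE_2D, texture);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB,
                 pCodecCtx->width, pCodecCtx->height, 0,
                 GL_RGB, GL_UNSIGNED_BYTE, pFrameRGB->data[0]);    // recreate level 0 every frame
    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0,
                    pCodecCtx->width, pCodecCtx->height,
                    GL_RGB, GL_UNSIGNED_BYTE, pFrameRGB->data[0]); // then overwrite it in place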