I am writing a video program with ffmpeg and OpenGL. It generates two textures and swaps them when drawing. The code for a single call of DrawFirstFrame is:
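// Excerpt: this sits inside the same av_read_frame()/packet.stream_index loop that
// DrawNextFrame() below shows in full; the trailing braces close that loop.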
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
            // Did we get a video frame?
            if (frameFinished)
            {
                f++;
                pFrameRGB = avcodec_alloc_frame();
                // Allocate memory for the raw data we get when converting.
                uint8_t *buffer;
                int numBytes;
                numBytes = avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);
                buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));
                // Associate the frame with our buffer
                avpicture_fill((AVPicture *) pFrameRGB, buffer, PIX_FMT_RGB24,
                               pCodecCtx->width, pCodecCtx->height);
                pFrameRGB->linesize[0] = pCodecCtx->width * 3; // single packed plane for RGB24
                struct SwsContext* swsContext = sws_getContext(pCodecCtx->width, pCodecCtx->height,
                                                               pCodecCtx->pix_fmt,
                                                               pCodecCtx->width, pCodecCtx->height,
                                                               AV_PIX_FMT_RGB24, SWS_BICUBIC,
                                                               NULL, NULL, NULL);
                if (swsContext == NULL)
                {
                    fprintf(stderr, "Cannot initialize the conversion context!\n");
                    exit(1);
                }
                sws_scale(swsContext, pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
                          pFrameRGB->data, pFrameRGB->linesize);

                current_Vtex = &VideoTexture;
                glGenTextures(1, &VideoTexture);
                glBindTexture(GL_TEXTURE_2D, VideoTexture);
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
                // glTexImage2D(GL_TEXTURE_2D, 0, 3, pCodecCtx->width, pCodecCtx->height, 0, GL_RGB, GL_UNSIGNED_BYTE, pFrameRGB->data[0]);
                glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, pCodecCtx->width, pCodecCtx->height, 0,
                             GL_RGB, GL_UNSIGNED_BYTE, pFrameRGB->data[0]);

                glGenTextures(1, &VideoTexture2);
                glBindTexture(GL_TEXTURE_2D, VideoTexture2);
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
                // glTexImage2D(GL_TEXTURE_2D, 0, 3, pCodecCtx->width, pCodecCtx->height, 0, GL_RGB, GL_UNSIGNED_BYTE, pFrameRGB->data[0]);
                glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, pCodecCtx->width, pCodecCtx->height, 0,
                             GL_RGB, GL_UNSIGNED_BYTE, pFrameRGB->data[0]);
                //glDeleteTextures(1, &VideoTexture);

                GLenum err;
                while ((err = glGetError()) != GL_NO_ERROR)
                {
                    cerr << "OpenGL error: " << err << endl;
                }
                av_free(buffer);
            }
        }
        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
        if (f > 1) break;
    }
Then, in a loop:
if (token.IsSameAs("frame"))
{
    this->panel->DrawNextFrame();
    wxPaintEvent evt;
    this->panel->GetEventHandler()->AddPendingEvent(evt);
    GetThread()->Sleep(500);
    token = tkz.GetNextToken();
    token.ToLong(&lastframe);
    wxCriticalSectionLocker lock(*CSect);
    this->fram->Clear();
}
Here is the DrawNextFrame() code, which tries to update the textures generated in DrawFirstFrame:
void BasicGLPane::DrawNextFrame()
{
    int f = 1;
    while (av_read_frame(pFormatCtx, &packet) >= 0)
    {
        // Is this a packet from the video stream?
        if (packet.stream_index == videoStream)
        {
            // Decode video frame
            avcodec_decode_video2(pCodecCtx, pFrame, &FrameFinished, &packet);
            // Did we get a video frame?
            if (FrameFinished)
            {
                f++;
                pFrameRGB = avcodec_alloc_frame();
                // Allocate memory for the raw data we get when converting.
                uint8_t *buffer;
                int numBytes;
                numBytes = avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);
                buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));
                // Associate the frame with our buffer
                avpicture_fill((AVPicture *) pFrameRGB, buffer, PIX_FMT_RGB24,
                               pCodecCtx->width, pCodecCtx->height);
                pFrameRGB->linesize[0] = pCodecCtx->width * 3; // single packed plane for RGB24
                struct SwsContext* swsContext = sws_getContext(pCodecCtx->width, pCodecCtx->height,
                                                               pCodecCtx->pix_fmt,
                                                               pCodecCtx->width, pCodecCtx->height,
                                                               AV_PIX_FMT_RGB24, SWS_BICUBIC,
                                                               NULL, NULL, NULL);
                if (swsContext == NULL)
                {
                    fprintf(stderr, "Cannot initialize the conversion context!\n");
                    exit(1);
                }
                sws_scale(swsContext, pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
                          pFrameRGB->data, pFrameRGB->linesize);

                // Switch to the other texture and upload the new frame into it
                //glGenTextures(1, &VideoTexture);
                if ((*current_Vtex) == VideoTexture)
                    current_Vtex = &VideoTexture2;
                else
                    current_Vtex = &VideoTexture;
                glBindTexture(GL_TEXTURE_2D, (*current_Vtex));
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
                glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
                glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
                glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, pCodecCtx->width, pCodecCtx->height,
                                GL_RGB, GL_UNSIGNED_BYTE, pFrameRGB->data[0]);
                //glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, pCodecCtx->width, pCodecCtx->height, 0, GL_RGB, GL_UNSIGNED_BYTE, pFrameRGB->data[0]);
                //glDeleteTextures(1, &VideoTexture);

                GLenum err;
                while ((err = glGetError()) != GL_NO_ERROR)
                {
                    cerr << "OpenGL error: " << err << endl;
                }
            }
        }
        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
        if (f > 1) break;
    }
    av_free(pFrameRGB);
    Refresh();
}
The render function draws (*current_Vtex) on a quad:
if ((*current_Vtex) != 0)
{
    GLfloat z = 0;
    glEnable(GL_TEXTURE_2D);
    glPushMatrix();
    glBindTexture(GL_TEXTURE_2D, (*current_Vtex));
    glBegin(GL_QUADS);
        glTexCoord2i(0, 0); glVertex3f(0.0f, 0.0f, z);
        glTexCoord2i(1, 0); glVertex3f(Width, 0, z);
        glTexCoord2i(1, 1); glVertex3f(Width, Height, z);
        glTexCoord2i(0, 1); glVertex3f(0, Height, z);
    glEnd();
    glPopMatrix();
    glDisable(GL_TEXTURE_2D);
}
wxCriticalSectionLocker lock(*CSect);
this->fram->render();
glFlush();
SwapBuffers();
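To make the intent explicit, here is a condensed sketch of the texture ping-pong I am aiming for (UploadFrame and its rgb/width/height parameters are placeholders for illustration only; VideoTexture, VideoTexture2 and current_Vtex are the variables used above):

#include <cstdint>
#include <GL/gl.h>

GLuint VideoTexture = 0, VideoTexture2 = 0;   // both allocated once in DrawFirstFrame
GLuint *current_Vtex = &VideoTexture;         // texture the render function samples

// Per decoded frame: upload the new RGB24 pixels into the texture that is not
// currently displayed, then flip current_Vtex so the next paint uses it.
void UploadFrame(const uint8_t *rgb, int width, int height)
{
    GLuint *other = (current_Vtex == &VideoTexture) ? &VideoTexture2 : &VideoTexture;
    glBindTexture(GL_TEXTURE_2D, *other);
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height,
                    GL_RGB, GL_UNSIGNED_BYTE, rgb);
    current_Vtex = other;
}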
The problem is that the picture on the screen never updates (why?): only what was generated in DrawFirstFrame is displayed. I do know that the program swaps the textures when drawing.