使用FFMPEG对Mpeg2T的视频PID进行多线程解码

时间:2015-10-21 08:42:14

标签: multithreading visual-c++ ffmpeg mpeg2-ts

我在 VC++ 应用程序中使用 FFMPEG 解码并显示 MPEG2-TS 流中视频 PID 的视频帧;现在需要用多线程同时对其他 MPEG2-TS 流做同样的处理。我的源代码如下:

int main (int argc, char* argv[])
{
av_register_all();
avformat_network_init();
pFormatCtx = avformat_alloc_context();

if(avformat_open_input(&pFormatCtx,filepath,NULL,NULL)!=0){
    printf("Couldn't open input stream.\n");
    return -1;
}
if(avformat_find_stream_info(pFormatCtx,NULL)<0){
    printf("Couldn't find stream information.\n");
    return -1;
}
videoindex=-1;
for(i=0; i<pFormatCtx->nb_streams; i++) 
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){
        videoindex=i;
        break;
    }
if(videoindex==-1){
    printf("Didn't find a video stream.\n");
    return -1;
}

pCodecCtx=pFormatCtx->streams[videoindex]->codec;
pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
if(pCodec==NULL){
    printf("Codec not found.\n");
    return -1;
}
if(avcodec_open2(pCodecCtx, pCodec,NULL)<0){
    printf("Could not open codec.\n");
    return -1;
}  

pFrame=av_frame_alloc();
pFrameYUV=av_frame_alloc();
out_buffer=(uint8_t *)av_malloc(avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));
avpicture_fill((AVPicture *)pFrameYUV, out_buffer, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
packet=(AVPacket *)av_malloc(sizeof(AVPacket));
//Output Info-----------------------------
printf("--------------- File Information ----------------\n");
av_dump_format(pFormatCtx,0,filepath,0);
printf("-------------------------------------------------\n");
img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, 
    pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL); 

#if OUTPUT_YUV420P 
    fp_yuv=fopen("output.yuv","wb+");  
#endif  

if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {  
    printf( "Could not initialize SDL - %s\n", SDL_GetError()); 
    return -1;
} 

screen_w = pCodecCtx->width;
screen_h = pCodecCtx->height;
//SDL 2.0 Support for multiple windows
screen = SDL_CreateWindow("Simplest ffmpeg player's Window", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
    screen_w, screen_h, SDL_WINDOW_OPENGL);

if(!screen) {  
    printf("SDL: could not create window - exiting:%s\n",SDL_GetError());  
    return -1;
}

sdlRenderer = SDL_CreateRenderer(screen, -1, 0);  
//IYUV: Y + U + V  (3 planes)
//YV12: Y + V + U  (3 planes)
sdlTexture = SDL_CreateTexture(sdlRenderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,pCodecCtx->width,pCodecCtx->height);  

sdlRect.x=0;
sdlRect.y=0;
sdlRect.w=screen_w;
sdlRect.h=screen_h;
//SDL End----------------------
BYTE buffer [4] ;
int nSize = 0 ;
int nByteCnt = 0 ;
int nPreviuosPos = 0 ;
mpgfile = fopen ("D:\\00_Projects\\Farzan II\\SampleData\\Yahsat1996V_N_PID(2101).pes", "rb");

while(av_read_frame(pFormatCtx, packet)>=0 /*&& nSize > 0*/)
{
    if(packet->stream_index==videoindex)
    {
        ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
        if(ret < 0)
        {
            printf("Decode Error.\n");
            return -1;
        }
        if(got_picture)
        {
            sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, 
                pFrameYUV->data, pFrameYUV->linesize);              
#if OUTPUT_YUV420P
            y_size=pCodecCtx->width*pCodecCtx->height;  
            fwrite(pFrameYUV->data[0],1,y_size,fp_yuv);    //Y
            fwrite(pFrameYUV->data[1],1,y_size/4,fp_yuv);  //U
            fwrite(pFrameYUV->data[2],1,y_size/4,fp_yuv);  //V
#endif
//SDL---------------------------
#if 0
            SDL_UpdateTexture( sdlTexture, NULL, pFrameYUV->data[0], pFrameYUV->linesize[0] );  
#else
              SDL_UpdateYUVTexture(sdlTexture, &sdlRect,
                pFrameYUV->data[0], pFrameYUV->linesize[0],
            pFrameYUV->data[1], pFrameYUV->linesize[1],
            pFrameYUV->data[2], pFrameYUV->linesize[2]);
#endif              
            SDL_RenderClear( sdlRenderer );  
            SDL_RenderCopy(  sdlRenderer, sdlTexture,  NULL, &sdlRect);  
            SDL_RenderPresent( sdlRenderer );  
            //SDL End-----------------------
            //Delay 40ms
            SDL_Delay(40);
        }
    }
    av_free_packet(packet);
}
//flush decoder
//FIX: Flush Frames remained in Codec
while (1) {
    ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
    if (ret < 0)
        break;
    if (!got_picture)
        break;
    sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, 
        pFrameYUV->data, pFrameYUV->linesize);
#if OUTPUT_YUV420P
    int y_size=pCodecCtx->width*pCodecCtx->height;  
    fwrite(pFrameYUV->data[0],1,y_size,fp_yuv);    //Y 
    fwrite(pFrameYUV->data[1],1,y_size/4,fp_yuv);  //U
    fwrite(pFrameYUV->data[2],1,y_size/4,fp_yuv);  //V
#endif
    //SDL---------------------------
    SDL_UpdateTexture( sdlTexture, &sdlRect, pFrameYUV->data[0],       pFrameYUV->linesize[0] );  
        SDL_RenderClear( sdlRenderer );  
        SDL_RenderCopy( sdlRenderer, sdlTexture,  NULL, &sdlRect);  
        SDL_RenderPresent( sdlRenderer );  
        //SDL End-----------------------
        //Delay 40ms
        SDL_Delay(40);
}

sws_freeContext(img_convert_ctx);

#if OUTPUT_YUV420P 
    fclose(fp_yuv);
#endif 

SDL_Quit();

av_frame_free(&pFrameYUV);
av_frame_free(&pFrame);
avcodec_close(pCodecCtx);
avformat_close_input(&pFormatCtx);

return 0;
}

在单个线程中调用这段代码时运行正常;但在多个线程中同时调用此函数后,就会发生访问冲突(Access Violation)错误。有人能指导我解决这个问题吗?

0 个答案:

没有答案