I am trying to change the camera output (resolution: 640 x 480) to 1024 x 720 and render the video frames on an Android screen. Is it possible to do this video conversion with the ffmpeg and SDL libraries? If so, which APIs in the ffmpeg codec libraries are available for this?
Below is the code with which I get 640 x 480 output:
// Headers needed by this snippet (FFmpeg demuxing/decoding plus SDL2):
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <SDL2/SDL.h>

// LOGD (the Android log macro) and decode_interrupt_cb are defined elsewhere
// in the project; a function wrapper around the snippet is assumed here.
int decode_and_render() {
// Registering all the formats:
av_register_all();
AVFormatContext *pFormatCtx=NULL;
int i, videoStream;
AVCodecContext *pCodecCtx=NULL;
AVCodec *pCodec=NULL;
AVFrame *pFrame;
AVPacket packet;
int frameFinished;
int retcl, retcopy; // return codes of the SDL render calls below
SDL_Texture *bmp;
SDL_Renderer *renderer;
SDL_Window *screen;
if (SDL_Init(SDL_INIT_VIDEO)) {
LOGD( "Could not initialize SDL - %s\n", SDL_GetError());
SDL_Quit();
exit(1);
}
LOGD(" SDL Initialized..");
screen = SDL_CreateWindow("Window", SDL_WINDOWPOS_UNDEFINED,
SDL_WINDOWPOS_UNDEFINED, 0, 0,
SDL_WINDOW_SHOWN | SDL_WINDOW_FULLSCREEN);
LOGD("SDL Screen Created ..");
renderer = SDL_CreateRenderer(screen,-1,SDL_RENDERER_ACCELERATED | SDL_RENDERER_TARGETTEXTURE);
LOGD("Rendering Created...");
bmp = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_IYUV,SDL_TEXTUREACCESS_STREAMING,640,480);
LOGD("Texture created;");
SDL_RenderSetLogicalSize(renderer,640,480);
// Open video file
if(avformat_open_input(&pFormatCtx,"Filename", NULL,NULL)!=0) {
LOGD("Cannot open the file");
return -1; // bail out instead of dereferencing a NULL context below
}
pFormatCtx->interrupt_callback.callback = decode_interrupt_cb;
// Retrieve stream information
if(avformat_find_stream_info(pFormatCtx,NULL)<0) {
LOGD("Cannot retrieve stream info");
return -1; // couldn't find stream information
}
videoStream=-1;
for(i=0; i<pFormatCtx->nb_streams; i++)
if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
videoStream=i;
break;
}
if(videoStream==-1) {
LOGD("Cannot find video stream");
return -1; // don't index streams[] with -1
}
pCodecCtx=pFormatCtx->streams[videoStream]->codec;
// Find the decoder for the video stream
pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
if(pCodec==NULL) {
LOGD("Unable to find the decoder");
return -1; // codec not found
}
// Open codec
if(avcodec_open2(pCodecCtx, pCodec,NULL)<0) {
LOGD("Unable to open codec");
return -1; // could not open codec
}
// Allocate video frame (avcodec_alloc_frame() is deprecated; av_frame_alloc() replaces it)
pFrame=av_frame_alloc();
i=0;
while(av_read_frame(pFormatCtx, &packet)>=0) {
// Is this a packet from the video stream?
if(packet.stream_index==videoStream) {
// Decode video frame
avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished,&packet);
// Did we get a video frame?
if(frameFinished) {
//----------------Code for Displaying
SDL_UpdateYUVTexture(bmp, NULL, pFrame->data[0],
pFrame->linesize[0], pFrame->data[1], pFrame->linesize[1],
pFrame->data[2], pFrame->linesize[2]);
retcl = SDL_RenderClear(renderer);
retcopy = SDL_RenderCopy(renderer, bmp, NULL, NULL);
SDL_RenderPresent(renderer);
//-----------------
}
}
// Free the packet that was allocated by av_read_frame
av_free_packet(&packet);
}
// Free the YUV frame
av_frame_free(&pFrame);
// Close the codec
avcodec_close(pCodecCtx);
// Close the video file
avformat_close_input(&pFormatCtx);
return 0;
}
Answer:
ffmpeg applies filters such as scaling and cropping through a "filtergraph". The filtering video example shows how to apply filters programmatically. If you integrate that example into your code, all you have to do is change the string filter_descr so that the video is scaled to the resolution you want.
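A minimal sketch of what that integration could look like, closely following the filtering_video example (the helper init_filters and the two filter-context globals are modeled on that example, not taken from the asker's code, and details vary slightly across FFmpeg versions):

#include <stdio.h>
#include <libavfilter/avfilter.h>
#include <libavfilter/buffersrc.h>
#include <libavfilter/buffersink.h>
#include <libavutil/opt.h>

// One graph, a "buffer" source that decoded frames are fed into,
// and a "buffersink" that filtered frames are read back from.
static AVFilterGraph *filter_graph;
static AVFilterContext *buffersrc_ctx;
static AVFilterContext *buffersink_ctx;

// Same syntax as the command line: -vf scale=1024:720
static const char *filter_descr = "scale=1024:720";

static int init_filters(AVCodecContext *dec_ctx, AVRational time_base)
{
    char args[512];
    // Ask the sink for yuv420p so the output matches SDL_PIXELFORMAT_IYUV.
    enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs  = avfilter_inout_alloc();
    int ret;

    avfilter_register_all(); // required on the older FFmpeg releases this code targets
    filter_graph = avfilter_graph_alloc();
    if (!outputs || !inputs || !filter_graph) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    // Describe the decoded frames the "buffer" source will receive.
    snprintf(args, sizeof(args),
             "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
             dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
             time_base.num, time_base.den,
             dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);
    ret = avfilter_graph_create_filter(&buffersrc_ctx, avfilter_get_by_name("buffer"),
                                       "in", args, NULL, filter_graph);
    if (ret < 0)
        goto end;

    ret = avfilter_graph_create_filter(&buffersink_ctx, avfilter_get_by_name("buffersink"),
                                       "out", NULL, NULL, filter_graph);
    if (ret < 0)
        goto end;
    ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts,
                              AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
    if (ret < 0)
        goto end;

    // Link "in" -> scale=1024:720 -> "out", then validate the graph.
    outputs->name = av_strdup("in");
    outputs->filter_ctx = buffersrc_ctx;
    outputs->pad_idx = 0;
    outputs->next = NULL;
    inputs->name = av_strdup("out");
    inputs->filter_ctx = buffersink_ctx;
    inputs->pad_idx = 0;
    inputs->next = NULL;
    ret = avfilter_graph_parse_ptr(filter_graph, filter_descr, &inputs, &outputs, NULL);
    if (ret >= 0)
        ret = avfilter_graph_config(filter_graph, NULL);

end:
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);
    return ret;
}

It would be called once, right after avcodec_open2(), e.g. as init_filters(pCodecCtx, pFormatCtx->streams[videoStream]->time_base).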
Filters are defined the same way as for the standalone ffmpeg program. See the documentation for details.
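In the asker's decode loop, each decoded pFrame would then be pushed through the graph and the scaled frame rendered in its place. A sketch of that part, assuming filt_frame is an extra AVFrame allocated once with av_frame_alloc(), and that the texture and logical size are now created at 1024 x 720 instead of 640 x 480:

// Replaces the direct SDL_UpdateYUVTexture() call inside "if(frameFinished)":
if (av_buffersrc_add_frame_flags(buffersrc_ctx, pFrame,
                                 AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
    LOGD("Error feeding the filtergraph");
} else {
    // Drain every scaled 1024x720 frame the graph produces.
    while (av_buffersink_get_frame(buffersink_ctx, filt_frame) >= 0) {
        SDL_UpdateYUVTexture(bmp, NULL,
                             filt_frame->data[0], filt_frame->linesize[0],
                             filt_frame->data[1], filt_frame->linesize[1],
                             filt_frame->data[2], filt_frame->linesize[2]);
        SDL_RenderClear(renderer);
        SDL_RenderCopy(renderer, bmp, NULL, NULL);
        SDL_RenderPresent(renderer);
        av_frame_unref(filt_frame); // the sink handed us a reference; drop it
    }
}

Since the buffersink was constrained to yuv420p above, the frames coming out of the graph match the SDL_PIXELFORMAT_IYUV texture format already used in the code.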