I downloaded FLV videos (H.264 video and AAC audio) from a CDN and remuxed them to MP4. The downloads are limited in length, so I fetched each video in several parts: from the start, from point 1, from point 2 (using the seek parameter in the URL). Each part starts earlier than the previous one ends, so neighbouring parts overlap.

Scanning all the parts with av_read_frame, I found that the overlapping packets not only have the same size and order, but also that their dts/pts values differ between parts by a constant offset. So, to continue the video from the start seamlessly with the part taken from point 1, I have to:

1. Write the output header to the output file.
2. Copy all packets of the starting part that are not in the overlap.
3. Copy all non-overlapping packets of the part taken from point 1, shifting their dts values by that constant.

How can I do all of this with libav (not ffmpeg)? I have read "How can libavformat be used without using other libav libraries", but it does not work with libav, because libav has no avformat_alloc_output_context2. The avconv.c source is also too complex for a newbie like me to isolate the parts related to stream-copy operations.

Could someone show an example of how to:

- open input_file1 and input_file2 (only needed if this differs from what the usual tutorials show)
- open output_file and write its header, with the same container format and the same video and audio formats
- write packets from input_file1 to output_file up to a given packet, e.g. until pos == XXX
- write packets from input_file2 to output_file, changing their dts (or whatever else is needed) by a constant value
- write the correct trailer

The time shift itself I have already computed from the dts values.
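For orientation, the steps listed above amount to a plain remux (stream copy) with a constant timestamp offset. Below is a minimal sketch of that procedure using the FFmpeg libavformat API. The file names and the SPLICE_DTS / TS_OFFSET constants are placeholders to be filled in from the packet scan, only the first video stream is handled, error handling is trimmed, and avformat_alloc_output_context2 together with the AVCodecParameters API may not exist in older Libav releases (which is exactly the constraint mentioned above), so treat it as illustrative rather than a drop-in answer.

```c
/* concat_sketch.c - illustrative sketch only; see the note above. */
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

/* Copy video packets from 'ic' into stream 0 of 'oc'.
 * Stops before the first packet whose dts >= stop_dts
 * (pass AV_NOPTS_VALUE to copy everything to the end).
 * 'ts_offset' is added to pts/dts after rescaling to the output time base. */
static void copy_video_packets(AVFormatContext *ic, int vstream,
                               AVFormatContext *oc,
                               int64_t stop_dts, int64_t ts_offset)
{
    AVPacket *pkt = av_packet_alloc();
    while (av_read_frame(ic, pkt) >= 0) {
        if (pkt->stream_index != vstream) { av_packet_unref(pkt); continue; }
        if (stop_dts != AV_NOPTS_VALUE && pkt->dts >= stop_dts) {
            av_packet_unref(pkt);                /* reached the splice point */
            break;
        }
        av_packet_rescale_ts(pkt, ic->streams[vstream]->time_base,
                             oc->streams[0]->time_base);
        if (pkt->pts != AV_NOPTS_VALUE) pkt->pts += ts_offset;
        if (pkt->dts != AV_NOPTS_VALUE) pkt->dts += ts_offset;
        pkt->stream_index = 0;
        pkt->pos = -1;
        av_interleaved_write_frame(oc, pkt);     /* writes the packet */
        av_packet_unref(pkt);                    /* safe on any API version */
    }
    av_packet_free(&pkt);
}

int main(void)
{
    /* placeholders: fill in real paths and the values found by the scan */
    const char *in1 = "part_start.flv", *in2 = "part_point1.flv";
    const char *out = "joined.mp4";
    const int64_t SPLICE_DTS = 0;   /* dts (in in1's time base) of the first
                                       packet that also appears in in2        */
    const int64_t TS_OFFSET  = 0;   /* constant shift for in2 (output time base) */

    AVFormatContext *ic1 = NULL, *ic2 = NULL, *oc = NULL;

    /* on FFmpeg/Libav older than 4.0 you would also call av_register_all() here */

    /* open both inputs */
    if (avformat_open_input(&ic1, in1, NULL, NULL) < 0) return 1;
    if (avformat_find_stream_info(ic1, NULL) < 0)       return 1;
    if (avformat_open_input(&ic2, in2, NULL, NULL) < 0) return 1;
    if (avformat_find_stream_info(ic2, NULL) < 0)       return 1;

    int v1 = av_find_best_stream(ic1, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
    int v2 = av_find_best_stream(ic2, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
    if (v1 < 0 || v2 < 0) return 1;

    /* output: container guessed from the file name, codec parameters copied
       from the first input (this also carries the H.264 extradata) */
    if (avformat_alloc_output_context2(&oc, NULL, NULL, out) < 0) return 1;
    AVStream *ost = avformat_new_stream(oc, NULL);
    avcodec_parameters_copy(ost->codecpar, ic1->streams[v1]->codecpar);
    ost->codecpar->codec_tag = 0;
    ost->time_base = ic1->streams[v1]->time_base;   /* a hint; the muxer may change it */

    if (!(oc->oformat->flags & AVFMT_NOFILE) &&
        avio_open(&oc->pb, out, AVIO_FLAG_WRITE) < 0) return 1;
    if (avformat_write_header(oc, NULL) < 0) return 1;

    /* part 1: everything up to the splice point, no shift */
    copy_video_packets(ic1, v1, oc, SPLICE_DTS, 0);
    /* part 2: everything, shifted by the constant offset */
    copy_video_packets(ic2, v2, oc, AV_NOPTS_VALUE, TS_OFFSET);

    av_write_trailer(oc);
    avformat_close_input(&ic1);
    avformat_close_input(&ic2);
    if (!(oc->oformat->flags & AVFMT_NOFILE))
        avio_closep(&oc->pb);
    avformat_free_context(oc);
    return 0;
}
```

The answers below take essentially the same approach, but set the output codec fields one by one with the older per-AVCodecContext API.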
Answer 0 (score: 3)

Okay, I found that piece of code, but as I said in the comments, it does not work for some reason. Please let me know if you can fix it.
```c
/*
 * merge.c
 *
 * Created on: Nov 17, 2012
 * Author: arash
 */

/* merge multiple "IDENTICAL" video file into one file */

#include <stdio.h>
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"

AVFormatContext *i_fmt_ctx;
AVStream *i_video_stream;

AVFormatContext *o_fmt_ctx;
AVStream *o_video_stream;

int main(int argc, char* argv[])
{
    if (argc < 3)
    {
        fprintf(stderr, "usage : %s <input> [<input>...] <output>\n", argv[0]);
        return -1;
    }

    avcodec_register_all();
    av_register_all();

    /* should set to NULL so that avformat_open_input() allocate a new one */
    i_fmt_ctx = NULL;
    if (avformat_open_input(&i_fmt_ctx, argv[1], NULL, NULL)!=0)
    {
        fprintf(stderr, "could not open input file\n");
        return -1;
    }

    if (av_find_stream_info(i_fmt_ctx)<0)
    {
        fprintf(stderr, "could not find stream info\n");
        return -1;
    }

    //av_dump_format(i_fmt_ctx, 0, argv[1], 0);

    /* find first video stream */
    for (unsigned i=0; i<i_fmt_ctx->nb_streams; i++)
        if (i_fmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            i_video_stream = i_fmt_ctx->streams[i];
            break;
        }
    if (i_video_stream == NULL)
    {
        fprintf(stderr, "didn't find any video stream\n");
        return -1;
    }

    avformat_alloc_output_context2(&o_fmt_ctx, NULL, NULL, argv[argc-1]);

    /*
     * since all input files are supposed to be identical (framerate, dimension, color format, ...)
     * we can safely set output codec values from first input file
     */
    o_video_stream = av_new_stream(o_fmt_ctx, 0);
    {
        AVCodecContext *c;
        c = o_video_stream->codec;
        c->bit_rate = 400000;
        c->codec_id = i_video_stream->codec->codec_id;
        c->codec_type = i_video_stream->codec->codec_type;
        c->time_base.num = i_video_stream->time_base.num;
        c->time_base.den = i_video_stream->time_base.den;
        fprintf(stderr, "time_base.num = %d time_base.den = %d\n", c->time_base.num, c->time_base.den);
        c->width = i_video_stream->codec->width;
        c->height = i_video_stream->codec->height;
        c->pix_fmt = i_video_stream->codec->pix_fmt;
        printf("%d %d %d", c->width, c->height, c->pix_fmt);
        c->flags = i_video_stream->codec->flags;
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;
        c->me_range = i_video_stream->codec->me_range;
        c->max_qdiff = i_video_stream->codec->max_qdiff;
        c->qmin = i_video_stream->codec->qmin;
        c->qmax = i_video_stream->codec->qmax;
        c->qcompress = i_video_stream->codec->qcompress;
    }

    avio_open(&o_fmt_ctx->pb, argv[argc-1], AVIO_FLAG_WRITE);

    /* yes! this is redundant */
    av_close_input_file(i_fmt_ctx);

    avformat_write_header(o_fmt_ctx, NULL);

    int last_pts = 0;
    int last_dts = 0;
    for (int i=1; i<argc-1; i++)
    {
        i_fmt_ctx = NULL;
        if (avformat_open_input(&i_fmt_ctx, argv[i], NULL, NULL)!=0)
        {
            fprintf(stderr, "could not open input file\n");
            return -1;
        }

        if (av_find_stream_info(i_fmt_ctx)<0)
        {
            fprintf(stderr, "could not find stream info\n");
            return -1;
        }

        av_dump_format(i_fmt_ctx, 0, argv[i], 0);

        /* we only use first video stream of each input file */
        i_video_stream = NULL;
        for (unsigned s=0; s<i_fmt_ctx->nb_streams; s++)
            if (i_fmt_ctx->streams[s]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
            {
                i_video_stream = i_fmt_ctx->streams[s];
                break;
            }
        if (i_video_stream == NULL)
        {
            fprintf(stderr, "didn't find any video stream\n");
            return -1;
        }

        int64_t pts, dts;
        while (1)
        {
            AVPacket i_pkt;
            av_init_packet(&i_pkt);
            i_pkt.size = 0;
            i_pkt.data = NULL;
            if (av_read_frame(i_fmt_ctx, &i_pkt) <0 )
                break;
            /*
             * pts and dts should increase monotonically
             * pts should be >= dts
             */
            i_pkt.flags |= AV_PKT_FLAG_KEY;
            pts = i_pkt.pts;
            i_pkt.pts += last_pts;
            dts = i_pkt.dts;
            i_pkt.dts += last_dts;
            i_pkt.stream_index = 0;

            //printf("%lld %lld\n", i_pkt.pts, i_pkt.dts);
            static int num = 1;
            printf("frame %d\n", num++);
            av_interleaved_write_frame(o_fmt_ctx, &i_pkt);
            //av_free_packet(&i_pkt);
            //av_init_packet(&i_pkt);
        }

        last_dts += dts;
        last_pts += pts;

        av_close_input_file(i_fmt_ctx);
    }

    av_write_trailer(o_fmt_ctx);

    avcodec_close(o_fmt_ctx->streams[0]->codec);
    av_freep(&o_fmt_ctx->streams[0]->codec);
    av_freep(&o_fmt_ctx->streams[0]);

    avio_close(o_fmt_ctx->pb);
    av_free(o_fmt_ctx);

    return 0;
}
```
Answer 1 (score: 2)

After the av_new_stream() call you should add the following code:
```c
c->extradata = i_video_stream->codec->extradata;
c->extradata_size = i_video_stream->codec->extradata_size;
```
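This matters because, with CODEC_FLAG_GLOBAL_HEADER set, the H.264 parameter sets (SPS/PPS) live in extradata and the MP4 muxer needs them to write a usable header. As a side note, with the newer AVCodecParameters API (FFmpeg 3.1 or later, if I remember correctly) the whole set of codec parameters, extradata included, can be copied in one call, roughly `avcodec_parameters_copy(o_video_stream->codecpar, i_video_stream->codecpar);`.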
Answer 2 (score: 1)

I have replaced the deprecated functions and also added ALiang's correction. It works.
```c
/*
 * merge.c
 *
 * Created on: Nov 17, 2012
 * Author: arash
 *
 * Modified on: April 5, 2018
 * Editor: Chebotarev Michael
 */

/* merge multiple "IDENTICAL" video file into one file */

#include <stdio.h>
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"

AVFormatContext *i_fmt_ctx;
AVStream *i_video_stream;

AVFormatContext *o_fmt_ctx;
AVStream *o_video_stream;

int main(int argc, char* argv[])
{
    if (argc < 3)
    {
        fprintf(stderr, "usage : %s <input> [<input>...] <output>\n", argv[0]);
        return -1;
    }

    avcodec_register_all();
    av_register_all();

    /* should set to NULL so that avformat_open_input() allocate a new one */
    i_fmt_ctx = NULL;
    if (avformat_open_input(&i_fmt_ctx, argv[1], NULL, NULL) != 0)
    {
        fprintf(stderr, "could not open input file\n");
        return -1;
    }

    if (avformat_find_stream_info(i_fmt_ctx, NULL) < 0)
    {
        fprintf(stderr, "could not find stream info\n");
        return -1;
    }

    //av_dump_format(i_fmt_ctx, 0, argv[1], 0);

    /* find first video stream */
    for (unsigned i = 0; i<i_fmt_ctx->nb_streams; i++)
        if (i_fmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            i_video_stream = i_fmt_ctx->streams[i];
            break;
        }
    if (i_video_stream == NULL)
    {
        fprintf(stderr, "didn't find any video stream\n");
        return -1;
    }

    avformat_alloc_output_context2(&o_fmt_ctx, NULL, NULL, argv[argc - 1]);

    /*
     * since all input files are supposed to be identical (framerate, dimension, color format, ...)
     * we can safely set output codec values from first input file
     */
    o_video_stream = avformat_new_stream(o_fmt_ctx, 0);
    {
        AVCodecContext *c;
        c = o_video_stream->codec;
        c->bit_rate = 400000;
        c->codec_id = i_video_stream->codec->codec_id;
        c->codec_type = i_video_stream->codec->codec_type;
        c->time_base.num = i_video_stream->time_base.num;
        c->time_base.den = i_video_stream->time_base.den;
        fprintf(stderr, "time_base.num = %d time_base.den = %d\n", c->time_base.num, c->time_base.den);
        c->width = i_video_stream->codec->width;
        c->height = i_video_stream->codec->height;
        c->pix_fmt = i_video_stream->codec->pix_fmt;
        printf("%d %d %d", c->width, c->height, c->pix_fmt);
        c->flags = i_video_stream->codec->flags;
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;
        c->me_range = i_video_stream->codec->me_range;
        c->max_qdiff = i_video_stream->codec->max_qdiff;
        c->qmin = i_video_stream->codec->qmin;
        c->qmax = i_video_stream->codec->qmax;
        c->qcompress = i_video_stream->codec->qcompress;
        c->extradata = i_video_stream->codec->extradata;
        c->extradata_size = i_video_stream->codec->extradata_size;
    }

    avio_open(&o_fmt_ctx->pb, argv[argc - 1], AVIO_FLAG_WRITE);

    /* yes! this is redundant */
    avformat_close_input(&i_fmt_ctx);

    avformat_write_header(o_fmt_ctx, NULL);

    int last_pts = 0;
    int last_dts = 0;
    for (int i = 1; i<argc - 1; i++)
    {
        i_fmt_ctx = NULL;
        if (avformat_open_input(&i_fmt_ctx, argv[i], NULL, NULL) != 0)
        {
            fprintf(stderr, "could not open input file\n");
            return -1;
        }

        if (avformat_find_stream_info(i_fmt_ctx, NULL) < 0)
        {
            fprintf(stderr, "could not find stream info\n");
            return -1;
        }

        av_dump_format(i_fmt_ctx, 0, argv[i], 0);

        /* we only use first video stream of each input file */
        i_video_stream = NULL;
        for (unsigned s = 0; s<i_fmt_ctx->nb_streams; s++)
            if (i_fmt_ctx->streams[s]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
            {
                i_video_stream = i_fmt_ctx->streams[s];
                break;
            }
        if (i_video_stream == NULL)
        {
            fprintf(stderr, "didn't find any video stream\n");
            return -1;
        }

        int64_t pts, dts;
        while (1)
        {
            AVPacket i_pkt;
            av_init_packet(&i_pkt);
            i_pkt.size = 0;
            i_pkt.data = NULL;
            if (av_read_frame(i_fmt_ctx, &i_pkt) <0)
                break;
            /*
             * pts and dts should increase monotonically
             * pts should be >= dts
             */
            i_pkt.flags |= AV_PKT_FLAG_KEY;
            pts = i_pkt.pts;
            i_pkt.pts += last_pts;
            dts = i_pkt.dts;
            i_pkt.dts += last_dts;
            i_pkt.stream_index = 0;

            //printf("%lld %lld\n", i_pkt.pts, i_pkt.dts);
            static int num = 1;
            printf("frame %d\n", num++);
            av_interleaved_write_frame(o_fmt_ctx, &i_pkt);
            //av_free_packet(&i_pkt);
            //av_init_packet(&i_pkt);
        }

        last_dts += dts;
        last_pts += pts;

        avformat_close_input(&i_fmt_ctx);
    }

    av_write_trailer(o_fmt_ctx);

    avcodec_close(o_fmt_ctx->streams[0]->codec);
    av_freep(&o_fmt_ctx->streams[0]->codec);
    av_freep(&o_fmt_ctx->streams[0]);

    avio_close(o_fmt_ctx->pb);
    av_free(o_fmt_ctx);

    return 0;
}
```
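For completeness: assuming the FFmpeg (or Libav) development packages and their pkg-config files are installed, the program above should build with something like `gcc merge.c -o merge $(pkg-config --cflags --libs libavformat libavcodec libavutil)` and, matching the usage string printed by the code, run as e.g. `./merge part0.mp4 part1.mp4 merged.mp4` (the file names here are placeholders).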