I am writing some images to an AVStream and then reading an mp3 file and writing it to a second AVStream. The problem is that the audio stream ends up slightly shorter than the video stream, so if I then add more images followed by another audio file, the audio drifts out of sync with the video. My idea is therefore to write silent audio data to the audio stream before writing the next audio file, but I don't know how to generate that silent data.
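Concretely, the amount of silence I need is the gap between where the video ends and where the audio ends, converted to samples; roughly like this (the variable names here are only placeholders, not code I already have):

/* sketch: how many samples of silence are needed to pad the audio up to the video's end */
double videoEndSeconds = videoEndPts * libffmpeg::av_q2d(videoStream->time_base);
double audioEndSeconds = audioEndPts * libffmpeg::av_q2d(audioStream->time_base);
int64_t padSamples = (int64_t)((videoEndSeconds - audioEndSeconds) * audioCodecContext->sample_rate);
/* the silence would then be encoded in chunks of audioCodecContext->frame_size samples each */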
I found this post, but I don't know how to calculate the packet size or how to write the packet to the audio stream.
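From the examples I have seen, the general "encode a frame and write the resulting packets" pattern seems to be roughly the following sketch; it uses the newer avcodec_send_frame / avcodec_receive_packet API rather than the avcodec_encode_audio2 call in my code below, and writeFrame is just a placeholder name:

static int writeFrame(libffmpeg::AVFormatContext* formatContext, libffmpeg::AVCodecContext* codecContext, libffmpeg::AVStream* stream, libffmpeg::AVFrame* frame)
{
    /* hand the frame to the encoder */
    int ret = libffmpeg::avcodec_send_frame(codecContext, frame);
    if (ret < 0)
        return ret;
    libffmpeg::AVPacket* pkt = libffmpeg::av_packet_alloc();
    if (!pkt)
        return AVERROR(ENOMEM);
    /* drain every packet the encoder has ready and write it to the muxer */
    while (ret >= 0)
    {
        ret = libffmpeg::avcodec_receive_packet(codecContext, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        {
            ret = 0; /* not an error: the encoder simply needs more input (or is drained) */
            break;
        }
        if (ret < 0)
            break;
        /* rescale the packet timestamps from the codec time base to the stream time base */
        libffmpeg::av_packet_rescale_ts(pkt, codecContext->time_base, stream->time_base);
        pkt->stream_index = stream->index;
        ret = libffmpeg::av_interleaved_write_frame(formatContext, pkt);
        libffmpeg::av_packet_unref(pkt);
    }
    libffmpeg::av_packet_free(&pkt);
    return ret;
}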
This is my most successful attempt so far, but the result (audioTest(0xff).mp4) is far from silent.
/* set up the audio convert context */
libffmpeg::SwrContext* audioConvertContext = libffmpeg::swr_alloc();
libffmpeg::av_opt_set_int(audioConvertContext, "in_channel_count", data->audioCodecContext->channels, 0);
libffmpeg::av_opt_set_int(audioConvertContext, "out_channel_count", data->audioCodecContext->channels, 0);
libffmpeg::av_opt_set_int(audioConvertContext, "in_channel_layout", data->audioCodecContext->channel_layout, 0);
libffmpeg::av_opt_set_int(audioConvertContext, "out_channel_layout", data->audioCodecContext->channel_layout, 0);
libffmpeg::av_opt_set_int(audioConvertContext, "in_sample_rate", data->audioCodecContext->sample_rate, 0);
libffmpeg::av_opt_set_int(audioConvertContext, "out_sample_rate", data->audioCodecContext->sample_rate, 0);
libffmpeg::av_opt_set_sample_fmt(audioConvertContext, "in_sample_fmt", libffmpeg::AV_SAMPLE_FMT_S16, 0);
libffmpeg::av_opt_set_sample_fmt(audioConvertContext, "out_sample_fmt", data->audioCodecContext->sample_fmt, 0);
int ret = libffmpeg::swr_init(audioConvertContext);
if (ret < 0)
{
Helper::ThrowError("Failed to allocate audio reformat context.", ret);
}
/* set up silent frame */
libffmpeg::AVFrame* silentFrame = libffmpeg::av_frame_alloc();
if (!silentFrame)
{
Helper::ThrowError("Failed to allocate audio encode frame.");
}
silentFrame->nb_samples = data->audioCodecContext->frame_size;
silentFrame->format = data->audioCodecContext->sample_fmt;
silentFrame->channel_layout = data->audioCodecContext->channel_layout;
silentFrame->channels = data->audioCodecContext->channels;
silentFrame->sample_rate = data->audioCodecContext->sample_rate;
/* alloc the frame buffer */
ret = libffmpeg::av_frame_get_buffer(silentFrame, 0);
if (ret < 0)
{
Helper::ThrowError("Could not allocate audio data buffers.");
}
int got_output;
int samples_count = 0;
double duration = 4 * (double)data->audioStream->time_base.den / (double)data->audioStream->time_base.num;
while (libffmpeg::av_stream_get_end_pts(data->audioStream) < duration)
{
libffmpeg::AVPacket pkt;
libffmpeg::av_init_packet(&pkt);
ret = libffmpeg::av_frame_make_writable(silentFrame);
if (ret < 0)
{
Helper::ThrowError("Could not make frame writable.");
}
for (int j = 0; j < data->audioCodecContext->frame_size; j++)
{
silentFrame->data[0][2 * j] = 0xff;
for (int k = 1; k < data->audioCodecContext->channels; k++)
{
silentFrame->data[0][2 * j + k] = silentFrame->data[0][2 * j];
}
}
int dst_nb_samples = libffmpeg::av_rescale_rnd(
libffmpeg::swr_get_delay(audioConvertContext, data->audioCodecContext->sample_rate) + silentFrame->nb_samples,
data->audioCodecContext->sample_rate, data->audioCodecContext->sample_rate,
libffmpeg::AV_ROUND_UP);
ret = libffmpeg::swr_convert(
audioConvertContext,
silentFrame->data, dst_nb_samples,
(const libffmpeg::uint8_t**)&silentFrame->data,
silentFrame->nb_samples);
if (ret < 0)
{
Helper::ThrowError("Error while converting audio frame.", ret);
}
silentFrame->pts = libffmpeg::av_rescale_q(samples_count, libffmpeg::AVRational{ 1, data->audioCodecContext->sample_rate }, data->audioCodecContext->time_base);
samples_count += dst_nb_samples;
ret = libffmpeg::avcodec_encode_audio2(data->audioCodecContext, &pkt, silentFrame, &got_output);
if (ret < 0)
{
Helper::ThrowError("Error while encoding audio frame.", ret);
}
if (got_output)
{
pkt.stream_index = data->audioStream->index;
if ((ret = libffmpeg::av_write_frame(data->formatContext, &pkt)) < 0)
{
Helper::ThrowError("Error while writing audio frame.", ret);
}
libffmpeg::av_packet_unref(&pkt);
}
}
libffmpeg::av_frame_free(&silentFrame);
Answer 0 (score: 0)
The mistake was how I was writing to the array. I am not used to C++, so my solution may be a little messy, but at least it works now.
/* set up the audio convert context */
libffmpeg::SwrContext* audioConvertContext = libffmpeg::swr_alloc();
libffmpeg::av_opt_set_int(audioConvertContext, "in_channel_count", data->audioCodecContext->channels, 0);
libffmpeg::av_opt_set_int(audioConvertContext, "out_channel_count", data->audioCodecContext->channels, 0);
libffmpeg::av_opt_set_int(audioConvertContext, "in_channel_layout", data->audioCodecContext->channel_layout, 0);
libffmpeg::av_opt_set_int(audioConvertContext, "out_channel_layout", data->audioCodecContext->channel_layout, 0);
libffmpeg::av_opt_set_int(audioConvertContext, "in_sample_rate", data->audioCodecContext->sample_rate, 0);
libffmpeg::av_opt_set_int(audioConvertContext, "out_sample_rate", data->audioCodecContext->sample_rate, 0);
libffmpeg::av_opt_set_sample_fmt(audioConvertContext, "in_sample_fmt", libffmpeg::AV_SAMPLE_FMT_S16, 0);
libffmpeg::av_opt_set_sample_fmt(audioConvertContext, "out_sample_fmt", data->audioCodecContext->sample_fmt, 0);
int ret = libffmpeg::swr_init(audioConvertContext);
if (ret < 0)
{
Helper::ThrowError("Failed to allocate audio reformat context.", ret);
}
/* set up silent frame */
libffmpeg::AVFrame* silentFrame = libffmpeg::av_frame_alloc();
if (!silentFrame)
{
Helper::ThrowError("Failed to allocate audio encode frame.");
}
silentFrame->nb_samples = data->audioCodecContext->frame_size;
silentFrame->format = data->audioCodecContext->sample_fmt;
silentFrame->channel_layout = data->audioCodecContext->channel_layout;
silentFrame->channels = data->audioCodecContext->channels;
silentFrame->sample_rate = data->audioCodecContext->sample_rate;
/* alloc the frame buffer */
ret = libffmpeg::av_frame_get_buffer(silentFrame, 0);
if (ret < 0)
{
Helper::ThrowError("Could not allocate audio data buffers.");
}
libffmpeg::AVPacket* pkt = libffmpeg::av_packet_alloc();
if (!pkt)
{
Helper::ThrowError("could not allocate the packet.");
}
/* zero-fill a (generously sized) buffer that will serve as one frame of interleaved S16 silence */
void* buffer = malloc(data->audioCodecContext->frame_size * data->audioCodecContext->channels * 16);
for (int i = 0; i < data->audioCodecContext->frame_size * data->audioCodecContext->channels * 2; i++)
{
*((int*)buffer + i) = 0x0;
}
int got_output;
int samples_count = 0;
double duration = 4 * (double)data->audioStream->time_base.den / (double)data->audioStream->time_base.num;
while (libffmpeg::av_stream_get_end_pts(data->audioStream) < duration)
{
libffmpeg::AVPacket pkt;
libffmpeg::av_init_packet(&pkt);
ret = libffmpeg::av_frame_make_writable(silentFrame);
if (ret < 0)
{
Helper::ThrowError("Could not make frame writable.");
}
/* use the zero-filled buffer as the interleaved S16 source for swr_convert below */
silentFrame->data[0] = (libffmpeg::uint8_t*) buffer;
int dst_nb_samples = libffmpeg::av_rescale_rnd(
libffmpeg::swr_get_delay(audioConvertContext, data->audioCodecContext->sample_rate) + silentFrame->nb_samples,
data->audioCodecContext->sample_rate, data->audioCodecContext->sample_rate,
libffmpeg::AV_ROUND_UP);
ret = libffmpeg::swr_convert(
audioConvertContext,
silentFrame->data, dst_nb_samples,
(const libffmpeg::uint8_t**)&silentFrame->data,
silentFrame->nb_samples);
if (ret < 0)
{
Helper::ThrowError("Error while converting audio frame.", ret);
}
silentFrame->pts = libffmpeg::av_rescale_q(samples_count, libffmpeg::AVRational{ 1, data->audioCodecContext->sample_rate }, data->audioCodecContext->time_base);
samples_count += dst_nb_samples;
ret = libffmpeg::avcodec_encode_audio2(data->audioCodecContext, &pkt, silentFrame, &got_output);
if (ret < 0)
{
Helper::ThrowError("Error while encoding audio frame.", ret);
}
if (got_output)
{
pkt.stream_index = data->audioStream->index;
if ((ret = libffmpeg::av_write_frame(data->formatContext, &pkt)) < 0)
{
Helper::ThrowError("Error while writing audio frame.", ret);
}
libffmpeg::av_packet_unref(&pkt);
}
}
free(buffer);
libffmpeg::av_frame_free(&silentFrame);
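As a side note, instead of zeroing a raw buffer by hand, libavutil's av_samples_set_silence() can fill a frame with the correct silence value for its sample format (0 for signed integer and float formats, 0x80 for AV_SAMPLE_FMT_U8). A sketch of how the loop body could fill silentFrame directly in the encoder's format, using the same variables as above:

/* sketch: fill the frame with proper silence in the encoder's own sample format */
ret = libffmpeg::av_frame_make_writable(silentFrame);
if (ret < 0)
{
    Helper::ThrowError("Could not make frame writable.", ret);
}
ret = libffmpeg::av_samples_set_silence(
    silentFrame->extended_data,            /* the frame's audio planes */
    0,                                     /* offset (in samples) to start at */
    silentFrame->nb_samples,               /* samples per channel to fill */
    data->audioCodecContext->channels,     /* number of channels */
    data->audioCodecContext->sample_fmt);  /* the silence value depends on the format */
if (ret < 0)
{
    Helper::ThrowError("Error while filling silent frame.", ret);
}

Since the frame would then already be silent in the encoder's sample format, the SwrContext and the in-place swr_convert call would not be needed for these padding frames.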