FFmpeg creates invalid output when using exotic resolutions

Time: 2019-02-09 01:54:57

Tags: c++ ffmpeg

I am using FFmpeg to encode raw YUV frames and mux them into an .mp4 file. This worked fine until I tried more exotic resolutions such as 1440x1080.

After reviewing my code and updating FFmpeg to the latest nightly build, I created this MCVE:

#include <iostream>

extern "C" {
#include <libavutil/channel_layout.h>
#include <libavutil/opt.h>
#include <libavutil/mathematics.h>
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>
}

#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P

using namespace std;

struct FFmpegEncoder {
    AVStream *avStream;
    AVFormatContext *avFormatContext;
    AVOutputFormat *avOutputFormat;
    AVCodecContext *avCodecContext;
    AVCodec *avCodec;

    int64_t nextFrameIndex = 0;
    AVFrame *frame;

    void open(int width, int height, int fps, const char* path);
    virtual void encode(AVFrame* frame, int frameLength);
    virtual void close();

    FFmpegEncoder();
};

void fill_yuv_image3(unsigned char **pict, int frame_index, int width, int height) {
    int x, y, i;
    i = frame_index;

    for (y = 0; y < height; y++)
        for (x = 0; x < width; x++)
            pict[0][y * width + x] = x + y + i * 3;

    for (y = 0; y < height / 2; y++) {
        for (x = 0; x < width / 2; x++) {
            pict[1][y * (width / 2) + x] = 128 + y + i * 2;
            pict[2][y * (width / 2) + x] = 64 + x + i * 5;
        }
    }
}

void FFmpegEncoder::open(int width, int height, int fps, const char* filename) {
    avformat_alloc_output_context2(&avFormatContext, NULL, NULL, filename);
    avOutputFormat = avFormatContext->oformat;
    avCodec = avcodec_find_encoder(AV_CODEC_ID_H264);
    avStream = avformat_new_stream(avFormatContext, NULL);
    avStream->id = avFormatContext->nb_streams - 1;
    avCodecContext = avcodec_alloc_context3(avCodec);

    avCodecContext->codec_id = AV_CODEC_ID_H264;
    avCodecContext->bit_rate = width * height * fps;
    avCodecContext->width = width;
    avCodecContext->height = height;
    avStream->time_base.den = fps;
    avStream->time_base.num = 1;
    avCodecContext->time_base = avStream->time_base;
    avCodecContext->gop_size = 15;
    avCodecContext->pix_fmt = STREAM_PIX_FMT;
    avCodecContext->thread_count = 16;

    if (avFormatContext->oformat->flags & AVFMT_GLOBALHEADER) {
        avCodecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    }

    avcodec_open2(avCodecContext, avCodec, NULL);

    frame = av_frame_alloc();
    frame->format = avCodecContext->pix_fmt;
    frame->width = avCodecContext->width;
    frame->height = avCodecContext->height;
    av_frame_get_buffer(frame, 0);

    avcodec_parameters_from_context(avStream->codecpar, avCodecContext);
    av_dump_format(avFormatContext, 0, filename, 1);

    avio_open(&avFormatContext->pb, filename, AVIO_FLAG_WRITE);
    avformat_write_header(avFormatContext, NULL);
}

void FFmpegEncoder::encode(AVFrame* frame, int _frameLength) {
    AVPacket* pkt = new AVPacket();

    av_init_packet(pkt);

    frame->pts = nextFrameIndex++;

    avcodec_send_frame(avCodecContext, frame);

    if (avcodec_receive_packet(avCodecContext, pkt) == 0) {
        av_packet_rescale_ts(pkt, avCodecContext->time_base, avStream->time_base);
        pkt->stream_index = avStream->index;
        av_write_frame(avFormatContext, pkt);
    }
    delete pkt;
}

void FFmpegEncoder::close() {
    av_write_trailer(avFormatContext);
    avcodec_free_context(&avCodecContext);
    av_frame_free(&frame);

    if (!(avOutputFormat->flags & AVFMT_NOFILE)) {
        avio_closep(&avFormatContext->pb);
    }
    avformat_free_context(avFormatContext);
}

FFmpegEncoder::FFmpegEncoder() {

}


int main(int argc, char **argv) {
    FFmpegEncoder encoder;
    int width = 1440; //when using 1920 here it works fine
    int height = 1080;

    encoder.open(width, height, 30, "testoutput.mp4");
    int frameCount = 200;

    // Allocate test frames
    unsigned char*** frames = new unsigned char**[frameCount];
    for (int i = 0; i < frameCount; i++) {
        frames[i] = new unsigned char*[3];
        frames[i][0] = new unsigned char[width * height];
        frames[i][1] = new unsigned char[(width / 2) * (height / 2)];
        frames[i][2] = new unsigned char[(width / 2) * (height / 2)];
        fill_yuv_image3(frames[i], i, width, height);
    }

    AVFrame* avFrame = av_frame_alloc();
    avFrame->format = STREAM_PIX_FMT;
    avFrame->width = width;
    avFrame->height = height;
    av_frame_get_buffer(avFrame, 0);

    //start encoding
    for (int i = 0; i < frameCount; i++) {
        memcpy(avFrame->data[0], frames[i][0], width * height);
        memcpy(avFrame->data[1], frames[i][1], (width / 2) * (height / 2));
        memcpy(avFrame->data[2], frames[i][2], (width / 2) * (height / 2));
        encoder.encode(avFrame, 0);
    }
    encoder.close();

    return 0;
}

I know the code is still quite long, but I have already stripped out the error handling to keep it short.

Please note:

  • The output file is playable in all common players and looks the same in each of them
  • When I change the width to a more common value (e.g. 1280, 1600, 1920), the output looks perfectly fine
  • I tried the following codecs: MPEG4, x264, openh264

Since creating a file with the same dimensions using ffmpeg from the command line:

ffmpeg -i valid1920x1080.mp4 -s 1440x1080 -c:a copy output.mp4

produces valid output, it must be possible.

Which setting is wrong? I have looked at the (outdated) muxing and encoding examples, but I cannot figure out what I am doing wrong.

1 Answer:

Answer 0 (score: 2):

All planes of a frame sent to the encoder are aligned to a linesize that is a multiple of 32 (or more). For the YUV420P format, the chroma planes are half the width of the luma plane, so for a frame width of 1440 the chroma width is 720, and 720 % 32 != 0; the encoder frame's chroma rows are therefore padded (720 rounds up to 736). The source U and V buffers, however, were allocated with the unpadded size of (width / 2) x (height / 2). When the contiguous memcpy is performed, row boundaries no longer line up: the first bytes of each following source row (16 bytes per row in this case) land in the destination's stride padding, shifting every subsequent row further. This produces the visual distortion shown in the question's image.
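To see the padding concretely, you can inspect the linesizes that av_frame_get_buffer picks. A minimal standalone check (not part of the original post; the exact values may vary by platform and build):

#include <cstdio>

extern "C" {
#include <libavutil/frame.h>
}

int main() {
    AVFrame* f = av_frame_alloc();
    f->format = AV_PIX_FMT_YUV420P;
    f->width = 1440;
    f->height = 1080;
    av_frame_get_buffer(f, 0); // align = 0 lets FFmpeg pick the alignment
    // On a typical x86 build this prints "linesize: 1440 / 736 / 736":
    // the 720-byte chroma rows are padded up to 736.
    printf("linesize: %d / %d / %d\n", f->linesize[0], f->linesize[1], f->linesize[2]);
    av_frame_free(&f);
    return 0;
}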

The correction is to pad the raw chroma-plane buffers to the stride-aligned size. The OP's modification, linked in the comments, is:

void fill_yuv_imageY(unsigned char **pict, int frame_index, int width, int height) {
    int x, y, i;
    i = frame_index;

    for (y = 0; y < height; y++)
        for (x = 0; x < width; x++)
            pict[0][y * width + x] = x + y + i * 3;
}

void fill_yuv_imageUV(unsigned char **pict, int frame_index, int halfWidth, int height) {
    int x, y, i;
    i = frame_index; // the original snippet left i uninitialized
    for (y = 0; y < height / 2; y++) {
        for (x = 0; x < halfWidth; x++) {
            pict[1][y * halfWidth + x] = 128 + y + i * 2;
            pict[2][y * halfWidth + x] = 64 + x + i * 5;
        }
    }
}

int roundUp(int numToRound, int multiple){
    if (multiple == 0)
        return numToRound;

    int remainder = numToRound % multiple;
    if (remainder == 0)
        return numToRound;

    return numToRound + multiple - remainder;
}

//Allocating test frames
unsigned char*** frames = new unsigned char**[frameCount];
for (int i = 0; i < frameCount; i++) {
    frames[i] = new unsigned char*[3];
    frames[i][0] = new unsigned char[width * height];
    fill_yuv_imageY(frames[i], i, width, height);
    frames[i][1] = new unsigned char[roundUp(width / 2, 32) * (height / 2)];
    frames[i][2] = new unsigned char[roundUp(width / 2, 32) * (height / 2)];
    fill_yuv_imageUV(frames[i], i, roundUp(width / 2, 32), height);
}
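Note that this fix implicitly assumes av_frame_get_buffer(frame, 0) chose a 32-byte-aligned chroma linesize, and the memcpy calls in main() must grow to roundUp(width / 2, 32) * (height / 2) bytes per chroma plane to match. A stride-agnostic alternative (my sketch, not from the original answer) is to keep the unpadded source buffers and copy row by row with av_image_copy_plane, which honors both the source and the destination stride:

extern "C" {
#include <libavutil/frame.h>
#include <libavutil/imgutils.h>
}

// Hypothetical helper: copies tightly packed YUV420P planes (as in the
// question's `frames` arrays) into an AVFrame, respecting the frame's
// padded linesize on the destination side.
void copy_planes_to_frame(AVFrame* dst, unsigned char** src, int width, int height) {
    av_image_copy_plane(dst->data[0], dst->linesize[0],
                        src[0], width, width, height);              // Y
    av_image_copy_plane(dst->data[1], dst->linesize[1],
                        src[1], width / 2, width / 2, height / 2);  // U
    av_image_copy_plane(dst->data[2], dst->linesize[2],
                        src[2], width / 2, width / 2, height / 2);  // V
}

With this helper, the three memcpy calls in the encoding loop collapse into a single copy_planes_to_frame(avFrame, frames[i], width, height), and no padding of the source buffers is needed.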