avcodec_receive_packet() doesn't see any output

Date: 2018-02-27 19:28:49

Tags: ios encoding ffmpeg avcodec

I'm trying to create a converter that makes a video out of a set of images. Everything is in place: the AVFormatContext, the AVCodecContext, and the AVCodec. I create a YUV AVFrame out of a UIImage and send it to the encoder with avcodec_send_frame(). Everything goes fine until I try to get an AVPacket back with avcodec_receive_packet(). Every time it returns -35, which means that output is not available in the current state and the user must try to send input. But as I said, I do send an input before trying to receive anything, and the send succeeds.

Here's my code:

Initialization of the ffmpeg entities:

- (BOOL)setupForConvert:(DummyFVPVideoFile *)videoFile outputPath:(NSString *)path
{
    if (!videoFile) {
        [self.delegate convertationFailed:@"VideoFile is nil!"];
        return NO;
    }
    currentVideoFile = videoFile;
    outputPath = path;
    BOOL success = NO;

    success = [self initFormatCtxAndCodecs:path];
    if (!success) {
        return NO;
    }

    success = [self addCameraStreams:videoFile];
    if (!success) {
        return NO;
    }

    success = [self openIOContext:path];
    if (!success) {
        return NO;
    }

    return YES;
}

- (BOOL)initFormatCtxAndCodecs:(NSString *)path
{
    //AVOutputFormat *fmt = av_guess_format("mp4", NULL, NULL);
    int ret = avformat_alloc_output_context2(&pFormatCtx, NULL, NULL, [path UTF8String]);
    if (ret < 0) {
        NSLog(@"Couldn't create output context");
        return NO;
    }

    //encoder codec init
    pCodec = avcodec_find_encoder(AV_CODEC_ID_H264);
    if (!pCodec) {
        NSLog(@"Couldn't find a encoder codec!");
        return NO;
    }

    pCodecCtx = avcodec_alloc_context3(pCodec);
    if (!pCodecCtx) {
        NSLog(@"Couldn't alloc encoder codec context!");
        return NO;
    }

    pCodecCtx->codec_tag = AV_CODEC_ID_H264;
    pCodecCtx->bit_rate = 400000;
    pCodecCtx->width = currentVideoFile.size.width;
    pCodecCtx->height = currentVideoFile.size.height;
    pCodecCtx->time_base = (AVRational){1, (int)currentVideoFile.framerate};
    pCodecCtx->framerate = (AVRational){(int)currentVideoFile.framerate, 1};
    pCodecCtx->gop_size = 10;
    pCodecCtx->max_b_frames = 1;
    pCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;

    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
        NSLog(@"Couldn't open the encoder codec!");
        return NO;
    }

    pPacket = av_packet_alloc();

    return YES;
}

- (BOOL)addCameraStreams:(DummyFVPVideoFile *)videoFile
{
    AVCodecParameters *params = avcodec_parameters_alloc();
    if (!params) {
        NSLog(@"Couldn't allocate codec parameters!");
        return NO;
    }

    if (avcodec_parameters_from_context(params, pCodecCtx) < 0) {
        NSLog(@"Couldn't copy parameters from context!");
        return NO;
    }

    for (int i = 0; i < videoFile.idCameras.count - 1; i++)
    {
        NSString *path = [videoFile.url URLByAppendingPathComponent:videoFile.idCameras[i]].path;
        AVStream *stream = avformat_new_stream(pFormatCtx, pCodec);
        if (!stream) {
            NSLog(@"Couldn't alloc stream!");
            return NO;
        }

        if (avcodec_parameters_copy(stream->codecpar, params) < 0) {
            NSLog(@"Couldn't copy parameters into stream!");
            return NO;
        }

        stream->avg_frame_rate.num = videoFile.framerate;
        stream->avg_frame_rate.den = 1;
        stream->codecpar->codec_tag = 0;    //some silly workaround
        stream->index = i;
        streams[path] = [[VideoStream alloc] initWithStream:stream];
    }

    return YES;
}

- (BOOL)openIOContext:(NSString *)path
{
    AVIOContext *ioCtx = nil;
    if (avio_open(&ioCtx, [path UTF8String], AVIO_FLAG_WRITE) < 0) {
        return NO;
    }
    pFormatCtx->pb = ioCtx;

    return YES;
}

And here's the conversion process:

- (void)launchConvert:(DummyFVPVideoFile *)videoFile
{
    BOOL convertInProgress = YES;
    unsigned int frameCount = 1;
    unsigned long pts = 0;
    BOOL success = NO;

    success = [self writeHeader];
    if (!success) {
        NSLog(@"Couldn't write header!");
        return;
    }

    AVRational defaultTimeBase;
    defaultTimeBase.num = 1;
    defaultTimeBase.den = videoFile.framerate;
    AVRational streamTimeBase = streams.allValues.firstObject.stream->time_base;

    while (convertInProgress)
    {
        pts += av_rescale_q(1, defaultTimeBase, streamTimeBase);
        for (NSString *path in streams.allKeys)
        {
            UIImage *img = [UIImage imageWithContentsOfFile:[NSString stringWithFormat:@"%@/%u.jpg", path, frameCount]];
            AVPacket *pkt = [self getAVPacket:img withPts:pts];
            if (!pkt->data) {   continue;   }
            pkt->stream_index = streams[path].stream->index;
            //check all settings of pkt

            if (![self writePacket:pkt]) {
                NSLog(@"Couldn't write packet!");
                convertInProgress = NO;
                break;
            }
        }

        frameCount++;
    }

    success = [self writeTrailer];
    if (!success) {
        NSLog(@"Couldn't write trailer!");
        return;
    }

    NSLog(@"Convertation finished!");
    //delegate convertationFinished method
}

- (BOOL)writeHeader
{
    if (avformat_write_header(pFormatCtx, NULL) < 0) {
        return NO;
    }

    return YES;
}

- (BOOL)writePacket:(AVPacket *)pkt
{
    if (av_interleaved_write_frame(pFormatCtx, pkt) != 0) {
        return NO;
    }

    return YES;
}

- (BOOL)writeTrailer
{
    if (av_write_trailer(pFormatCtx) != 0) {
        return NO;
    }

    return YES;
}


/**
 This method will create AVPacket out of UIImage.

 @return AVPacket
 */
- (AVPacket *)getAVPacket:(UIImage *)img withPts:(unsigned long)pts
{
    if (!img) {
        NSLog(@"imgData is nil!");
        return nil;
    }
    uint8_t *imgData = [self getPixelDataFromImage:img];

    AVFrame *frame_yuv = av_frame_alloc();
    if (!frame_yuv) {
        NSLog(@"frame_yuv is nil!");
        return nil;
    }
    frame_yuv->format = AV_PIX_FMT_YUV420P;
    frame_yuv->width = (int)img.size.width;
    frame_yuv->height = (int)img.size.height;

    int ret = av_image_alloc(frame_yuv->data,
                               frame_yuv->linesize,
                               frame_yuv->width,
                               frame_yuv->height,
                               frame_yuv->format,
                               32);
    if (ret < 0) {
        NSLog(@"Couldn't alloc yuv frame!");
        return nil;
    }

    struct SwsContext *sws_ctx = nil;
    sws_ctx = sws_getContext((int)img.size.width, (int)img.size.height, AV_PIX_FMT_RGB24,
                             (int)img.size.width, (int)img.size.height, AV_PIX_FMT_YUV420P,
                             0, NULL, NULL, NULL);
    const uint8_t *scaleData[1] = { imgData };
    int inLineSize[1] = { (int)(4 * img.size.width) };
    sws_scale(sws_ctx, scaleData, inLineSize, 0, (int)img.size.height, frame_yuv->data, frame_yuv->linesize);

    frame_yuv->pict_type = AV_PICTURE_TYPE_I;
    frame_yuv->pts = pCodecCtx->frame_number;

    ret = avcodec_send_frame(pCodecCtx, frame_yuv);   //every time everything is fine
    if (ret != 0) {
        NSLog(@"Couldn't send yuv frame!");
        return nil;
    }

    av_init_packet(pPacket);
    pPacket->dts = pPacket->pts = pts;
    do {
        ret = avcodec_receive_packet(pCodecCtx, pPacket);   //every time -35 error
        NSLog(@"ret = %d", ret);
        if (ret == AVERROR_EOF) {
            NSLog(@"AVERROR_EOF!");
        } else if (ret == AVERROR(EAGAIN)) {
            NSLog(@"AVERROR(EAGAIN)");
        } else if (ret == AVERROR(EINVAL)) {
            NSLog(@"AVERROR(EINVAL)");
        }
        if (ret != 0) {
            NSLog(@"Couldn't receive packet!");
            //return nil;
        }
    } while ( ret == 0 );

    free(imgData);
    sws_freeContext(sws_ctx);           // don't leak the scaler context
    av_freep(&frame_yuv->data[0]);      // frees the planes from av_image_alloc()
    av_frame_free(&frame_yuv);
    // pPacket is returned to the caller, so it must not be unref'd or freed here

    return pPacket;
}

Any insight would be helpful. Thanks!

1 Answer:

Answer 0 (score: 1)

There could be two reasons.

  1. According to FFmpeg's documentation, you may need to feed avcodec_send_frame() with more than one frame before avcodec_receive_packet() returns a packet successfully (see the sketch after this list).

  2. I can't confirm that you allocated a buffer of sufficient size for pPacket. Neither av_packet_alloc() nor av_init_packet() allocates any buffer; the latter just sets it to NULL. So the allocation has to happen after the init. You should allocate the buffer somewhere, either manually or with av_new_packet(pPacket, SIZE) (a second sketch follows below).

Hope that helps.
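
To illustrate the first point: AVERROR(EAGAIN) (-35 on iOS, where EAGAIN is 35) only means the encoder is still buffering input, so you keep feeding frames and collect packets whenever they become available. Here is a minimal sketch of that send/receive loop in a hypothetical encodeFrame helper, reusing the question's pCodecCtx, pFormatCtx and pPacket; the stream_index and timestamp handling from the question is reduced to a comment:

static int encodeFrame(AVCodecContext *pCodecCtx, AVFrame *frame,
                       AVFormatContext *pFormatCtx, AVPacket *pPacket)
{
    // Pass frame == NULL at the end of the stream to flush the delayed packets.
    int ret = avcodec_send_frame(pCodecCtx, frame);
    if (ret < 0) {
        return ret;
    }

    while (ret >= 0) {
        ret = avcodec_receive_packet(pCodecCtx, pPacket);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            return 0;   // no packet available yet: go send the next frame
        } else if (ret < 0) {
            return ret; // a real encoding error
        }

        // set pPacket->stream_index and rescale pts/dts here, then write;
        // av_interleaved_write_frame() takes ownership of the packet's data
        ret = av_interleaved_write_frame(pFormatCtx, pPacket);
    }
    return ret;
}

With H.264 and max_b_frames = 1, the first calls will legitimately return EAGAIN: the encoder's initial delay has to pass before packets start arriving, and the last ones only come out after the flush call with frame == NULL.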
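
And for the second point, a sketch of the suggested allocation for a packet you fill yourself (PAYLOAD_SIZE is a placeholder for whatever buffer size you need, not an FFmpeg constant):

AVPacket *pkt = av_packet_alloc();      // allocates the AVPacket struct only; pkt->data stays NULL
if (!pkt) {
    return NO;
}
if (av_new_packet(pkt, PAYLOAD_SIZE) < 0) {  // allocates a zero-padded payload of PAYLOAD_SIZE bytes
    av_packet_free(&pkt);
    return NO;
}
// ... fill pkt->data by hand ...
av_packet_free(&pkt);                   // unrefs the payload and frees the struct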