使用ANativeWindow渲染位图

时间:2016-02-19 00:40:54

标签: android video ffmpeg android-ndk java-native-interface

我正在解码视频帧并尝试渲染使用Android的ANativeWindow API和SurfaceView。我知道我正在成功解码帧,因为我的演示应用程序将解码(和重新编码)的帧作为位图返回并将其显示在ImageView(底部图像)中。但是,当尝试将解码帧绘制到SurfaceView时,我得到了垃圾输出(顶部图像)。有人可以解释原因吗?

const int TARGET_IMAGE_FORMAT = AV_PIX_FMT_RGBA;
const int TARGET_IMAGE_CODEC = AV_CODEC_ID_PNG;

void convert_image(State *state, AVCodecContext *pCodecCtx, AVFrame *pFrame, AVPacket *avpkt, int *got_packet_ptr, int width, int height) {
        AVCodecContext *codecCtx;
        AVCodec *codec;
        AVFrame *frame;

        *got_packet_ptr = 0;

        if (width == -1) {
            width = pCodecCtx->width;
        }

        if (height == -1) {
            height = pCodecCtx->height;
        }

        codec = avcodec_find_encoder(TARGET_IMAGE_CODEC);
        if (!codec) {
            printf("avcodec_find_decoder() failed to find decoder\n");
            goto fail;
        }

        codecCtx = avcodec_alloc_context3(codec);
        if (!codecCtx) {
            printf("avcodec_alloc_context3 failed\n");
            goto fail;
        }

        codecCtx->bit_rate = pCodecCtx->bit_rate;
        //codecCtx->width = pCodecCtx->width;
        //codecCtx->height = pCodecCtx->height;
        codecCtx->width = width;
        codecCtx->height = height;
        codecCtx->pix_fmt = TARGET_IMAGE_FORMAT;
        codecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
        codecCtx->time_base.num = pCodecCtx->time_base.num;
        codecCtx->time_base.den = pCodecCtx->time_base.den;

        if (!codec || avcodec_open2(codecCtx, codec, NULL) < 0) {
            printf("avcodec_open2() failed\n");
            goto fail;
        }

        frame = av_frame_alloc();

        if (!frame) {
            goto fail;
        }

        // Determine required buffer size and allocate buffer
        int numBytes = avpicture_get_size(TARGET_IMAGE_FORMAT, codecCtx->width, codecCtx->height);
        void * buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));

        printf("wqwq %d\n", numBytes);

        avpicture_fill(((AVPicture *)frame),
                buffer,
                TARGET_IMAGE_FORMAT,
                codecCtx->width,
                codecCtx->height);

        avpicture_alloc(((AVPicture *)frame),
                TARGET_IMAGE_FORMAT,
                codecCtx->width,
                codecCtx->height);

        struct SwsContext *scalerCtx = sws_getContext(pCodecCtx->width, 
                pCodecCtx->height, 
                pCodecCtx->pix_fmt,
                //pCodecCtx->width,
                //pCodecCtx->height,
                width,
                height,
                TARGET_IMAGE_FORMAT, 
                SWS_FAST_BILINEAR, 0, 0, 0);

        if (!scalerCtx) {
            printf("sws_getContext() failed\n");
            goto fail;
        }

        sws_scale(scalerCtx,
                (const uint8_t * const *) pFrame->data,
                pFrame->linesize,
                0,
                pFrame->height,
                frame->data,
                frame->linesize);

        int ret = avcodec_encode_video2(codecCtx, avpkt, frame, got_packet_ptr);

        // code to draw the re-encoded frame on the surface view
        if (state->native_window) {    
            ANativeWindow_Buffer windowBuffer;

            if (ANativeWindow_lock(state->native_window, &windowBuffer, NULL) == 0) {
                memcpy(windowBuffer.bits, avpkt->data, windowBuffer.width * windowBuffer.height * 4);
                ANativeWindow_unlockAndPost(state->native_window);
            }
        }

        if (ret < 0) {
            *got_packet_ptr = 0;
        }

        fail:
        av_free(frame);

        free(buffer);

        if (codecCtx) {
            avcodec_close(codecCtx);
            av_free(codecCtx);
        }

        if (scalerCtx) {
            sws_freeContext(scalerCtx);
        }

        if (ret < 0 || !*got_packet_ptr) {
            av_free_packet(avpkt);
        }
    }

enter image description here

0 个答案:

没有答案