为什么在转换YUV_420_888到Android NDK图像中的RGBA_8888旋转90度?

时间:2017-11-15 09:51:17

标签: android android-ndk yuv rgba

我在NDK的本机方法中,通过以下代码将ImageReader输出的YUV格式图像转换为RGBA格式:

// Total output size in bytes; used below as a backwards-running write cursor.
size_t bufferSize = buffer.width * buffer.height * (size_t)4;
uint8_t * outPtr = reinterpret_cast<uint8_t *>(buffer.bits);
// NOTE(review): this addresses rows implicitly via bufferSize, i.e. assumes the
// window row pitch equals buffer.width; if buffer.stride != buffer.width the
// output will be skewed — confirm against ANativeWindow_Buffer docs.
for (size_t y = 0; y < srcHeight; y++)
{
    // Row pointers: Y plane at full resolution; U/V planes at half vertical
    // resolution (YUV 4:2:0), hence the y >> 1.
    uint8_t * Y_rowPtr = srcYPtr + y * Y_rowStride;
    uint8_t * U_rowPtr = srcUPtr + (y >> 1) * U_rowStride;
    uint8_t * V_rowPtr = srcVPtr + (y >> 1) * V_rowStride;

    for (size_t x = 0; x < srcWidth; x++)
    {
        // Each 2x2 block of luma samples shares one chroma sample (x >> 1).
        uint8_t Y = Y_rowPtr[x];
        uint8_t U = U_rowPtr[(x >> 1)];
        uint8_t V = V_rowPtr[(x >> 1)];

        // YUV -> RGB using power-of-two-friendly coefficient approximations.
        double R = (Y + (V - 128) * 1.40625);
        double G = (Y - (U - 128) * 0.34375 - (V - 128) * 0.71875);
        double B = (Y + (U - 128) * 1.765625);

        // The decrementing cursor stores bytes as R,G,B,A in memory but fills
        // the image from the last pixel backwards: the result is the source
        // rotated 180° — which is NOT the 90° rotation being asked about
        // (that comes from the camera sensor's landscape orientation).
        *(outPtr + (--bufferSize)) = 255; // alpha channel, fully opaque (mislabeled "gamma" before)
        *(outPtr + (--bufferSize)) = (uint8_t) (B > 255 ? 255 : (B < 0 ? 0 : B));
        *(outPtr + (--bufferSize)) = (uint8_t) (G > 255 ? 255 : (G < 0 ? 0 : G));
        *(outPtr + (--bufferSize)) = (uint8_t) (R > 255 ? 255 : (R < 0 ? 0 : R));

    }
}

为什么图像旋转了90度?

更新:

我使用此转换: https://www.fourcc.org/fccyvrgb.php 但图像仍保持原始旋转90°。

UPDATE2:

       @Override
    public void onImageAvailable(ImageReader reader) {
        // Fetch the most recent frame delivered by the camera.
        Image image = reader.acquireNextImage();
        if (image == null) {
            return;
        }

        // Collect the three YUV planes once and hand them to native code
        // for the RGBA conversion.
        Image.Plane[] planes = image.getPlanes();
        int yStride = planes[0].getRowStride();
        // In YUV images the U and V planes share the same row stride.
        int uvStride = planes[1].getRowStride();

        JNIUtils.RGBADisplay(image.getWidth(), image.getHeight(),
                yStride, planes[0].getBuffer(),
                uvStride, planes[1].getBuffer(),
                uvStride, planes[2].getBuffer(),
                surface);

        image.close();
    }

更新3:代码到native.cpp

Java_com_ndkvideoimagecapture_JNIUtils_RGBADisplay(
    JNIEnv *env, // JNI environment; gives access to the direct ByteBuffers
    jobject obj,
    jint srcWidth,
    jint srcHeight,
    jint Y_rowStride,
    jobject Y_Buffer,
    jint U_rowStride,
    jobject U_Buffer,
    jint V_rowStride,
    jobject V_Buffer,
    jobject surface) {

// Raw pointers into the camera frame's Y/U/V planes (direct NIO buffers).
uint8_t *srcYPtr = reinterpret_cast<uint8_t *>(env->GetDirectBufferAddress(Y_Buffer));
uint8_t *srcUPtr = reinterpret_cast<uint8_t *>(env->GetDirectBufferAddress(U_Buffer));
uint8_t *srcVPtr = reinterpret_cast<uint8_t *>(env->GetDirectBufferAddress(V_Buffer));

ANativeWindow *window = ANativeWindow_fromSurface(env, surface);
ANativeWindow_acquire(window);
ANativeWindow_Buffer buffer;

ANativeWindow_setBuffersGeometry(window, srcWidth, srcHeight, WINDOW_FORMAT_RGBA_8888);

if (int32_t err = ANativeWindow_lock(window, &buffer, NULL)) {
    LOGE("ANativeWindow_lock failed with error code: %d\n", err);
    ANativeWindow_release(window);
    // BUG FIX: previously execution fell through here and dereferenced the
    // uninitialized buffer.bits after releasing the window.
    return;
}

uint8_t *outPtr = reinterpret_cast<uint8_t *>(buffer.bits);
// BUG FIX: the window's row pitch is buffer.stride (in pixels), which may be
// larger than buffer.width; addressing rows by width alone skews the image.
const size_t outRowBytes = (size_t)buffer.stride * 4;
// Clamp to the window's dimensions so a smaller-than-source buffer cannot be
// overrun (the old backwards byte counter would underflow in that case).
const int32_t rows = srcHeight < buffer.height ? srcHeight : buffer.height;
const int32_t cols = srcWidth  < buffer.width  ? srcWidth  : buffer.width;

for (int32_t y = 0; y < rows; y++)
{
    // Y plane at full resolution; U/V planes at half vertical resolution (4:2:0).
    uint8_t *Y_rowPtr = srcYPtr + y * Y_rowStride;
    uint8_t *U_rowPtr = srcUPtr + (y >> 1) * U_rowStride;
    uint8_t *V_rowPtr = srcVPtr + (y >> 1) * V_rowStride;

    // Preserve the original backwards fill order: source pixel (x, y) lands at
    // (width-1-x, height-1-y), i.e. the output is the source rotated 180°.
    uint8_t *outRowPtr = outPtr + (size_t)(buffer.height - 1 - y) * outRowBytes;

    for (int32_t x = 0; x < cols; x++)
    {
        // Each 2x2 luma block shares one chroma sample (x >> 1).
        uint8_t Y = Y_rowPtr[x];
        uint8_t U = U_rowPtr[x >> 1];
        uint8_t V = V_rowPtr[x >> 1];

        // YUV -> RGB with power-of-two-friendly coefficient approximations.
        double R = (Y + (V - 128) * 1.40625);
        double G = (Y - (U - 128) * 0.34375 - (V - 128) * 0.71875);
        double B = (Y + (U - 128) * 1.765625);

        uint8_t *px = outRowPtr + (size_t)(buffer.width - 1 - x) * 4;
        px[0] = (uint8_t)(R > 255 ? 255 : (R < 0 ? 0 : R));
        px[1] = (uint8_t)(G > 255 ? 255 : (G < 0 ? 0 : G));
        px[2] = (uint8_t)(B > 255 ? 255 : (B < 0 ? 0 : B));
        px[3] = 255; // alpha channel, fully opaque (was mislabeled "gamma")
    }
}

ANativeWindow_unlockAndPost(window);
ANativeWindow_release(window);
}

1 个答案:

答案 0 :(得分:3)

我建议您阅读this related discussion(也有很好的截图,不要错过它们)。

TL;DR:ImageReader始终返回横向(landscape)的图像,与onPreviewFrame()的行为相同。

如果将相机连接到纹理表面,显示效果会比程序可以执行的YUV➤RGB转换效率高得多,但我知道有很多情况需要RGB数据进行图像处理,并且有时你希望图像处理的结果显示为实时预览。

这实际上是OpenCV处理Android摄像头和实时预览的方式(不幸的是官方OpenCV不使用 camera2 API,但我发现one tutorial显示了这是怎么回事done)。

我强烈建议使用Renderscript进行YUV➤RGB转换。它不仅速度快,而且与在CPU中执行相比还可以节省大量电量。

与您的代码完全不同的一个问题是它假设显示窗口的宽高比与您从相机接收的图像相同,不是吗?

你不应该依赖于此。即使您固定90°旋转(如果窗口是纵向),您也可以see the image distorted。这对于camera2来说并不特殊,同样can happen with the deprecated camera API

但是,在您的情况下,解决方案可能会有所不同。您可以跳过一些输入像素以保持宽高比正确,而不是调整用于显示实时预览的窗口的几何图形。

简而言之,您需要

// Aspect ratios of the incoming camera frame and of the display window.
float srcAspectRatio = (float)srcWidth/srcHeight;
float outAspectRatio = (float)buffer.width/buffer.height;

// Clip the source on one axis so its aspect ratio matches the window's;
// the + 0.5 rounds the scaled dimension to the nearest integer.
int clippedSrcWidth =  outAspectRatio > srcAspectRatio ? srcWidth : (int)(0.5 + srcHeight*outAspectRatio);
int clippedSrcHeight = outAspectRatio < srcAspectRatio ? srcHeight : (int)(0.5 + srcWidth/outAspectRatio);

ANativeWindow_setBuffersGeometry(window, clippedSrcWidth, clippedSrcHeight, WINDOW_FORMAT_RGBA_8888);

// Skip the clipped rows symmetrically so the visible region stays centered.
for (size_t y = (srcHeight-clippedSrcHeight)/2; y < srcHeight - (srcHeight-clippedSrcHeight)/2; y++)

等等。

使用倒序(从末尾递减)计数器访问 outPtr 的像素,您或许本意是想对前置摄像头执行mirroring(镜像),不是吗?