I'm developing a camera app based on the Camera2 API, and I have found several problems using libyuv. I want to convert YUV_420_888 images retrieved from an ImageReader, but I'm having some problems with scaling in a reprocessable surface.
In essence, the images come out with tones of green instead of the corresponding tones (I'm exporting the .yuv files and checking them using http://rawpixels.net/).
You can see an input example here: (input image not included)
And what I get after I perform scaling: (output image not included)
I think I am doing something wrong with the strides, or providing an invalid YUV format (maybe I have to transform the image to another format first?). However, I can't figure out where the error is, since I don't know how to connect the green tint to the scaling algorithm.
This is the conversion code I am using; you can ignore the return NULL, as there is further processing that is not related to the problem.
#include <jni.h>
#include <stdint.h>
#include <android/log.h>
#include <inc/libyuv/scale.h>
#include <inc/libyuv.h>
#include <stdio.h>
#define LOG_TAG "libyuv-jni"
#define unused(x) UNUSED_ ## x __attribute__((__unused__))
#define LOGD(...) __android_log_print(ANDROID_LOG_DEBUG, LOG_TAG, __VA_ARGS__)
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR, LOG_TAG, __VA_ARGS__)
struct YuvFrame {
int width;
int height;
uint8_t *data;
uint8_t *y;
uint8_t *u;
uint8_t *v;
};
static struct YuvFrame i420_input_frame;
static struct YuvFrame i420_output_frame;
extern "C" {
JNIEXPORT jbyteArray JNICALL
Java_com_android_camera3_camera_hardware_session_output_photo_yuv_YuvJniInterface_scale420YuvByteArray(
JNIEnv *env, jclass /*clazz*/, jbyteArray yuvByteArray_, jint src_width, jint src_height,
jint out_width, jint out_height) {
jbyte *yuvByteArray = env->GetByteArrayElements(yuvByteArray_, NULL);
//Get input and output length
int input_size = env->GetArrayLength(yuvByteArray_);
int out_size = out_height * out_width;
//Generate input frame
i420_input_frame.width = src_width;
i420_input_frame.height = src_height;
i420_input_frame.data = (uint8_t *) yuvByteArray;
i420_input_frame.y = i420_input_frame.data;
i420_input_frame.u = i420_input_frame.y + input_size;
i420_input_frame.v = i420_input_frame.u + input_size / 4;
//Generate output frame
delete[] i420_output_frame.data; // allocated with new[], so delete[] (not free) is required
i420_output_frame.width = out_width;
i420_output_frame.height = out_height;
i420_output_frame.data = new unsigned char[out_size * 3 / 2];
i420_output_frame.y = i420_output_frame.data;
i420_output_frame.u = i420_output_frame.y + out_size;
i420_output_frame.v = i420_output_frame.u + out_size / 4;
libyuv::FilterMode mode = libyuv::FilterModeEnum::kFilterBilinear;
int result = I420Scale(i420_input_frame.y, i420_input_frame.width,
i420_input_frame.u, i420_input_frame.width / 2,
i420_input_frame.v, i420_input_frame.width / 2,
i420_input_frame.width, i420_input_frame.height,
i420_output_frame.y, i420_output_frame.width,
i420_output_frame.u, i420_output_frame.width / 2,
i420_output_frame.v, i420_output_frame.width / 2,
i420_output_frame.width, i420_output_frame.height,
mode);
LOGD("Image result %d", result);
env->ReleaseByteArrayElements(yuvByteArray_, yuvByteArray, 0);
return NULL;
}
} // extern "C"
Answer 0 (score: 1)
You can try using the following code, which uses y_size instead of the full size of your array:
//Get input and output length
int input_size = env->GetArrayLength(yuvByteArray_);
int y_size = src_width * src_height;
int out_size = out_height * out_width;
//Generate input frame
i420_input_frame.width = src_width;
i420_input_frame.height = src_height;
i420_input_frame.data = (uint8_t *) yuvByteArray;
i420_input_frame.y = i420_input_frame.data;
i420_input_frame.u = i420_input_frame.y + y_size;
i420_input_frame.v = i420_input_frame.u + y_size / 4;
//Generate output frame
delete[] i420_output_frame.data; // allocated with new[], so delete[] (not free) is required
i420_output_frame.width = out_width;
i420_output_frame.height = out_height;
i420_output_frame.data = new unsigned char[out_size * 3 / 2];
i420_output_frame.y = i420_output_frame.data;
i420_output_frame.u = i420_output_frame.y + out_size;
i420_output_frame.v = i420_output_frame.u + out_size / 4;
...
The difference is that the U and V offsets are computed from y_size (the size of the Y plane, src_width * src_height) rather than from the full size of the array. Your code is probably based on https://github.com/begeekmyfriend/yasea/blob/master/library/src/main/libenc/jni/libenc.cc, and according to that code you have to use y_size.
Answer 1 (score: 1)
There is a problem with the input size of the frame.
It should be:
int input_array_size = env->GetArrayLength(yuvByteArray_);
int input_size = input_array_size * 2 / 3; //This is the frame size
For example, if your frame is 6x4:
Channel y size: 6 * 4 = 24
 1 2 3 4 5 6
 _ _ _ _ _ _
|_|_|_|_|_|_| 1
|_|_|_|_|_|_| 2
|_|_|_|_|_|_| 3
|_|_|_|_|_|_| 4
Channel u size: 3 * 2 = 6
  1   2   3
 _ _ _ _ _ _
|   |   |   |
|_ _|_ _|_ _| 1
|   |   |   |
|_ _|_ _|_ _| 2
Channel v size: 3 * 2 = 6 (same layout as the u channel)
Array size = 6 * 4 + 3 * 2 + 3 * 2 = 36
But the actual frame size = channel y size = 36 * 2 / 3 = 24
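To double-check these numbers mechanically, here is a small standalone sketch of the same arithmetic (my addition, not part of the original answer):
#include <cassert>
int main() {
    const int width = 6, height = 4;
    const int y_size = width * height;                // 24
    const int u_size = (width / 2) * (height / 2);    // 6
    const int v_size = u_size;                        // 6
    const int array_size = y_size + u_size + v_size;  // 36
    // The Y plane is exactly two thirds of a full I420 buffer.
    assert(y_size == array_size * 2 / 3);
    return 0;
}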
Answer 2 (score: 0)
gmetax is almost correct.
You are using the size of the whole array where you should be using the size of the Y component, which is src_width * src_height.
gmetax's answer is wrong in that he has put y_size in place of out_size when defining the output frame. The correct code snippet, I believe, would look like this:
//Get input and output length
int input_size = env->GetArrayLength(yuvByteArray_);
int y_size = src_width * src_height;
int out_size = out_height * out_width;
//Generate input frame
i420_input_frame.width = src_width;
i420_input_frame.height = src_height;
i420_input_frame.data = (uint8_t *) yuvByteArray;
i420_input_frame.y = i420_input_frame.data;
i420_input_frame.u = i420_input_frame.y + y_size;
i420_input_frame.v = i420_input_frame.u + y_size / 4;
//Generate output frame
delete[] i420_output_frame.data; // allocated with new[], so delete[] (not free) is required
i420_output_frame.width = out_width;
i420_output_frame.height = out_height;
i420_output_frame.data = new unsigned char[out_size * 3 / 2];
i420_output_frame.y = i420_output_frame.data;
i420_output_frame.u = i420_output_frame.y + out_size;
i420_output_frame.v = i420_output_frame.u + out_size / 4;
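As an extra safeguard (my addition, not part of the original answer), you can verify that the incoming array really has I420 size before computing the plane offsets; LOGE is the macro defined in the question:
// Hypothetical guard: an I420 buffer holds exactly 3/2 bytes per pixel.
if (input_size != y_size * 3 / 2) {
    LOGE("Unexpected buffer size: got %d, expected %d", input_size, y_size * 3 / 2);
    env->ReleaseByteArrayElements(yuvByteArray_, yuvByteArray, JNI_ABORT);
    return NULL;
}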
Answer 3 (score: 0)
You are trying to scale a YUV422 image as if it were YUV420, so it is no wonder the colors are all messed up. First of all, you need to figure out the exact format of your YUV input buffer. Judging from the documentation of YUV_422_888, it may represent planar as well as interleaved formats (if the pixel stride is not 1). From your results, your source seems to be planar and the processing of the Y plane is fine; your error is in the handling of the U and V planes. To get the scaling right:
Figure out whether your U and V planes are interleaved or planar. Most likely they are planar as well.
Use ScalePlane from libyuv to scale U and V separately. If you step into I420Scale, you will see that it calls ScalePlane for the individual planes. Do the same, but use the correct strides for your U and V planes (each is twice as large as what I420Scale expects); see the sketch after this answer.
Some tips on how to tell whether your U and V planes are planar or interleaved: skip the scaling, save the image, and check that you get a correct result (identical to the source). Then try zeroing out the U frame or the V frame and see what you get. If U and V are planar and you memset the U plane to zero, the whole picture should change color. If they are interleaved, half of the picture will change and the other half will stay the same. In the same way you can check your assumptions about plane sizes, strides, and offsets. Once you are sure about your YUV format and layout, you can scale the individual planes if your input is planar; if your input is interleaved, you first need to deinterleave the planes and then scale them.
Alternatively, you can use libswscale from ffmpeg/libav and try different formats until you find the correct one, and then switch to libyuv.
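Here is a minimal sketch (my addition) of the per-plane scaling suggested above, assuming a planar 4:2:0 layout; all names and strides are placeholders for the values you actually measured:
#include <stdint.h>
#include <libyuv/scale.h>
// Scale Y, U and V independently, so that non-standard strides can be
// passed for the chroma planes.
void ScalePlanarYuv(const uint8_t *src_y, int src_stride_y,
                    const uint8_t *src_u, int src_stride_u,
                    const uint8_t *src_v, int src_stride_v,
                    int src_width, int src_height,
                    uint8_t *dst_y, uint8_t *dst_u, uint8_t *dst_v,
                    int dst_width, int dst_height) {
    // Full-resolution luma plane.
    libyuv::ScalePlane(src_y, src_stride_y, src_width, src_height,
                       dst_y, dst_width, dst_width, dst_height,
                       libyuv::kFilterBilinear);
    // Chroma planes: for planar 4:2:0 they are half width and half height;
    // substitute the real dimensions/strides of your buffer here.
    libyuv::ScalePlane(src_u, src_stride_u, src_width / 2, src_height / 2,
                       dst_u, dst_width / 2, dst_width / 2, dst_height / 2,
                       libyuv::kFilterBilinear);
    libyuv::ScalePlane(src_v, src_stride_v, src_width / 2, src_height / 2,
                       dst_v, dst_width / 2, dst_width / 2, dst_height / 2,
                       libyuv::kFilterBilinear);
}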
Answer 4 (score: 0)
The green images were caused by one of the planes being full of 0s, meaning that one of the planes was empty. That happened because I was treating the buffer as YUV I420 when it was actually YUV NV21: the images coming from the Android camera framework are NV21.
We need to convert them to YUV I420 for libyuv to work properly. After that, we can start using the many operations the library offers, such as rotating and scaling.
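For the NV21 → I420 conversion step, here is a minimal sketch using libyuv::NV21ToI420 (my addition; the buffer names and the assumption of tightly packed strides are hypothetical):
#include <stdint.h>
#include <libyuv/convert.h>
// NV21 stores the full-resolution Y plane followed by one interleaved
// V/U plane. NV21ToI420 splits that chroma plane into separate U and V.
bool Nv21ToI420(const uint8_t *nv21, int width, int height,
                uint8_t *dst_y, uint8_t *dst_u, uint8_t *dst_v) {
    const uint8_t *src_y = nv21;
    const uint8_t *src_vu = nv21 + width * height;  // interleaved V/U pairs
    return libyuv::NV21ToI420(src_y, width,
                              src_vu, width,
                              dst_y, width,
                              dst_u, width / 2,
                              dst_v, width / 2,
                              width, height) == 0;
}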
Here is what the scaling method looks like:
JNIEXPORT jint JNICALL
Java_com_aa_project_images_yuv_myJNIcl_scaleI420(JNIEnv *env, jclass type,
jobject srcBufferY,
jobject srcBufferU,
jobject srcBufferV,
jint srcWidth, jint srcHeight,
jobject dstBufferY,
jobject dstBufferU,
jobject dstBufferV,
jint dstWidth, jint dstHeight,
jint filterMode) {
const uint8_t *srcY = static_cast<uint8_t *>(env->GetDirectBufferAddress(srcBufferY));
const uint8_t *srcU = static_cast<uint8_t *>(env->GetDirectBufferAddress(srcBufferU));
const uint8_t *srcV = static_cast<uint8_t *>(env->GetDirectBufferAddress(srcBufferV));
uint8_t *dstY = static_cast<uint8_t *>(env->GetDirectBufferAddress(dstBufferY));
uint8_t *dstU = static_cast<uint8_t *>(env->GetDirectBufferAddress(dstBufferU));
uint8_t *dstV = static_cast<uint8_t *>(env->GetDirectBufferAddress(dstBufferV));
return libyuv::I420Scale(srcY, srcWidth,
srcU, srcWidth / 2,
srcV, srcWidth / 2,
srcWidth, srcHeight,
dstY, dstWidth,
dstU, dstWidth / 2,
dstV, dstWidth / 2,
dstWidth, dstHeight,
static_cast<libyuv::FilterMode>(filterMode));
}
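Note that the method simply forwards the return value of libyuv::I420Scale, which is 0 on success, so the Java caller can check the returned jint to detect a failed scale.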