Converting a camera frame from YUV to RGB with the GL shading language

Date: 2013-04-08 13:22:55

Tags: android opengl-es shader rgb cocos2d-x

I get the camera frame as a byte array from the Android camera preview callback and pass it down to JNI code. Because we can't use Java's byte type in C++, I convert it to an integer array like this:

    JNIEXPORT void JNICALL Java_com_omobio_armadillo_Armadillo_onAndroidCameraFrameNative(
            JNIEnv* env, jobject, jbyteArray data, jint dataLen, jint width,
            jint height, jint bitsPerComponent) {
        Armadillo *armadillo = Armadillo::singleton();

        jbyte *jArr = env->GetByteArrayElements(data, NULL);
        int dataChar[dataLen];
        for (int i = 0; i < dataLen; i++) {
            dataChar[i] = (int) jArr[i];
        }
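
As an aside, on the native side jbyte is just a signed 8-bit integer type, so the buffer doesn't strictly need the per-element conversion to int; a minimal sketch of copying the bytes straight into a heap buffer (which also avoids the large variable-length array on the stack, and releases the pinned Java array when done):

    #include <vector>

    // inside the same JNI function: copy the NV21 bytes directly
    jbyte *jArr = env->GetByteArrayElements(data, NULL);
    std::vector<unsigned char> frame(
            reinterpret_cast<unsigned char *>(jArr),
            reinterpret_cast<unsigned char *>(jArr) + dataLen);
    // JNI_ABORT: we only read the array, no need to copy changes back
    env->ReleaseByteArrayElements(data, jArr, JNI_ABORT);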

Then I set it into a CCImage to create a texture, like this:

    void AppClass::drawAndroidCameraFrame() {

        CCLOG("drawAndroidCameraFrame");
        int nextBufferIndex = !_bufferIndex;
        if (mIsNewFrameReceived) {
            mIsNewFrameReceived = false;
            return;
        }
        CCLOG("drawAndroidCameraFrame - creating CCImage");
        _image[nextBufferIndex] = new CCImage();
        _image[nextBufferIndex]->initWithImageData(mFramePData, mFrameDataLen,
                mFrameFormat, mFrameWidth, mFrameHeight, mBitsPerComponent);
        if (mIsNewFrameReceived) {
            CCLOG("drawAndroidCameraFrame - releasing frame image");
            _image[nextBufferIndex]->release();
            mIsNewFrameReceived = false;
            CCLOG("camera frame process cancelled 2");
            return;
        }
        CCLOG("drawAndroidCameraFrame - creating texture2d");
        _texture[nextBufferIndex] = new CCTexture2D();
        _texture[nextBufferIndex]->initWithImage(_image[nextBufferIndex]);

        if (!_videoSprite) {
            CCLOG("Creating new sprite");

            if (mIsNewFrameReceived) {
                CCLOG("drawAndroidCameraFrame - releasing image and texture");
                _image[nextBufferIndex]->release();
                _texture[nextBufferIndex]->release();
                mIsNewFrameReceived = false;
                CCLOG("camera frame process cancelled 3");
                return;
            }

            CCLOG("drawAndroidCameraFrame - creating video sprite");
            _videoSprite = new CollisionBitmapSprite();
            _videoSprite->initWithTexture(_texture[nextBufferIndex]);

            // get director
            CCDirector *director = CCDirector::sharedDirector();

            // ask the director for the window size
            CCSize size = director->getWinSize();
            // position the sprite at the center of the screen
            _videoSprite->setPosition(ccp(size.width / 2, size.height / 2));

            // get scale factor (stack-allocated; the heap-allocated CCSize
            // originally used here was never deleted and leaked)
            CCSize imageSize(_image[nextBufferIndex]->getWidth(),
                    _image[nextBufferIndex]->getHeight());

            CCSize scale = getCameraFrameScaleFactor(imageSize);
            // CCLOG("Scale factor is x=%f and y=%f", scale.width, scale.height);

            _videoSprite->setScaleX(scale.width);
            _videoSprite->setScaleY(scale.height);

            if (mIsNewFrameReceived) {
                _image[nextBufferIndex]->release();
                _texture[nextBufferIndex]->release();
                mIsNewFrameReceived = false;
                CCLOG("camera frame process cancelled 4");
                return;
            }

            _videoSprite->setTexture(_texture[nextBufferIndex]);

            Shaders::addProgram(_videoSprite, (char *) Shaders::textureVertShader,
                    mFrameWidth, mFrameHeight);
            GLuint i = Shaders::addProgram(_videoSprite, (char *) Shaders::vertShader,
                    (char *) Shaders::yuvtorgb);
            Shaders::setYuvtorgbParameters(_videoSprite, i);
            addChild(_videoSprite, -1);

        } else {
            _videoSprite->setTexture(_texture[nextBufferIndex]);
        }
        // CCLOG("Armadillo::drawCameraFrame completed successfully");
        // release the previous buffer's image and texture
        if (_image[_bufferIndex]) {
            _image[_bufferIndex]->release();
        }

        if (_texture[_bufferIndex]) {
            _texture[_bufferIndex]->release();
        }

        _bufferIndex = nextBufferIndex;
    }
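
A side note on the flag checked throughout this method: mIsNewFrameReceived is apparently set from the camera callback thread and polled from the GL thread, so the repeated cancellation checks only shrink the race window rather than close it. A minimal sketch of serializing the two threads with a pthread mutex (the function and member names here are hypothetical, not from the question):

    #include <pthread.h>
    #include <cstring>

    static pthread_mutex_t sFrameLock = PTHREAD_MUTEX_INITIALIZER;

    // camera thread: publish the latest frame under the lock
    void AppClass::onCameraFrame(const unsigned char *data, int len) {
        pthread_mutex_lock(&sFrameLock);
        memcpy(mFramePData, data, len); // copy into the shared frame buffer
        mIsNewFrameReceived = true;
        pthread_mutex_unlock(&sFrameLock);
    }

    // GL thread: atomically test-and-clear the flag before drawing
    bool AppClass::consumeNewFrameFlag() {
        pthread_mutex_lock(&sFrameLock);
        bool fresh = mIsNewFrameReceived;
        mIsNewFrameReceived = false;
        pthread_mutex_unlock(&sFrameLock);
        return fresh;
    }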

Since the image is in YUV (NV21) format, I apply a shader to the frame to convert it to RGB. The shader programs are as follows:

Fragment Shader:

const char *Shaders::yuvtorgb = MULTI_LINE_STRING(
        precision highp float;
        varying vec2 v_yTexCoord;
        varying vec4 v_effectTexCoord;

        uniform sampler2D y_texture;
        uniform sampler2D u_texture;
        uniform sampler2D v_texture;

        void main()
        {
            float y = texture2D(y_texture, v_yTexCoord).r;
            float u = texture2D(u_texture, v_yTexCoord).r;
            float v = texture2D(v_texture, v_yTexCoord).r;

            // video-range Y expansion and chroma centering
            y = 1.1643 * (y - 0.0625);
            u = u - 0.5;
            v = v - 0.5;

            float r = y + 1.5958 * v;
            float g = y - 0.39173 * u - 0.81290 * v;
            float b = y + 2.017 * u;

            gl_FragColor = vec4(r,g,b, 1.0);
        }
);

Vertex Shader:

const char *Shaders::vertShader = MULTI_LINE_STRING(
        attribute vec4 a_position;
        attribute vec2 a_yTexCoord;
        attribute vec4 a_effectTexCoord;

        varying vec2 v_yTexCoord;
        varying vec4 v_effectTexCoord;
        uniform mat4 u_MVPMatrix;
        void main()
        {
            v_yTexCoord = a_yTexCoord;
            v_effectTexCoord = a_effectTexCoord;
            gl_Position = u_MVPMatrix * a_position;
        }
);
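
One detail worth flagging: this shader pair samples three separate planes (y_texture, u_texture, v_texture), but NV21 data is laid out as a full-resolution Y plane followed by a single half-resolution plane of interleaved V/U bytes; there is no standalone U or V plane in the buffer. Unless separate Y/U/V textures are bound to units 0-2 somewhere else, the u_texture and v_texture samplers have nothing meaningful to read, which is consistent with the green/pink output described below. A sketch of how the two NV21 planes could be uploaded as separate GL ES 2.0 textures (the texture names are assumed to come from glGenTextures; this function is not part of the question's code):

    #include <GLES2/gl2.h>

    // Upload an NV21 frame as two textures: a full-res luma plane and a
    // half-res interleaved chroma plane.
    void uploadNV21(const unsigned char *yuv, int width, int height,
                    GLuint yTex, GLuint vuTex) {
        glPixelStorei(GL_UNPACK_ALIGNMENT, 1); // rows are tightly packed

        // Y plane: width*height single bytes
        glBindTexture(GL_TEXTURE_2D, yTex);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, width, height, 0,
                     GL_LUMINANCE, GL_UNSIGNED_BYTE, yuv);

        // VU plane: (width/2)*(height/2) byte pairs, V first then U, so the
        // shader reads V from the .r channel and U from the .a channel
        glBindTexture(GL_TEXTURE_2D, vuTex);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE_ALPHA,
                     width / 2, height / 2, 0,
                     GL_LUMINANCE_ALPHA, GL_UNSIGNED_BYTE,
                     yuv + width * height);
    }

With this layout the fragment shader needs only two samplers; the conversion math stays the same once u and v are read from the chroma texture's .a and .r channels respectively.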

The addProgram method:

    GLuint Shaders::addProgram(CCSprite *sprite, char *vertShader,
            char *fragShader) {
        CCGLProgram *glProgram = new CCGLProgram();
        if (!glProgram->initWithVertexShaderByteArray(vertShader, fragShader)) {
            CCLOG("Shader problem: %s\n %s \n%s", glProgram->vertexShaderLog(),
                    glProgram->fragmentShaderLog(), glProgram->programLog());
        }

        glProgram->addAttribute(kCCAttributeNamePosition, kCCVertexAttrib_Position);
        glProgram->addAttribute(kCCAttributeNameTexCoord,
                kCCVertexAttrib_TexCoords);
        if (!glProgram->link()) {
            CCLOG("Shader problem: %s\n %s \n%s", glProgram->vertexShaderLog(),
                    glProgram->fragmentShaderLog(), glProgram->programLog());
        }
        glProgram->updateUniforms();

        sprite->setShaderProgram(glProgram);
        return glProgram->getProgram();
    }
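
Shaders::setYuvtorgbParameters isn't shown in the question; for a three-sampler shader like the one above, it would typically bind each sampler uniform to a texture unit. A minimal sketch of what that step usually looks like (the body is an assumption):

    #include <GLES2/gl2.h>

    void Shaders::setYuvtorgbParameters(CCSprite *sprite, GLuint program) {
        (void) sprite; // signature mirrors the call site in the question
        glUseProgram(program); // uniforms apply to the currently used program
        glUniform1i(glGetUniformLocation(program, "y_texture"), 0);
        glUniform1i(glGetUniformLocation(program, "u_texture"), 1);
        glUniform1i(glGetUniformLocation(program, "v_texture"), 2);
        // the matching glActiveTexture(GL_TEXTURE0 + n) / glBindTexture calls
        // must happen before the sprite is drawn
    }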

Then I apply the shader to the frame sprite:

    GLuint i = Shaders::addProgram(_videoSprite, (char *) Shaders::vertShader,
            (char *) Shaders::yuvtorgb);

I'm getting green and pink frames: dark areas come out green, and bright areas come out pink.

(A screenshot of the resulting frame was attached to the original post.)

I'm stuck here and haven't found a proper solution. Can anyone help with this problem?

1 Answer:

Answer 0 (score: 0)

I'm not sure whether you're trying to get RGB from the camera in order to use it outside the phone, but perhaps you could convert YUV to RGB in Android first and then pass the RGB pixel array down?

Here is the code I used to convert to RGB.

To open the camera, I used:

    try {
        camera = Camera.open();
        cameraParam = camera.getParameters();
        cameraParam.setPreviewFormat(ImageFormat.NV21);
        List<int[]> fps = cameraParam.getSupportedPreviewFpsRange();
        camera.setDisplayOrientation(90);
        camera.setParameters(cameraParam);
        cameraParam = camera.getParameters();
        camera.startPreview();

        // wait for frames to come in
        camera.setPreviewCallback(new PreviewCallback() {
            @Override
            public void onPreviewFrame(byte[] data, Camera camera) {
                frameHeight = camera.getParameters().getPreviewSize().height;
                frameWidth = camera.getParameters().getPreviewSize().width;
                int rgb[] = new int[frameWidth * frameHeight]; // number of pixels
                // decodeYUV420SP returns int[], not byte[]; it fills rgb
                // with pixels packed in ARGB_8888 order
                decodeYUV420SP(rgb, data, frameWidth, frameHeight);
            }
        });
    } catch (Exception e) {
        Log.e("camera", "  error camera  ");
    }
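
If the decoded pixels then need to reach the native side, an int[] can cross JNI directly; a minimal sketch (the native method and its name are assumptions, not part of the original code):

    JNIEXPORT void JNICALL Java_com_omobio_armadillo_Armadillo_onRgbFrameNative(
            JNIEnv *env, jobject, jintArray rgb, jint width, jint height) {
        jint *pixels = env->GetIntArrayElements(rgb, NULL);
        // ... consume width*height ARGB pixels (e.g. upload as one texture) ...
        env->ReleaseIntArrayElements(rgb, pixels, JNI_ABORT);
    }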

I got decodeYUV420SP from a different post; you can find it here.

Here is the code itself from that post:

    //  Byte decoder : ---------------------------------------------------------------------
    int[] decodeYUV420SP(int[] rgb, byte[] yuv420sp, int width, int height) {
        Log.e("camera", "   decodeYUV420SP  ");
        final int frameSize = width * height;

        for (int j = 0, yp = 0; j < height; j++) {
            // each chroma row is shared by two luma rows (4:2:0 subsampling)
            int uvp = frameSize + (j >> 1) * width, u = 0, v = 0;
            for (int i = 0; i < width; i++, yp++) {
                int y = (0xff & (yuv420sp[yp])) - 16;
                if (y < 0)
                    y = 0;
                // NV21 interleaves V,U; read a new pair every second pixel
                if ((i & 1) == 0) {
                    v = (0xff & yuv420sp[uvp++]) - 128;
                    u = (0xff & yuv420sp[uvp++]) - 128;
                }

                // fixed-point YUV -> RGB (BT.601 coefficients scaled by 1024)
                int y1192 = 1192 * y;
                int r = (y1192 + 1634 * v);
                int g = (y1192 - 833 * v - 400 * u);
                int b = (y1192 + 2066 * u);

                // clamp to the 18-bit intermediate range
                if (r < 0)
                    r = 0;
                else if (r > 262143)
                    r = 262143;
                if (g < 0)
                    g = 0;
                else if (g > 262143)
                    g = 262143;
                if (b < 0)
                    b = 0;
                else if (b > 262143)
                    b = 262143;

                // shift each channel back down to 8 bits and pack as ARGB
                rgb[yp] = 0xff000000 | ((r << 6) & 0xff0000) | ((g >> 2) & 0xff00) | ((b >> 10) & 0xff);
            }
        }
        return rgb;
    }
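
For reference, the magic numbers are the standard BT.601 video-range conversion coefficients scaled by 1024: 1192 ≈ 1.164·1024, 1634 ≈ 1.596·1024, 833 ≈ 0.813·1024, 400 ≈ 0.391·1024 and 2066 ≈ 2.018·1024, and the clamp limit 262143 = 2^18 - 1 is the largest value the scaled intermediates can take before the final shifts bring each channel back to 8 bits.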

Then, once you've got the RGB pixel array back from decodeYUV420SP, you can rebuild the image like this:

    // the pixels are raw ARGB values, so build the bitmap directly
    // (BitmapFactory.decodeByteArray only handles compressed formats like JPEG)
    Bitmap bitmap = Bitmap.createBitmap(rgb, frameWidth, frameHeight, Bitmap.Config.ARGB_8888);

Hope this helps. My code may have errors, so double-check it, but overall it worked for me.