Stucki抖动:自定义CIKernel不起作用

时间:2017-12-27 14:01:42

标签: ios core-image dithering cikernel

我正在尝试将Stucki抖动(误差扩散)实现为CIKernel,但我有点迷失。我找不到调试过滤器的方法(我是CIKernel的新手)。下面是我目前写出的代码,但是内核没有编译;而且,我想知道如何把扩散出去的误差存储到目标像素中。欢迎提供帮助。是否有跟踪或调试这类代码的工具/方法?

   float mDiffCoef = [
        0,    0     0,    8/42, 4/42,
        2/42, 4/42, 8/42, 4/42, 2/42,
        1/42, 2/42, 4/42, 2/42, 1/42
    ];

    kernel vec4 dither(__sample image) {

        vec2 coord = samplerCoord(image);
        vec4 pixel = sample(image, coord);

        // decompress pixel into rgb
        float red   = pixel.r;

        float color = 0;
        if (red >= 127) color = 255;

        float err = red - color;

        // Spread the error according to the matrix
        for (int x = -2; x <= 2; x++) {
            for (int y = 0; y <= 3; y++) {
                vec2 workingSpaceCoordinate = destCoord() + vec2(x,y);
                vec2 imageSpaceCoordinate = samplerTransform(image, workingSpaceCoordinate);
                vec3 color = sample(image, imageSpaceCoordinate).rgb;
                color += err * mDiffCoef[(2+x)+5*y];
            }
        }

        return vec4(color, 1.0);
    }

[更新:已修正语法错误后的代码]:

// Updated kernel, made compilable and semantically corrected.
//
// Why the original could not work as written: a CIKernel computes
// exactly one destination pixel per invocation (gather model), while
// Stucki error diffusion scatters each pixel's quantization error
// into not-yet-processed neighbours. There is no way to accumulate
// into an output buffer from a kernel. The loop below therefore
// inverts the stencil: instead of pushing this pixel's error outward,
// it pulls the error that raster-earlier neighbours would have pushed
// here. (Exact sequential error diffusion needs a CPU pass.)
//
// Also fixed relative to the original:
//  - missing closing brace at the end of the kernel;
//  - `__sample` changed to `sampler` (required by sample()/
//    samplerCoord()/samplerTransform());
//  - threshold compared normalized 0..1 samples against 127;
//  - inner `float c` shadowed the outer `c`;
//  - `color` was overwritten each iteration instead of accumulated;
//  - return value vec4(255., color, color, 1.) exceeded the 0..1
//    component range and hard-wired the red channel.
kernel vec4 dither(sampler image)
{
    // Stucki coefficients (divisor 42), row-major 5x3.
    // Float literals are mandatory: 8/42 == 0 in integer arithmetic.
    float coef[15];
    int i = 0;
    coef[i++] = 0.;     coef[i++] = 0.;     coef[i++] = 0.;
    coef[i++] = 8./42.; coef[i++] = 4./42.;
    coef[i++] = 2./42.; coef[i++] = 4./42.; coef[i++] = 8./42.;
    coef[i++] = 4./42.; coef[i++] = 2./42.;
    coef[i++] = 1./42.; coef[i++] = 2./42.; coef[i++] = 4./42.;
    coef[i++] = 2./42.; coef[i++] = 1./42.;

    // Pull the weighted error contributed by each neighbour that
    // raster order would have processed before the current pixel.
    float incoming = 0.;
    for (int k = 0; k < 15; k++) {
        // Decode the flat index: column -2..2, row 0..2.
        // (Integer % is avoided for GLSL-1.0-level compatibility.)
        int dx = (k - (k / 5) * 5) - 2;
        int dy = k / 5;
        vec2 src = destCoord() - vec2(float(dx), float(dy));
        float v = sample(image, samplerTransform(image, src)).r;
        // Components are normalized 0..1 — threshold at 0.5.
        float q = (v < 0.5) ? 0. : 1.;
        incoming += (v - q) * coef[k];
    }

    // Quantize the current pixel including the gathered error,
    // and emit a grey (r == g == b) binary result.
    float red = sample(image, samplerCoord(image)).r + incoming;
    float c = (red >= 0.5) ? 1. : 0.;
    return vec4(c, c, c, 1.0);
}
我的主要问题是:如何把累积得到的误差扩散值写入输出(output)像素缓冲区?

0 个答案:

没有答案