Converting to grayscale - too slow

Date: 2009-08-21 09:47:00

Tags: iphone cocoa-touch

I created a class that converts an image to grayscale, but it works far too slowly. Is there a way to make it run faster?

Here is my class:

@implementation PixelProcessing

SYNTHESIZE_SINGLETON_FOR_CLASS(PixelProcessing);

#define bytesPerPixel 4
#define bitsPerComponent 8


-(UIImage*)scaleAndRotateImage: (UIImage*)img withMaxResolution: (int)kMaxResolution
{
    CGImageRef imgRef = img.CGImage;

    CGFloat width = CGImageGetWidth(imgRef);
    CGFloat height = CGImageGetHeight(imgRef);


    CGAffineTransform transform = CGAffineTransformIdentity;
    CGRect bounds = CGRectMake(0, 0, width, height);

    if ( (kMaxResolution != 0) && (width > kMaxResolution || height > kMaxResolution) ) {
        CGFloat ratio = width/height;
        if (ratio > 1) {
            bounds.size.width = kMaxResolution;
            bounds.size.height = bounds.size.width / ratio;
        }
        else {
            bounds.size.height = kMaxResolution;
            bounds.size.width = bounds.size.height * ratio;
        }
    }

    CGFloat scaleRatio;
    if (kMaxResolution != 0){
        scaleRatio = bounds.size.width / width;
    } else
    {
        scaleRatio = 1.0f;
    }

    CGSize imageSize = CGSizeMake(CGImageGetWidth(imgRef), CGImageGetHeight(imgRef));
    CGFloat boundHeight;
    UIImageOrientation orient = img.imageOrientation;
    switch(orient) {

        case UIImageOrientationUp: //EXIF = 1
            transform = CGAffineTransformIdentity;
            break;

        case UIImageOrientationUpMirrored: //EXIF = 2
            transform = CGAffineTransformMakeTranslation(imageSize.width, 0.0);
            transform = CGAffineTransformScale(transform, -1.0, 1.0);
            break;

        case UIImageOrientationDown: //EXIF = 3
            transform = CGAffineTransformMakeTranslation(imageSize.width, imageSize.height);
            transform = CGAffineTransformRotate(transform, M_PI);
            break;

        case UIImageOrientationDownMirrored: //EXIF = 4
            transform = CGAffineTransformMakeTranslation(0.0, imageSize.height);
            transform = CGAffineTransformScale(transform, 1.0, -1.0);
            break;

        case UIImageOrientationLeftMirrored: //EXIF = 5
            boundHeight = bounds.size.height;
            bounds.size.height = bounds.size.width;
            bounds.size.width = boundHeight;
            transform = CGAffineTransformMakeTranslation(imageSize.height, imageSize.width);
            transform = CGAffineTransformScale(transform, -1.0, 1.0);
            transform = CGAffineTransformRotate(transform, 3.0 * M_PI / 2.0);
            break;

        case UIImageOrientationLeft: //EXIF = 6
            boundHeight = bounds.size.height;
            bounds.size.height = bounds.size.width;
            bounds.size.width = boundHeight;
            transform = CGAffineTransformMakeTranslation(0.0, imageSize.width);
            transform = CGAffineTransformRotate(transform, 3.0 * M_PI / 2.0);
            break;

        case UIImageOrientationRightMirrored: //EXIF = 7
            boundHeight = bounds.size.height;
            bounds.size.height = bounds.size.width;
            bounds.size.width = boundHeight;
            transform = CGAffineTransformMakeScale(-1.0, 1.0);
            transform = CGAffineTransformRotate(transform, M_PI / 2.0);
            break;

        case UIImageOrientationRight: //EXIF = 8
            boundHeight = bounds.size.height;
            bounds.size.height = bounds.size.width;
            bounds.size.width = boundHeight;
            transform = CGAffineTransformMakeTranslation(imageSize.height, 0.0);
            transform = CGAffineTransformRotate(transform, M_PI / 2.0);
            break;

        default:
            [NSException raise:NSInternalInconsistencyException format: @"Invalid image orientation"];

    }

    UIGraphicsBeginImageContext(bounds.size);

    CGContextRef context = UIGraphicsGetCurrentContext();

    if (orient == UIImageOrientationRight || orient == UIImageOrientationLeft) {
        CGContextScaleCTM(context, -scaleRatio, scaleRatio);
        CGContextTranslateCTM(context, -height, 0);
    }
    else {
        CGContextScaleCTM(context, scaleRatio, -scaleRatio);
        CGContextTranslateCTM(context, 0, -height);
    }

    CGContextConcatCTM(context, transform);

    CGContextDrawImage(UIGraphicsGetCurrentContext(), CGRectMake(0, 0, width, height), imgRef);
    UIImage *tempImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();

    return tempImage;
}


#pragma mark Getting And Writing Pixels
-(float*) getColorForPixel: (NSUInteger)xCoordinate andForY: (NSUInteger)yCoordinate
{
    int byteIndex = (bytesPerRow * yCoordinate) + xCoordinate * bytesPerPixel;

    float *colorToReturn = malloc(3 * sizeof(float)); // room for R, G, B
    colorToReturn[0] = bitmap[byteIndex] / 255.f;   //Red
    colorToReturn[1] = bitmap[byteIndex + 1] / 255.f;   //Green
    colorToReturn[2] = bitmap[byteIndex + 2] / 255.f;   //Blue

    return colorToReturn;
}

-(void) writeColor: (float*)colorToWrite forPixelAtX: (NSUInteger)xCoordinate andY: (NSUInteger)yCoordinate
{
    int byteIndex = (bytesPerRow * yCoordinate) + xCoordinate * bytesPerPixel;

    bitmap[byteIndex] = (unsigned char) ( colorToWrite[0] * 255);
    bitmap[byteIndex + 1] = (unsigned char) ( colorToWrite[1] * 255);
    bitmap[byteIndex + 2] = (unsigned char) ( colorToWrite[2] * 255);
}

#pragma mark Bitmap

-(float) getAverageBrightnessForImage: (UIImage*)img
{
    UIImage *tempImage = [self scaleAndRotateImage: img withMaxResolution: 100];

    unsigned char *rawData = [self getBytesForImage: tempImage];

    double aBrightness = 0;

    for(int y = 0; y < tempImage.size.height; y++) {
        for(int x = 0; x < tempImage.size.width; x++) {
            int byteIndex = ( (tempImage.size.width * y) + x) * bytesPerPixel;

            aBrightness += (rawData[byteIndex] + rawData[byteIndex + 1] + rawData[byteIndex + 2]);


        }
    }

    free(rawData);

    aBrightness /= 3.0f;
    aBrightness /= 255.0f;
    aBrightness /= tempImage.size.width * tempImage.size.height;

    return aBrightness;
}

-(unsigned char*) getBytesForImage: (UIImage*)pImage
{
    CGImageRef image = [pImage CGImage];
    NSUInteger width = CGImageGetWidth(image);
    NSUInteger height = CGImageGetHeight(image);

    bytesPerRow = bytesPerPixel * width;

    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    unsigned char *rawData = malloc(height * width * bytesPerPixel);
    CGContextRef context = CGBitmapContextCreate(rawData, width, height, bitsPerComponent, bytesPerRow, colorSpace, kCGBitmapByteOrder32Big | kCGImageAlphaPremultipliedLast);
    CGColorSpaceRelease(colorSpace);

    CGContextDrawImage(context, CGRectMake(0, 0, width, height), image);
    CGContextRelease(context);

    return rawData;
}

-(void) loadWithImage: (UIImage*)img
{   
    averageBrightness = [self getAverageBrightnessForImage: img];
    currentImage = [self scaleAndRotateImage: img withMaxResolution: 0];

    imgWidth = currentImage.size.width;
    imgHeight = currentImage.size.height;

    bitmap = [self getBytesForImage: currentImage];

    bytesPerRow = bytesPerPixel * imgWidth;
}

-(void) processImage
{   
    // now convert to grayscale

    for(int y = 0; y < imgHeight; y++) {
        for(int x = 0; x < imgWidth; x++) {
            float *currentColor = [self getColorForPixel: x andForY: y];

            //Grayscale
            float averageColor = (currentColor[0] + currentColor[1] + currentColor[2]) / 3.0f;

            averageColor += 0.5f - averageBrightness;

            if (averageColor > 1.0f) averageColor = 1.0f;

            currentColor[0] = averageColor;
            currentColor[1] = averageColor;
            currentColor[2] = averageColor;

            [self writeColor: currentColor forPixelAtX: x andY: y];

            free(currentColor);
        }
    }
}

-(UIImage*) getProcessedImage
{
    // create a UIImage
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef context = CGBitmapContextCreate(bitmap, imgWidth, imgHeight, bitsPerComponent, bytesPerRow, colorSpace, kCGBitmapByteOrder32Big | kCGImageAlphaPremultipliedLast);
    CGImageRef image = CGBitmapContextCreateImage(context);
    CGContextRelease(context);
    CGColorSpaceRelease(colorSpace);
    UIImage *resultUIImage = [UIImage imageWithCGImage: image];
    CGImageRelease(image);

    return resultUIImage;
}

-(void) releaseCurrentImage
{
    free(bitmap);
}


@end

I convert an image to grayscale like this:

    [ [PixelProcessing sharedPixelProcessing] loadWithImage: imageToDisplay.image];
    [ [PixelProcessing sharedPixelProcessing] processImage];
    imageToDisplay.image = [ [PixelProcessing sharedPixelProcessing] getProcessedImage];
    [ [PixelProcessing sharedPixelProcessing] releaseCurrentImage];

Why is this so slow? Is there a way to get the RGB color components of a pixel as float values? How can I optimize this?

Thanks.

4 Answers:

Answer 0 (score: 12):

You can let Quartz do the grayscale conversion for you:

CGImageRef grayscaleCGImageFromCGImage(CGImageRef inputImage) {
    size_t width = CGImageGetWidth(inputImage);
    size_t height = CGImageGetHeight(inputImage);

    // Create a gray scale context and render the input image into that
    CGColorSpaceRef colorspace = CGColorSpaceCreateDeviceGray();
    CGContextRef context = CGBitmapContextCreate(NULL, width, height, 8, 
                             4*width, colorspace, kCGBitmapByteOrderDefault);
    CGContextDrawImage(context, CGRectMake(0,0, width,height), inputImage);

    // Get an image representation of the grayscale context which the input
    //    was rendered into.
    CGImageRef outputImage = CGBitmapContextCreateImage(context);

    // Cleanup
    CGContextRelease(context);
    CGColorSpaceRelease(colorspace);
    return (CGImageRef)[(id)outputImage autorelease];
}
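
If you need a UIImage on the iPhone side, a plausible wrapper is the following usage sketch (mine, not part of the answer's code); it relies on the function returning an autoreleased CGImageRef:

// Hypothetical usage: wrap the returned CGImageRef in a UIImage.
UIImage *grayImage = [UIImage imageWithCGImage:grayscaleCGImageFromCGImage(original.CGImage)];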

Answer 1 (score: 8):

I recently had to solve the same problem and came up with the following code (it preserves alpha, too):

@implementation UIImage (grayscale)

typedef enum {
    ALPHA = 0,
    BLUE = 1,
    GREEN = 2,
    RED = 3
} PIXELS;

- (UIImage *)convertToGrayscale {
    CGSize size = [self size];
    int width = size.width;
    int height = size.height;

    // the pixels will be painted to this array
    uint32_t *pixels = (uint32_t *) malloc(width * height * sizeof(uint32_t));

    // clear the pixels so any transparency is preserved
    memset(pixels, 0, width * height * sizeof(uint32_t));

    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();

    // create a context with RGBA pixels
    CGContextRef context = CGBitmapContextCreate(pixels, width, height, 8, width * sizeof(uint32_t), colorSpace, 
                                                 kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedLast);

    // paint the bitmap to our context which will fill in the pixels array
    CGContextDrawImage(context, CGRectMake(0, 0, width, height), [self CGImage]);

    for(int y = 0; y < height; y++) {
        for(int x = 0; x < width; x++) {
            uint8_t *rgbaPixel = (uint8_t *) &pixels[y * width + x];

            // convert to grayscale using recommended method: http://en.wikipedia.org/wiki/Grayscale#Converting_color_to_grayscale
            uint32_t gray = 0.3 * rgbaPixel[RED] + 0.59 * rgbaPixel[GREEN] + 0.11 * rgbaPixel[BLUE];

            // set the pixels to gray
            rgbaPixel[RED] = gray;
            rgbaPixel[GREEN] = gray;
            rgbaPixel[BLUE] = gray;
        }
    }

    // create a new CGImageRef from our context with the modified pixels
    CGImageRef image = CGBitmapContextCreateImage(context);

    // we're done with the context, color space, and pixels
    CGContextRelease(context);
    CGColorSpaceRelease(colorSpace);
    free(pixels);

    // make a new UIImage to return
    UIImage *resultUIImage = [UIImage imageWithCGImage:image];

    // we're done with image now too
    CGImageRelease(image);

    return resultUIImage;
}

@end
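
With the category in place, the conversion becomes a one-liner (usage sketch, assuming an existing UIImage named photo):

UIImage *grayPhoto = [photo convertToGrayscale];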

Answer 2 (score: 3):

The way to find out where the time is going is to profile it with Shark. (In Xcode: Run -> Start with Performance Tool -> Shark.) In this case, however, I suspect the main problems are the per-pixel malloc/free, the floating-point arithmetic, and the two method calls inside the inner processing loop.

To avoid the malloc/free, you want to do something like this:

- (void) getColorForPixelX:(NSUInteger)x y:(NSUInteger)y pixel:(float[3])pixel
{ /* Write the values into pixel[0], pixel[1], pixel[2] */ }

// To call:
float pixel[3];
for (NSUInteger y = 0; y < imgHeight; y++)
{
    for (NSUInteger x = 0; x < imgWidth; x++)
    {
        [self getColorForPixelX:x y:y pixel:pixel];
        // Do stuff
    }
}

A second likely source of slowdown is the use of floating point, or rather the cost of converting to and from it. For the filter you are writing, it is simple to work in integer math instead: add the integer pixel values and divide by 255 * 3. (Incidentally, averaging the channels is a pretty bad way to convert to grayscale; see http://en.wikipedia.org/wiki/Grayscale#Converting_color_to_grayscale.)
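
As an illustration, an integer-only inner loop over the asker's bitmap might look like the sketch below. The fixed-point weights 77/151/28 (which sum to 256) are my approximation of the luma coefficients from the linked article, not part of the original answer; variable names reuse the asker's ivars:

// Sketch: weighted grayscale in pure integer math, modifying the asker's
// RGBA bitmap in place. (77*R + 151*G + 28*B) >> 8 stays within 0..255.
for (NSUInteger y = 0; y < imgHeight; y++) {
    unsigned char *row = bitmap + y * bytesPerRow;
    for (NSUInteger x = 0; x < imgWidth; x++) {
        unsigned char *px = row + x * bytesPerPixel;
        unsigned int gray = (77 * px[0] + 151 * px[1] + 28 * px[2]) >> 8;
        px[0] = px[1] = px[2] = (unsigned char)gray;
    }
}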

Method calls are fast, but still slow compared to the basic arithmetic of a filter. (See this article for some numbers.) The easy way to eliminate them is to replace them with inline functions.
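
For instance, the per-pixel getter could become a static inline C function along these lines (a sketch reusing the bitmap, bytesPerRow, and bytesPerPixel names from the asker's class):

// Sketch: an inline replacement for -getColorForPixel:andForY: that
// avoids Objective-C message dispatch in the inner loop.
static inline void GetColorForPixel(const unsigned char *bitmap,
                                    size_t bytesPerRow,
                                    NSUInteger x, NSUInteger y,
                                    float pixel[3])
{
    size_t byteIndex = bytesPerRow * y + x * bytesPerPixel;
    pixel[0] = bitmap[byteIndex]     / 255.0f; // red
    pixel[1] = bitmap[byteIndex + 1] / 255.0f; // green
    pixel[2] = bitmap[byteIndex + 2] / 255.0f; // blue
}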

Answer 3 (score: 1):

Have you tried the luminosity blend mode? A white image blended with your original image using that mode appears to produce grayscale. Take these two source images, with the foreground image on the right and the background image on the left:

(Image: http://developer.apple.com/iphone/library/documentation/GraphicsImaging/Conceptual/drawingwithquartz2d/Art/both_images.jpg)

Blending them with kCGBlendModeLuminosity produces the following result:

(Image: http://developer.apple.com/iphone/library/documentation/GraphicsImaging/Conceptual/drawingwithquartz2d/Art/luminosity_image.jpg)

For more information, see Drawing with Quartz 2D: Using Blend Modes With Images.
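
In code, one possible arrangement is the untested sketch below (the function name and the setup are my assumption based on the linked documentation): fill with white first, then draw the image with kCGBlendModeLuminosity, so the image supplies the luminance and the white background supplies the zero saturation.

// Untested sketch of the luminosity-blend approach; names are hypothetical.
UIImage *GrayscaleViaLuminosityBlend(UIImage *image)
{
    CGRect rect = CGRectMake(0, 0, image.size.width, image.size.height);
    UIGraphicsBeginImageContext(rect.size);

    [[UIColor whiteColor] setFill];
    UIRectFill(rect); // white background contributes (zero) hue/saturation
    [image drawInRect:rect blendMode:kCGBlendModeLuminosity alpha:1.0];

    UIImage *result = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return result;
}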