Using the following code, I create a UIImage from an ALAsset:
ALAssetRepresentation *assetRepresentation = [asset defaultRepresentation]; // big picture
CGImageRef imageRef = [assetRepresentation fullResolutionImage];
if (imageRef)
{
    UIImage *image = [UIImage imageWithCGImage:imageRef];
}
Now I create a buffer from the UIImage with the code below:
+ (unsigned char *)convertUIImageToBitmapRGBA8:(UIImage *)image {
    CGImageRef imageRef = image.CGImage;

    // Create a bitmap context to draw the UIImage into
    CGContextRef context = [self newBitmapRGBA8ContextFromImage:imageRef];
    if (!context) {
        return NULL;
    }

    size_t width  = CGImageGetWidth(imageRef);
    size_t height = CGImageGetHeight(imageRef);
    CGRect rect   = CGRectMake(0, 0, width, height);

    // Draw the image into the context to get the raw pixel data
    CGContextDrawImage(context, rect, imageRef);

    // Get a pointer to the context's backing store
    unsigned char *bitmapData = (unsigned char *)CGBitmapContextGetData(context);

    // Copy the pixels into a fresh malloc'd buffer; the caller owns it
    size_t bytesPerRow  = CGBitmapContextGetBytesPerRow(context);
    size_t bufferLength = bytesPerRow * height;
    unsigned char *newBitmap = NULL;

    if (bitmapData) {
        newBitmap = (unsigned char *)malloc(bufferLength);
        if (newBitmap) {
            memcpy(newBitmap, bitmapData, bufferLength);
        }
    } else {
        NSLog(@"Error getting bitmap pixel data\n");
    }

    // Release the context first, then free its backing store
    // (which was malloc'd in newBitmapRGBA8ContextFromImage)
    CGContextRelease(context);
    free(bitmapData);

    return newBitmap;
}
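Note that the buffer returned here is allocated with malloc, so whoever calls this method owns it and must free() it once the pixels are no longer needed; the corrected call-site sketch further down does exactly that.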
Now I convert that buffer back into a UIImage with the following code:
+ (UIImage *)convertBitmapRGBA8ToUIImage:(unsigned char *)buffer withWidth:(int)width withHeight:(int)height
{
    size_t bufferLength = width * height * 4;

    // NULL release callback: the caller keeps ownership of `buffer`
    // and remains responsible for freeing it
    CGDataProviderRef provider = CGDataProviderCreateWithData(NULL, buffer, bufferLength, NULL);

    size_t bitsPerComponent = 8;
    size_t bitsPerPixel     = 32;
    size_t bytesPerRow      = 4 * width;

    CGColorSpaceRef colorSpaceRef = CGColorSpaceCreateDeviceRGB();
    if (colorSpaceRef == NULL) {
        NSLog(@"Error allocating color space");
        CGDataProviderRelease(provider);
        return nil;
    }

    CGBitmapInfo bitmapInfo = kCGBitmapByteOrderDefault | kCGImageAlphaPremultipliedLast;
    CGColorRenderingIntent renderingIntent = kCGRenderingIntentDefault;

    CGImageRef iref = CGImageCreate(width,
                                    height,
                                    bitsPerComponent,
                                    bitsPerPixel,
                                    bytesPerRow,
                                    colorSpaceRef,
                                    bitmapInfo,
                                    provider,  // data provider
                                    NULL,      // decode
                                    YES,       // should interpolate
                                    renderingIntent);

    uint32_t *pixels = (uint32_t *)malloc(bufferLength);
    if (pixels == NULL) {
        NSLog(@"Error: Memory not allocated for bitmap");
        CGDataProviderRelease(provider);
        CGColorSpaceRelease(colorSpaceRef);
        CGImageRelease(iref);
        return nil;
    }

    CGContextRef context = CGBitmapContextCreate(pixels,
                                                 width,
                                                 height,
                                                 bitsPerComponent,
                                                 bytesPerRow,
                                                 colorSpaceRef,
                                                 bitmapInfo);

    UIImage *image = nil;
    if (context == NULL) {
        NSLog(@"Error context not created");
    } else {
        CGContextDrawImage(context, CGRectMake(0.0f, 0.0f, width, height), iref);
        CGImageRef imageRef = CGBitmapContextCreateImage(context);

        // Support both iPad 3.2 and iPhone 4 Retina displays with the correct scale
        if ([UIImage respondsToSelector:@selector(imageWithCGImage:scale:orientation:)]) {
            float scale = [[UIScreen mainScreen] scale];
            image = [UIImage imageWithCGImage:imageRef scale:scale orientation:UIImageOrientationUp];
        } else {
            image = [UIImage imageWithCGImage:imageRef];
        }

        CGImageRelease(imageRef);
        CGContextRelease(context);
    }

    CGColorSpaceRelease(colorSpaceRef);
    CGImageRelease(iref);
    CGDataProviderRelease(provider);

    // Free `pixels` exactly once here (the original code freed it a second
    // time at the end after already freeing it when context creation failed)
    free(pixels);

    return image;
}
+ (CGContextRef)newBitmapRGBA8ContextFromImage:(CGImageRef)image {
    CGContextRef context = NULL;
    CGColorSpaceRef colorSpace;
    uint32_t *bitmapData;

    size_t bitsPerPixel     = 32;
    size_t bitsPerComponent = 8;
    size_t bytesPerPixel    = bitsPerPixel / bitsPerComponent;

    size_t width  = CGImageGetWidth(image);
    size_t height = CGImageGetHeight(image);

    size_t bytesPerRow  = width * bytesPerPixel;
    size_t bufferLength = bytesPerRow * height;

    colorSpace = CGColorSpaceCreateDeviceRGB();
    if (!colorSpace) {
        NSLog(@"Error allocating color space RGB\n");
        return NULL;
    }

    // Allocate memory for the image data; the caller retrieves it via
    // CGBitmapContextGetData() and must free it after releasing the context
    bitmapData = (uint32_t *)malloc(bufferLength);
    if (!bitmapData) {
        NSLog(@"Error allocating memory for bitmap\n");
        CGColorSpaceRelease(colorSpace);
        return NULL;
    }

    // Create the bitmap context
    context = CGBitmapContextCreate(bitmapData,
                                    width,
                                    height,
                                    bitsPerComponent,
                                    bytesPerRow,
                                    colorSpace,
                                    kCGImageAlphaPremultipliedLast); // RGBA
    if (!context) {
        free(bitmapData);
        NSLog(@"Bitmap context not created");
    }

    CGColorSpaceRelease(colorSpace);
    return context;
}
Then.. I save it to the photo album:
+ (void)saveImageToPhotoAlbum:(UIImage *)image
{
    if (image != nil)
    {
        NSData *imageData = UIImagePNGRepresentation(image); // get the PNG representation
        UIImage *pngImage = [UIImage imageWithData:imageData];
        UIImageWriteToSavedPhotosAlbum(pngImage, self, nil, nil);
    }
    else
    {
        NSLog(@"Couldn't save to Photo Album due to invalid image..");
    }
}
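As an aside, passing nil for the completion selector means a failed write is silently swallowed. A minimal sketch of wiring up the standard UIKit completion callback instead (since saveImageToPhotoAlbum is a class method, the callback here is a class method too; the selector's signature is fixed by UIKit):

UIImageWriteToSavedPhotosAlbum(pngImage, self,
                               @selector(image:didFinishSavingWithError:contextInfo:),
                               NULL);

// UIKit invokes this once the write to the photo album completes
+ (void)image:(UIImage *)image didFinishSavingWithError:(NSError *)error contextInfo:(void *)contextInfo
{
    if (error) {
        NSLog(@"Saving to the photo album failed: %@", error);
    }
}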
I call these methods as follows:
unsigned char *imageData = [self convertUIImageToBitmapRGBA8:image];
UIImage *uiImage = [self convertBitmapRGBA8ToUIImage:imageData withWidth:width withHeight:height];
[self saveImageToPhotoAlbum:uiImage];
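As written, this call site leaks imageData: convertBitmapRGBA8ToUIImage only reads from the buffer (its data provider is created with a NULL release callback), so nothing ever frees it. A minimal corrected sketch, assuming width and height should match the bitmap that was captured (the variable names are illustrative):

CGImageRef cgImage = image.CGImage;
int width  = (int)CGImageGetWidth(cgImage);
int height = (int)CGImageGetHeight(cgImage);

unsigned char *imageData = [self convertUIImageToBitmapRGBA8:image];
if (imageData) {
    UIImage *uiImage = [self convertBitmapRGBA8ToUIImage:imageData
                                               withWidth:width
                                              withHeight:height];
    [self saveImageToPhotoAlbum:uiImage];
    free(imageData); // the buffer was malloc'd in convertUIImageToBitmapRGBA8
}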
When I do this... the file size changes, and the two files don't look the same.
E.g. if the original file is 33 KB, after this round trip it grows to 332 KB. What is going wrong here?
Answer 0 (score: 1)
Yes, loading a picture into a UIImage and then calling UIImagePNGRepresentation (or even UIImageJPEGRepresentation) can change the file size. Judging by the sizes, the original image was a JPEG. PNG files are generally larger than compressed JPEG files, even when the JPEG is only modestly compressed. If you want a file of comparable size, try UIImageJPEGRepresentation with various quality settings (e.g. a compressionQuality of 0.99, 0.9, or 0.8).
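For example, a minimal sketch of the save path using a JPEG representation instead (the 0.8 quality is just an illustrative starting point; tune it to taste):

NSData *jpegData = UIImageJPEGRepresentation(image, 0.8f); // quality in 0.0–1.0
UIImage *jpegImage = [UIImage imageWithData:jpegData];
UIImageWriteToSavedPhotosAlbum(jpegImage, nil, nil, nil);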
Also, the mere act of loading an image into a UIImage can cause some changes to the original asset (stripping metadata if nothing else, and possibly changing the color space, etc.).
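If a byte-for-byte copy of the original file is what you actually need, you can read the raw bytes straight from the ALAssetRepresentation instead of round-tripping through UIImage. A minimal sketch, assuming assetRepresentation is the representation from the question:

// Copy the asset's original file bytes without any re-encoding
long long size = [assetRepresentation size];
uint8_t *rawBytes = (uint8_t *)malloc((size_t)size);
NSError *error = nil;
NSUInteger bytesRead = [assetRepresentation getBytes:rawBytes
                                          fromOffset:0
                                              length:(NSUInteger)size
                                               error:&error];
// NSData takes ownership of rawBytes and frees it when released
NSData *originalData = [NSData dataWithBytesNoCopy:rawBytes
                                            length:bytesRead
                                      freeWhenDone:YES];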