我有一个使用AVFoundation的照片应用程序。拍摄照片时,可以完成一些操作,例如旋转图像,将图像裁剪到正确的尺寸等等。
问题是,经过3-4次这样的应用程序最终会崩溃。在拍摄照片的整个过程中,NSLog会收到内存警告。
好消息是我昨晚能够使用这一行代码语句来解决这个问题:CGImageRelease(imageRef);
然而,应用程序在完成照片旋转和裁剪时仍然有点慢,Instruments 显示我的图像旋转方法实现占用了太多时间,我还想确保没有遗漏任何其他应该释放或设置为 nil 的对象。
以下是用户按下按钮拍摄照片时运行的所有代码:
// Captures a still frame from the session, normalizes its orientation,
// crops it to the preview layer's aspect ratio, and displays the result
// in a sublayer over the preview.
//
// Fixes vs. the previous version:
//  - videoConnection.videoOrientation was assigned while videoConnection
//    was still nil (a silent no-op in Objective-C), so portrait orientation
//    was never actually applied; it is now set after the connection is found.
//  - removed the useless [[NSData alloc] init] placeholder allocation.
//  - the front/back-camera if/else had byte-identical branches; merged.
//  - UIKit/CALayer updates are dispatched to the main queue — the capture
//    completion handler runs on an arbitrary AVFoundation queue, and UIKit
//    is not thread-safe. This also keeps the heavy image work off main.
//  - bails out early when no video connection exists.
- (IBAction)stillImageCapture {
    _hasUserTakenAPhoto = @"YES";

    // Locate the video connection on the still-image output.
    AVCaptureConnection *videoConnection = nil;
    for (AVCaptureConnection *connection in _stillImageOutput.connections) {
        for (AVCaptureInputPort *port in [connection inputPorts]) {
            if ([[port mediaType] isEqual:AVMediaTypeVideo]) {
                videoConnection = connection;
                break;
            }
        }
        if (videoConnection) {
            break;
        }
    }
    if (!videoConnection) {
        return; // No video input available — nothing to capture.
    }
    if (videoConnection.isVideoOrientationSupported) {
        videoConnection.videoOrientation = AVCaptureVideoOrientationPortrait;
    }

    // NOTE: self is captured strongly on purpose — self does not retain this
    // block, so there is no retain cycle; the capture merely keeps self alive
    // until the handler runs, which is the desired behavior.
    [_stillImageOutput captureStillImageAsynchronouslyFromConnection:videoConnection
                                                   completionHandler:^(CMSampleBufferRef imageDataSampleBuffer, NSError *error) {
        if (!imageDataSampleBuffer) {
            return;
        }
        self.imageData = [AVCaptureStillImageOutput jpegStillImageNSDataRepresentation:imageDataSampleBuffer];
        self.image = [[UIImage alloc] initWithData:self.imageData];

        // Front camera (AVCaptureDevicePositionFront == 2) needs its mirroring
        // corrected before the orientation fix; both paths then rotate.
        if (_inputDevice.position == AVCaptureDevicePositionFront) {
            self.image = [self selfieCorrection:self.image];
        }
        self.image = [self rotate:self.image andOrientation:self.image.imageOrientation];

        // Build a crop rect matching the preview layer's aspect ratio:
        // pin the image's smaller dimension, scale the other by width:height.
        CGFloat widthToHeightRatio = _previewLayer.bounds.size.width / _previewLayer.bounds.size.height;
        CGRect cropRect;
        if (self.image.size.width < self.image.size.height) {
            cropRect.size.width = self.image.size.width;
            cropRect.size.height = cropRect.size.width / widthToHeightRatio;
        } else {
            cropRect.size.width = self.image.size.height * widthToHeightRatio;
            cropRect.size.height = self.image.size.height;
        }
        // Center the rect in the longer dimension.
        if (cropRect.size.width < cropRect.size.height) {
            cropRect.origin.x = 0;
            cropRect.origin.y = (self.image.size.height - cropRect.size.height) / 2.0;
        } else {
            cropRect.origin.x = (self.image.size.width - cropRect.size.width) / 2.0;
            cropRect.origin.y = 0;
            // Vertical centering offset used as the final crop y below.
            finalXValueForCrop = (self.image.size.height - cropRect.size.height) / 2;
        }
        // NOTE(review): as in the original, the crop's y origin always comes
        // from finalXValueForCrop, which is only written in the landscape
        // branch above — in the portrait branch this reuses a stale value.
        // Confirm this is intended before changing it.
        CGRect cropRectFinal = CGRectMake(cropRect.origin.x, finalXValueForCrop,
                                          cropRect.size.width, cropRect.size.height);

        // CGImageCreateWithImageInRect returns a +1 reference; releasing it
        // after handing the pixels to the layer is what fixed the memory
        // warnings and eventual crash.
        CGImageRef imageRef = CGImageCreateWithImageInRect([self.image CGImage], cropRectFinal);
        UIImage *croppedImage = [UIImage imageWithCGImage:imageRef];
        CGImageRelease(imageRef);

        // All view/layer mutation must happen on the main thread.
        dispatch_async(dispatch_get_main_queue(), ^{
            _subLayer = [CALayer layer];
            _subLayer.contents = (id)croppedImage.CGImage;
            _subLayer.frame = _previewLayer.frame;
            [_takingPhotoView setHidden:YES];
            [_previewLayer addSublayer:_subLayer];
            if ([_hasUserTakenAPhoto isEqual:@"YES"]) {
                [_afterPhotoView setHidden:NO];
            }
        });
    }];
}
注意我是如何创建imageRef,然后将其设置为子图层的contents属性:
CGImageRef imageRef = CGImageCreateWithImageInRect([self.image CGImage], cropRectFinal);
_subLayer.contents = (id)[UIImage imageWithCGImage:imageRef].CGImage;
然后在其正下方添加 CGImageRelease(imageRef);
,它修复了崩溃和内存警告以及内存压力问题。
但是我想确保我释放了上述代码中所有应该释放的对象。
另外,我有3种不同的方法实现,它们都能获得捕获图像时所需的完全相同的旋转效果。但是,无论我使用哪一个,当我使用时间分析器在Instruments中运行我的应用程序时,它们都有一个代码语句占用了所有时间。
下面是3个方法实现,正好在每个方法上面是导致延迟并占用所有时间的代码语句。希望你们中的一些人能够看到这些并告诉我如何解决这个问题。
方法实现1:
占用所有时间的语句:[self.image drawInRect:(CGRect){0, 0, self.image.size}];
/// Re-renders self.image so that its backing pixels are stored upright
/// (imageOrientation == Up). When the image is already upright it is
/// returned unchanged, avoiding a needless redraw.
- (UIImage *)normalizedImage {
    UIImage *source = self.image;
    if (source.imageOrientation == UIImageOrientationUp) {
        return source;
    }
    // Drawing through UIKit applies the orientation transform automatically,
    // so a plain draw into a fresh context bakes the rotation into the pixels.
    CGRect bounds = (CGRect){0, 0, source.size};
    UIGraphicsBeginImageContextWithOptions(source.size, NO, source.scale);
    [source drawInRect:bounds];
    UIImage *upright = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return upright;
}
方法实现2:
占用所有时间的语句:[src drawAtPoint:CGPointMake(0, 0)];
// Renders `src` into a new bitmap so its pixels end up upright and the
// returned image's orientation is Up.
//
// Why no explicit CTM math: -drawAtPoint: already applies the receiver's
// imageOrientation transform while drawing. The previous version's
// CGContextRotateCTM(context, 90/180*M_PI) calls were dead code — 90/180 is
// INTEGER division, i.e. 0, so every "rotation" was by 0 radians. They have
// been removed; runtime behavior is unchanged.
//
// Performance note: the draw call is where UIKit actually decodes the JPEG,
// which is why Instruments attributes essentially all of the time to it.
// That cost is inherent to rendering a full-resolution photo; the only real
// win is to draw into a smaller context (scale src.size down) if a
// full-resolution result is not required.
//
// The `orientation` parameter is kept for interface compatibility; the draw
// uses src.imageOrientation implicitly.
- (UIImage *)rotate:(UIImage *)src andOrientation:(UIImageOrientation)orientation {
    UIGraphicsBeginImageContext(src.size);
    // Drawing honors src.imageOrientation, producing an upright rendering.
    [src drawAtPoint:CGPointZero];
    UIImage *img = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return img;
}
方法实现3:
占用所有时间的语句:CGContextDrawImage(ctx, CGRectMake(0,0,image.size.width,image.size.height), image.CGImage);
// Returns a copy of `image` whose pixel buffer has been physically rotated
// and/or flipped so that the result is effectively orientation-Up.
// Unlike the UIKit drawing helpers, this draws the raw CGImage, which does
// NOT honor imageOrientation — so the metadata transform is rebuilt by hand.
//
// Fix vs. the previous version: CGBitmapContextCreate can return NULL for
// pixel formats it cannot render into; every subsequent CG call on a NULL
// context logs errors and the method would return a blank image. We now
// guard and fall back to returning the original image.
- (UIImage *)fixrotation:(UIImage *)image {
    // Already upright — nothing to do.
    if (image.imageOrientation == UIImageOrientationUp) return image;

    // First pass: rotation + translation mapping the stored pixels into an
    // upright coordinate system.
    CGAffineTransform transform = CGAffineTransformIdentity;
    switch (image.imageOrientation) {
        case UIImageOrientationDown:
        case UIImageOrientationDownMirrored:
            transform = CGAffineTransformTranslate(transform, image.size.width, image.size.height);
            transform = CGAffineTransformRotate(transform, M_PI);
            break;
        case UIImageOrientationLeft:
        case UIImageOrientationLeftMirrored:
            transform = CGAffineTransformTranslate(transform, image.size.width, 0);
            transform = CGAffineTransformRotate(transform, M_PI_2);
            break;
        case UIImageOrientationRight:
        case UIImageOrientationRightMirrored:
            transform = CGAffineTransformTranslate(transform, 0, image.size.height);
            transform = CGAffineTransformRotate(transform, -M_PI_2);
            break;
        case UIImageOrientationUp:
        case UIImageOrientationUpMirrored:
            break;
    }
    // Second pass: undo horizontal mirroring for the *Mirrored variants.
    switch (image.imageOrientation) {
        case UIImageOrientationUpMirrored:
        case UIImageOrientationDownMirrored:
            transform = CGAffineTransformTranslate(transform, image.size.width, 0);
            transform = CGAffineTransformScale(transform, -1, 1);
            break;
        case UIImageOrientationLeftMirrored:
        case UIImageOrientationRightMirrored:
            transform = CGAffineTransformTranslate(transform, image.size.height, 0);
            transform = CGAffineTransformScale(transform, -1, 1);
            break;
        case UIImageOrientationUp:
        case UIImageOrientationDown:
        case UIImageOrientationLeft:
        case UIImageOrientationRight:
            break;
    }

    // Draw the underlying CGImage into a new bitmap context, applying the
    // transform calculated above.
    CGContextRef ctx = CGBitmapContextCreate(NULL, image.size.width, image.size.height,
                                             CGImageGetBitsPerComponent(image.CGImage), 0,
                                             CGImageGetColorSpace(image.CGImage),
                                             CGImageGetBitmapInfo(image.CGImage));
    if (!ctx) {
        // Unsupported pixel format — safer to hand back the original image
        // (with its orientation metadata intact) than to crash or go blank.
        return image;
    }
    CGContextConcatCTM(ctx, transform);
    switch (image.imageOrientation) {
        case UIImageOrientationLeft:
        case UIImageOrientationLeftMirrored:
        case UIImageOrientationRight:
        case UIImageOrientationRightMirrored:
            // 90-degree rotations: the source rect's width/height are swapped.
            CGContextDrawImage(ctx, CGRectMake(0, 0, image.size.height, image.size.width), image.CGImage);
            break;
        default:
            CGContextDrawImage(ctx, CGRectMake(0, 0, image.size.width, image.size.height), image.CGImage);
            break;
    }
    // Wrap the rendered pixels in a UIImage, then release the CF objects we
    // own (Create-rule references are not managed by ARC).
    CGImageRef cgimg = CGBitmapContextCreateImage(ctx);
    UIImage *img = [UIImage imageWithCGImage:cgimg];
    CGContextRelease(ctx);
    CGImageRelease(cgimg);
    return img;
}