I am using the Core Image framework on iOS for face detection. The sample code works fine, but when I load an image from the photo library through a UIImagePickerController, face detection no longer works correctly.

Here is the code I use for face detection:
-(void)openCamera
{
    picker_ = [[UIImagePickerController alloc] init];
    picker_.delegate = (id)self;
    picker_.sourceType = UIImagePickerControllerSourceTypePhotoLibrary;
    picker_.wantsFullScreenLayout = YES;
    [self presentViewController:picker_ animated:YES completion:nil];
    picker_ = nil;
}
- (void)imagePickerController:(UIImagePickerController *)picker didFinishPickingMediaWithInfo:(NSDictionary *)info
{
    [picker dismissViewControllerAnimated:YES completion:nil];
    imgVw_User.image = [info objectForKey:UIImagePickerControllerOriginalImage];
    [self faceDetector];
}
-(void)faceDetector
{
    // Draw the face detection image
    [self.view addSubview:imgVw_User];
    // imgVw_User.image = [UIImage imageNamed:@"73_Before_Fractional_Skin_Resurfacing.jpg"];
    // Execute the method used to markFaces in background
    [self performSelectorInBackground:@selector(markFaces:) withObject:imgVw_User];
    // flip image on y-axis to match coordinate system used by core image
    [imgVw_User setTransform:CGAffineTransformMakeScale(1, -1)];
    // flip the entire window to make everything right side up
    [self.view setTransform:CGAffineTransformMakeScale(1, -1)];
}
-(void)markFaces:(UIImageView *)facePicture
{
    // draw a CI image with the previously loaded face detection picture
    CIImage* image = [CIImage imageWithCGImage:facePicture.image.CGImage];
    // create a face detector - since speed is not an issue we'll use a high accuracy detector
    CIDetector* detector = [CIDetector detectorOfType:CIDetectorTypeFace
                                              context:nil
                                              options:[NSDictionary dictionaryWithObject:CIDetectorAccuracyHigh forKey:CIDetectorAccuracy]];
    // create an array containing all the detected faces from the detector
    NSArray* features = [detector featuresInImage:image];
    // iterate through every detected face. CIFaceFeature provides the bounds
    // of the entire face, the coordinates of each eye and the mouth if
    // detected, and BOOLs for each so we can check whether they were found.
    for(CIFaceFeature* faceFeature in features)
    {
        // get the width of the face
        CGFloat faceWidth = faceFeature.bounds.size.width;
        // create a UIView using the bounds of the face
        UIView* faceView = [[UIView alloc] initWithFrame:faceFeature.bounds];
        // add a border around the newly created UIView
        faceView.layer.borderWidth = 1;
        faceView.layer.borderColor = [[UIColor redColor] CGColor];
        // add the new view to create a box around the face
        [self.view addSubview:faceView];
        if(faceFeature.hasLeftEyePosition)
        {
            // create a UIView with a size based on the width of the face
            UIView* leftEyeView = [[UIView alloc] initWithFrame:CGRectMake(faceFeature.leftEyePosition.x-faceWidth*0.15, faceFeature.leftEyePosition.y-faceWidth*0.15, faceWidth*0.3, faceWidth*0.3)];
            // change the background color of the eye view
            [leftEyeView setBackgroundColor:[[UIColor blueColor] colorWithAlphaComponent:0.3]];
            // set the position of the leftEyeView based on the face
            [leftEyeView setCenter:faceFeature.leftEyePosition];
            // round the corners
            leftEyeView.layer.cornerRadius = faceWidth*0.15;
            // add the view to the window
            [self.view addSubview:leftEyeView];
        }
        if(faceFeature.hasRightEyePosition)
        {
            // create a UIView with a size based on the width of the face
            UIView* rightEyeView = [[UIView alloc] initWithFrame:CGRectMake(faceFeature.rightEyePosition.x-faceWidth*0.15, faceFeature.rightEyePosition.y-faceWidth*0.15, faceWidth*0.3, faceWidth*0.3)];
            // change the background color of the eye view
            [rightEyeView setBackgroundColor:[[UIColor blueColor] colorWithAlphaComponent:0.3]];
            // set the position of the rightEyeView based on the face
            [rightEyeView setCenter:faceFeature.rightEyePosition];
            // round the corners
            rightEyeView.layer.cornerRadius = faceWidth*0.15;
            // add the new view to the window
            [self.view addSubview:rightEyeView];
        }
        if(faceFeature.hasMouthPosition)
        {
            // create a UIView with a size based on the width of the face
            UIView* mouth = [[UIView alloc] initWithFrame:CGRectMake(faceFeature.mouthPosition.x-faceWidth*0.2, faceFeature.mouthPosition.y-faceWidth*0.2, faceWidth*0.4, faceWidth*0.4)];
            // change the background color for the mouth to green
            [mouth setBackgroundColor:[[UIColor greenColor] colorWithAlphaComponent:0.3]];
            // set the position of the mouthView based on the face
            [mouth setCenter:faceFeature.mouthPosition];
            // round the corners
            mouth.layer.cornerRadius = faceWidth*0.2;
            // add the new view to the window
            [self.view addSubview:mouth];
        }
    }
}
If I swap the image picker's image for a static image bundled with the app, i.e. 73_Before_Fractional_Skin_Resurfacing.jpg, detection works correctly. The problem only appears when the image comes from the photo library via the image picker.

Please advise.
Answer 0 (score: 4)
Maybe this code will help you. The UIImage that comes back from the photo library carries an imageOrientation flag, but [CIImage imageWithCGImage:] sees only the raw pixel data and ignores that flag, so the detector is effectively handed a rotated image and the face coordinates come out wrong. A bundled image like 73_Before_Fractional_Skin_Resurfacing.jpg is stored upright, which is why detection works there. Scale and rotate the picked image with the function below, and run the detector on the image it returns:
- (UIImage *)scaleAndRotateImage:(UIImage *)image {
    static int kMaxResolution = 640;
    CGImageRef imgRef = image.CGImage;
    CGFloat width = CGImageGetWidth(imgRef);
    CGFloat height = CGImageGetHeight(imgRef);
    CGAffineTransform transform = CGAffineTransformIdentity;
    CGRect bounds = CGRectMake(0, 0, width, height);
    // cap the longer side at kMaxResolution, preserving the aspect ratio
    if (width > kMaxResolution || height > kMaxResolution) {
        CGFloat ratio = width/height;
        if (ratio > 1) {
            bounds.size.width = kMaxResolution;
            bounds.size.height = bounds.size.width / ratio;
        } else {
            bounds.size.height = kMaxResolution;
            bounds.size.width = bounds.size.height * ratio;
        }
    }
    CGFloat scaleRatio = bounds.size.width / width;
    CGSize imageSize = CGSizeMake(CGImageGetWidth(imgRef), CGImageGetHeight(imgRef));
    CGFloat boundHeight;
    UIImageOrientation orient = image.imageOrientation;
    // build the transform that bakes the orientation flag into the pixels
    switch(orient) {
        case UIImageOrientationUp:
            transform = CGAffineTransformIdentity;
            break;
        case UIImageOrientationUpMirrored:
            transform = CGAffineTransformMakeTranslation(imageSize.width, 0.0);
            transform = CGAffineTransformScale(transform, -1.0, 1.0);
            break;
        case UIImageOrientationDown:
            transform = CGAffineTransformMakeTranslation(imageSize.width, imageSize.height);
            transform = CGAffineTransformRotate(transform, M_PI);
            break;
        case UIImageOrientationDownMirrored:
            transform = CGAffineTransformMakeTranslation(0.0, imageSize.height);
            transform = CGAffineTransformScale(transform, 1.0, -1.0);
            break;
        case UIImageOrientationLeftMirrored:
            boundHeight = bounds.size.height;
            bounds.size.height = bounds.size.width;
            bounds.size.width = boundHeight;
            transform = CGAffineTransformMakeTranslation(imageSize.height, imageSize.width);
            transform = CGAffineTransformScale(transform, -1.0, 1.0);
            transform = CGAffineTransformRotate(transform, 3.0 * M_PI / 2.0);
            break;
        case UIImageOrientationLeft:
            boundHeight = bounds.size.height;
            bounds.size.height = bounds.size.width;
            bounds.size.width = boundHeight;
            transform = CGAffineTransformMakeTranslation(0.0, imageSize.width);
            transform = CGAffineTransformRotate(transform, 3.0 * M_PI / 2.0);
            break;
        case UIImageOrientationRightMirrored:
            boundHeight = bounds.size.height;
            bounds.size.height = bounds.size.width;
            bounds.size.width = boundHeight;
            transform = CGAffineTransformMakeScale(-1.0, 1.0);
            transform = CGAffineTransformRotate(transform, M_PI / 2.0);
            break;
        case UIImageOrientationRight:
            boundHeight = bounds.size.height;
            bounds.size.height = bounds.size.width;
            bounds.size.width = boundHeight;
            transform = CGAffineTransformMakeTranslation(imageSize.height, 0.0);
            transform = CGAffineTransformRotate(transform, M_PI / 2.0);
            break;
        default:
            [NSException raise:NSInternalInconsistencyException format:@"Invalid image orientation"];
    }
    // redraw the image into an upright, size-capped context
    UIGraphicsBeginImageContext(bounds.size);
    CGContextRef context = UIGraphicsGetCurrentContext();
    if (orient == UIImageOrientationRight || orient == UIImageOrientationLeft) {
        CGContextScaleCTM(context, -scaleRatio, scaleRatio);
        CGContextTranslateCTM(context, -height, 0);
    } else {
        CGContextScaleCTM(context, scaleRatio, -scaleRatio);
        CGContextTranslateCTM(context, 0, -height);
    }
    CGContextConcatCTM(context, transform);
    CGContextDrawImage(context, CGRectMake(0, 0, width, height), imgRef);
    UIImage *returnImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return returnImage;
}
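
For completeness, here is a minimal sketch of how the helper could be wired into the picker delegate from the question (it assumes the same imgVw_User outlet and faceDetector method; adjust to your own names):

- (void)imagePickerController:(UIImagePickerController *)picker didFinishPickingMediaWithInfo:(NSDictionary *)info
{
    [picker dismissViewControllerAnimated:YES completion:nil];
    // normalize the picked image first, so the CGImage pixels handed to
    // Core Image are upright and size-capped
    UIImage *picked = [info objectForKey:UIImagePickerControllerOriginalImage];
    imgVw_User.image = [self scaleAndRotateImage:picked];
    [self faceDetector];
}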
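
As an alternative that avoids redrawing the image, CIDetector can simply be told which way is up: featuresInImage:options: accepts a CIDetectorImageOrientation value in EXIF numbering (1-8). Below is a sketch of the standard mapping from UIImageOrientation; here image stands for the picked UIImage and ciImage for the CIImage built from its CGImage. Note that the returned feature bounds are still expressed in the un-rotated pixel space, so the overlay math must account for the flag itself:

// map UIImageOrientation to the EXIF orientation values (1-8)
// expected by CIDetectorImageOrientation
int exifOrientation = 1;
switch (image.imageOrientation) {
    case UIImageOrientationUp:            exifOrientation = 1; break;
    case UIImageOrientationDown:          exifOrientation = 3; break;
    case UIImageOrientationLeft:          exifOrientation = 8; break;
    case UIImageOrientationRight:         exifOrientation = 6; break;
    case UIImageOrientationUpMirrored:    exifOrientation = 2; break;
    case UIImageOrientationDownMirrored:  exifOrientation = 4; break;
    case UIImageOrientationLeftMirrored:  exifOrientation = 5; break;
    case UIImageOrientationRightMirrored: exifOrientation = 7; break;
}
NSArray *features = [detector featuresInImage:ciImage
                                      options:[NSDictionary dictionaryWithObject:[NSNumber numberWithInt:exifOrientation]
                                                                           forKey:CIDetectorImageOrientation]];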