Olympus Camera Kit: app quits while drawing face detection frames on the live view

Date: 2015-08-11 00:31:29

Tags: ios objective-c olympus-air olympus-camerakit

I am working on a capture app for the Olympus Air A01 that updates face detection results on the live view image in real time. I get the face detection results from the Camera Kit, but my app quits as soon as a face is detected. My code updates the live view image in didUpdateLiveView and passes the data to drawFaceFrame:cameraFrame:.

The Olympus Camera Kit version is 1.1.0.

#import <AVFoundation/AVFoundation.h>  // for AVMakeRectWithAspectRatioInsideRect
#import <OLYCameraKit/OLYCamera.h>

@interface ViewController () <OLYCameraLiveViewDelegate>
//OLYCamera Class
@property (weak, nonatomic) OLYCamera *camera;

//For live view
@property (weak, nonatomic) IBOutlet UIImageView *liveView; 

//8 UIViews for face detection frame
@property (strong, nonatomic) IBOutletCollection(UIView) NSArray *faceViews; 
@end

- (void)camera:(OLYCamera *)camera didUpdateLiveView:(NSData *)data metadata:(NSDictionary *)metadata
{
    // UPDATE LIVE VIEW IMAGE HERE

    CGRect frame = AVMakeRectWithAspectRatioInsideRect(self.liveView.image.size, self.liveView.frame);
    [self drawFaceFrame:camera.detectedHumanFaces cameraFrame:frame];
}
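For reference, the elided live view update typically decodes the packet into a UIImage before assigning it to the image view. Below is a minimal sketch only, assuming the OLYCameraConvertDataToImage helper used in the Camera Kit samples; the dispatch to the main queue is also an assumption, in case the delegate is not called on the main thread:

- (void)camera:(OLYCamera *)camera didUpdateLiveView:(NSData *)data metadata:(NSDictionary *)metadata
{
    // Decode the live view packet into a displayable image.
    UIImage *image = OLYCameraConvertDataToImage(data, metadata);
    if (!image) {
        return;
    }
    dispatch_async(dispatch_get_main_queue(), ^{
        // Update the live view and redraw the face frames on the main thread.
        self.liveView.image = image;
        CGRect frame = AVMakeRectWithAspectRatioInsideRect(image.size, self.liveView.frame);
        [self drawFaceFrame:camera.detectedHumanFaces cameraFrame:frame];
    });
}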

//Draw face detection frame
- (void)drawFaceFrame:(NSDictionary *)faces cameraFrame:(CGRect)cameraFrame
{
    const CGFloat ratioW = cameraFrame.size.width / self.liveView.image.size.width;
    const CGFloat ratioH = cameraFrame.size.height / self.liveView.image.size.height;

    unsigned int i = 0;
    for ( ; faces && i < faces.count ; ++i)
    {
        NSString *key = [NSString stringWithFormat:@"%u", i];
        NSValue *value = faces[key];
        CGRect rect = [value CGRectValue];
        CGRect rectInImage = OLYCameraConvertRectOnViewfinderIntoLiveImage(rect, self.liveView.image);
        CGRect rectInView = rectInImage;
        rectInView.origin.x *= ratioW;
        rectInView.origin.y *= ratioH;
        rectInView.origin.x += cameraFrame.origin.x;
        rectInView.origin.y += cameraFrame.origin.y;
        rectInView.size.width *= ratioW;
        rectInView.size.height *= ratioH;

        if (i < self.faceViews.count)
        {
            UIView *faceView = [self.faceViews objectAtIndex:i];
            CALayer *layer = [faceView layer];
            [layer setBorderColor:[UIColor redColor].CGColor];

            faceView.frame = rectInView;
            faceView.hidden = NO;
        }
    }

    // Hide unused frames
    for ( ; i < [self.faceViews count] ; ++i)
    {
        UIView *faceView = [self.faceViews objectAtIndex:i];
        faceView.hidden = YES;
    }
}

1 Answer:

Answer 0 (score: 2)

There seem to be two problems:

  1. Your app draws every face detection frame even when the camera has not detected any faces.
  2. The Camera Kit may update detectedHumanFaces between the moment your app computes the frame coordinates and the moment it draws the frames.

The first point is not critical, just not ideal. I suggest the key-value observing (KVO) technique, so that your handler is invoked only when the camera actually detects faces.

KVO also effectively solves the second problem: you can take a snapshot of detectedHumanFaces inside the method the observer invokes. In the code below, the observer calls the detectedHumanFacesValueDidChange method.

    @interface ViewController () <OLYCameraLiveViewDelegate>
    //OLYCamera Class
    @property (weak, nonatomic) OLYCamera *camera;
    //For live view
    @property (weak, nonatomic) IBOutlet UIImageView *imageView;
    //For face detection frames
    @property (strong, nonatomic) IBOutletCollection(UIView) NSArray *detectedHumanFaceViews;
    @end
    
    /// Called by the observer when Camera Kit updates the detectedHumanFaces property.
    - (void)detectedHumanFacesValueDidChange {
    
        // Snapshot the live view image and the detected face information so the
        // number of faces and their coordinates stay consistent while the app
        // updates the face detection frames.
        UIImage *image = self.imageView.image;
        NSDictionary *detectedHumanFaces = self.camera.detectedHumanFaces;
        if (image == nil || detectedHumanFaces == nil) {
            // Hide every frame unless both a live view image and detection results are available.
            for (UIView *detectedHumanFaceView in self.detectedHumanFaceViews) {
                detectedHumanFaceView.hidden = YES;
            }
            return;
        }
        for (NSInteger index = 0; index < self.detectedHumanFaceViews.count; index++) {
            // Look up the detected face entry corresponding to this frame view.
            // The camera detects at most eight faces.
            UIView *detectedHumanFaceView = self.detectedHumanFaceViews[index];
            NSString *faceKey = [NSString stringWithFormat:@"%ld", (long)index];
            NSValue *faceValue = detectedHumanFaces[faceKey];
            if (!faceValue) {
                detectedHumanFaceView.hidden = YES;
                continue;
            }
            // Compute the on-screen coordinates of the face detection frame.
            CGRect imageRect = AVMakeRectWithAspectRatioInsideRect(image.size, self.imageView.bounds);
            CGFloat xRatio = imageRect.size.width / image.size.width;
            CGFloat yRatio = imageRect.size.height / image.size.height;
            CGRect faceRect = OLYCameraConvertRectOnViewfinderIntoLiveImage([faceValue CGRectValue], image);
            CGFloat x = faceRect.origin.x * xRatio + imageRect.origin.x;
            CGFloat y = faceRect.origin.y * yRatio + imageRect.origin.y;
            CGFloat w = faceRect.size.width * xRatio;
            CGFloat h = faceRect.size.height * yRatio;
            CGRect viewRect = CGRectMake(x, y, w, h);
            // Draw face detection frame.
            detectedHumanFaceView.frame = viewRect;
            detectedHumanFaceView.hidden = NO;
        }
    }
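
The answer assumes an observer for the detectedHumanFaces key path is already registered, but does not show that wiring. Below is a minimal sketch of one way to set it up; the key path string comes from the Camera Kit property name, and dispatching to the main queue is an assumption, since the SDK may notify from a background thread.

    static void *DetectedHumanFacesContext = &DetectedHumanFacesContext;
    
    - (void)viewDidLoad {
        [super viewDidLoad];
        // Assumes self.camera is already set by the time the view loads.
        // Observe the Camera Kit property so detectedHumanFacesValueDidChange
        // runs only when the detection results actually change.
        [self.camera addObserver:self
                      forKeyPath:@"detectedHumanFaces"
                         options:NSKeyValueObservingOptionNew
                         context:DetectedHumanFacesContext];
    }
    
    - (void)dealloc {
        [self.camera removeObserver:self
                         forKeyPath:@"detectedHumanFaces"
                            context:DetectedHumanFacesContext];
    }
    
    - (void)observeValueForKeyPath:(NSString *)keyPath
                          ofObject:(id)object
                            change:(NSDictionary *)change
                           context:(void *)context {
        if (context == DetectedHumanFacesContext) {
            // UIKit must only be touched on the main thread.
            dispatch_async(dispatch_get_main_queue(), ^{
                [self detectedHumanFacesValueDidChange];
            });
        } else {
            [super observeValueForKeyPath:keyPath ofObject:object change:change context:context];
        }
    }

With this in place, detectedHumanFacesValueDidChange reads the property once into a local variable, so the frames it draws always correspond to a single detection result, which removes the race between coordinate computation and drawing described above.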