我可以进行人脸检测，并且可以每隔一定时间在每个检测到的人脸周围添加一个红色矩形。我无法解决的问题是：将每个检测到的面部裁剪出来，放入一个面部图像数组。我知道人脸检测结果和裁剪操作所用的坐标系是相反的，但无法弄清楚如何正确地裁剪脸部。如何从原始 “capturedImage” 中裁剪出每个检测到的面孔？
// Wrap the camera frame (CVPixelBuffer) in a CIImage so it can be fed to
// CoreImage / Vision. NOTE(review): `capturedImage` is defined outside this
// snippet — presumably an ARFrame/AVCapture pixel buffer; confirm at the caller.
let image = CIImage.init(cvPixelBuffer: capturedImage)
//imgFull.image = UIImage(ciImage: image.oriented(CGImagePropertyOrientation.right).cropped(to: CGRect.init(x: 50, y: 50, width: 200, height: 200)));
// CIDetector orientation option. The magic number 8 is the EXIF/CGImage
// orientation "right" (home button on the left, i.e. portrait camera frames) —
// it matches the `.oriented(.right)` rotation applied below.
let imageOptions = NSDictionary(object: NSNumber(value: 8) as NSNumber, forKey: CIDetectorImageOrientation as NSString)
let options = [CIDetectorAccuracy: CIDetectorAccuracyHigh]
// Legacy CoreImage face detector. NOTE(review): this CIDetector pass is
// redundant with the Vision `VNDetectFaceRectanglesRequest` created below —
// `faces` is not used in the visible code; verify whether it is consumed later
// or can be deleted.
let faceDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: options)!
let faces = faceDetector.features(in: image, options: imageOptions as? [String : AnyObject])
// Vision face-rectangle request: for each detected face, crop that face out of
// the rotated capture and show it in `imgFull`.
//
// Fix for the cropping problem: `VNFaceObservation.boundingBox` is NORMALIZED
// (0...1) with a BOTTOM-LEFT origin, while `CGImage.cropping(to:)` expects
// PIXEL coordinates with a TOP-LEFT origin. The bounding box must therefore be
// scaled by the image's pixel size and flipped vertically before cropping.
let detectFaceRequest = VNDetectFaceRectanglesRequest { (request, error) in
    DispatchQueue.main.async {
        //Loop through the resulting faces and add a red UIView on top of them.
        if let faces = request.results as? [VNFaceObservation] {
            for face in faces {
                // Overlay rect in view coordinates (presumably added to the view
                // hierarchy further below — outside this snippet).
                let faceView = UIView(frame: self.faceFrame(from: face.boundingBox))
                // Rotate to .right so pixels match the portrait orientation the
                // request was (assumed) performed with — TODO confirm the
                // VNImageRequestHandler uses the same orientation.
                let newImage: CGImage = self.convertCIImageToCGImage(inputImage: image.oriented(CGImagePropertyOrientation.right))
                // Convert the normalized bounding box to pixel space:
                // scale by width/height, and flip Y (bottom-left -> top-left).
                let imageWidth = CGFloat(newImage.width)
                let imageHeight = CGFloat(newImage.height)
                let box = face.boundingBox
                let cropRect = CGRect(
                    x: box.origin.x * imageWidth,
                    y: (1 - box.origin.y - box.size.height) * imageHeight,
                    width: box.size.width * imageWidth,
                    height: box.size.height * imageHeight)
                // Skip this face instead of crashing if the crop rect falls
                // outside the image (previously a force unwrap).
                guard let imageRef: CGImage = newImage.cropping(to: cropRect) else { continue }
                let croppedImage = UIImage(cgImage: imageRef)
                self.imgFull.image = croppedImage