This code shows no face detection from the camera, even though there is no error. I expected the face to be detected in real time and surrounded by a red rectangle, but I think I haven't placed the code correctly, or is there something I should put in viewDidLoad?
import UIKit
import CoreImage

class ViewController: UIViewController, UIAlertViewDelegate, UIImagePickerControllerDelegate, UINavigationControllerDelegate {

    @IBOutlet var imageView: UIImageView!

    @IBAction func Moodify(_ sender: UIButton) {

        func detect() {
            guard let personciImage = CIImage(image: imageView.image!) else {
                return
            }

            let accuracy = [CIDetectorAccuracy: CIDetectorAccuracyHigh]
            let faceDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: accuracy)
            let faces = faceDetector?.features(in: personciImage)

            // For converting the Core Image coordinates to UIView coordinates
            let ciImageSize = personciImage.extent.size
            var transform = CGAffineTransform(scaleX: 1, y: -1)
            transform = transform.translatedBy(x: 0, y: -ciImageSize.height)

            for face in faces as! [CIFaceFeature] {
                print("Found bounds are \(face.bounds)")

                // Apply the transform to convert the coordinates
                var faceViewBounds = face.bounds.applying(transform)

                // Calculate the actual position and size of the rectangle in the image view
                let viewSize = imageView.bounds.size
                let scale = min(viewSize.width / ciImageSize.width,
                                viewSize.height / ciImageSize.height)
                let offsetX = (viewSize.width - ciImageSize.width * scale) / 2
                let offsetY = (viewSize.height - ciImageSize.height * scale) / 2

                faceViewBounds = faceViewBounds.applying(CGAffineTransform(scaleX: scale, y: scale))
                faceViewBounds.origin.x += offsetX
                faceViewBounds.origin.y += offsetY

                let faceBox = UIView(frame: faceViewBounds)
                //let faceBox = UIView(frame: face.bounds)
                faceBox.layer.borderWidth = 3
                faceBox.layer.borderColor = UIColor.red.cgColor
                faceBox.backgroundColor = UIColor.clear
                imageView.addSubview(faceBox)

                if face.hasLeftEyePosition {
                    print("Left eye bounds are \(face.leftEyePosition)")
                }

                if face.hasRightEyePosition {
                    print("Right eye bounds are \(face.rightEyePosition)")
                }
            }
        }

        let picker = UIImagePickerController()
        picker.delegate = self
        picker.allowsEditing = true
        picker.sourceType = .camera
        picker.cameraDevice = .front
        self.present(picker, animated: true, completion: { _ in })

        func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [AnyHashable: Any]) {
            let chosenImage = info[UIImagePickerControllerEditedImage]
            self.imageView!.image = chosenImage as? UIImage
            picker.dismiss(animated: true, completion: { _ in })
        }

        // picker.dismiss(animated: true, completion: { _ in })

        func imagePickerControllerDidCancel(_ picker: UIImagePickerController) {
            picker.dismiss(animated: true, completion: { _ in })
        }
    }

    override func viewDidLoad() {
        let alert = UIAlertController(title: "Ooops!!!", message: "Camera is not connected", preferredStyle: UIAlertControllerStyle.alert)
        alert.addAction(UIAlertAction(title: "Connect", style: UIAlertActionStyle.default, handler: nil))
        self.present(alert, animated: true, completion: nil)
        super.viewDidLoad()
        // Do any additional setup after loading the view, typically from a nib.
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
        // Dispose of any resources that can be recreated.
    }
}
Answer 0 (score: 0)
Most likely you just need to trigger the function the way it is described in the documentation:
We will invoke the detect method in viewDidLoad. So insert the following lines of code in that method:
override func viewDidLoad() {
    super.viewDidLoad()
    detect()
}
Build and run the app.
EDIT: That is the solution when the function "detect" is a subclass method, but in your case you use an IBAction, which has a different syntax. You should try deleting the name of the function detect() and this bracket:

}
let picker =

This part has to be inside the function:
let picker = UIImagePickerController()
picker.delegate = self
picker.allowsEditing = true
picker.sourceType = .camera
picker.cameraDevice = .front
self.present(picker, animated: true, completion: { _ in })
For your case, you could also just omit this part.
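In other words, after removing the nested declaration the action would look roughly like this (a minimal sketch of the restructuring suggested above, not tested code; the picker delegate methods would still have to move out to class level so UIKit can actually call them):

@IBAction func Moodify(_ sender: UIButton) {
    // Former body of detect(), inlined so it runs when the button is tapped
    guard let personciImage = CIImage(image: imageView.image!) else {
        return
    }
    let accuracy = [CIDetectorAccuracy: CIDetectorAccuracyHigh]
    let faceDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: accuracy)
    let faces = faceDetector?.features(in: personciImage)
    // ... coordinate conversion and red faceBox drawing exactly as in the question ...
}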
Answer 1 (score: 0)
After going through your code, I see you don't even call detect() after taking the photo. I tried to fix it as described below; however, detect() returns zero faces found, as I described in Face Detection with Camera.
lazy var picker: UIImagePickerController = {
    let picker = UIImagePickerController()
    picker.delegate = self
    picker.allowsEditing = true
    picker.sourceType = .camera
    picker.cameraDevice = .front
    return picker
}()

@IBOutlet var imageView: UIImageView!

override func viewDidLoad() {
    super.viewDidLoad()
    imageView.contentMode = .scaleAspectFit
}

@IBAction func TakePhoto(_ sender: Any) {
    self.present(picker, animated: true, completion: nil)
}

// MARK: - UIImagePickerControllerDelegate

func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [String : Any]) {
    if let chosenImage = info[UIImagePickerControllerOriginalImage] as? UIImage {
        self.imageView!.image = chosenImage
        // Got the image from the camera; imageView.image is no longer nil, so it's time for face detection
        detect()
        picker.dismiss(animated: true, completion: nil)
    }
}

func imagePickerControllerDidCancel(_ picker: UIImagePickerController) {
    picker.dismiss(animated: true, completion: nil)
}

// MARK: - Face Detection

func detect() {
    guard let personciImage = CIImage(image: imageView.image!) else {
        return
    }

    let accuracy = [CIDetectorAccuracy: CIDetectorAccuracyHigh]
    let faceDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: accuracy)
    let faces = faceDetector?.features(in: personciImage)

    // For converting the Core Image coordinates to UIView coordinates
    let ciImageSize = personciImage.extent.size
    var transform = CGAffineTransform(scaleX: 1, y: -1)
    transform = transform.translatedBy(x: 0, y: -ciImageSize.height)

    print("faces.count = \(faces?.count)")

    for face in faces as! [CIFaceFeature] {
        print("Found bounds are \(face.bounds)")

        // Apply the transform to convert the coordinates
        var faceViewBounds = face.bounds.applying(transform)

        // Calculate the actual position and size of the rectangle in the image view
        let viewSize = imageView.bounds.size
        let scale = min(viewSize.width / ciImageSize.width,
                        viewSize.height / ciImageSize.height)
        let offsetX = (viewSize.width - ciImageSize.width * scale) / 2
        let offsetY = (viewSize.height - ciImageSize.height * scale) / 2

        faceViewBounds = faceViewBounds.applying(CGAffineTransform(scaleX: scale, y: scale))
        faceViewBounds.origin.x += offsetX
        faceViewBounds.origin.y += offsetY

        let faceBox = UIView(frame: faceViewBounds)
        //let faceBox = UIView(frame: face.bounds)
        faceBox.layer.borderWidth = 3
        faceBox.layer.borderColor = UIColor.red.cgColor
        faceBox.backgroundColor = UIColor.clear
        imageView.addSubview(faceBox)

        if face.hasLeftEyePosition {
            print("Left eye bounds are \(face.leftEyePosition)")
        }

        if face.hasRightEyePosition {
            print("Right eye bounds are \(face.rightEyePosition)")
        }
    }
}
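One likely cause of the zero-faces result, offered as a guess rather than a confirmed fix: CIImage(image:) discards the UIImage's orientation, so CIDetector ends up searching a rotated image for upright faces. Core Image accepts the photo's EXIF orientation through the CIDetectorImageOrientation option. A minimal sketch (the exifOrientation helper is illustrative and not part of the original answer):

func exifOrientation(_ orientation: UIImageOrientation) -> Int {
    // Map UIImageOrientation to the EXIF values (1-8) that CIDetectorImageOrientation expects
    switch orientation {
    case .up:            return 1
    case .upMirrored:    return 2
    case .down:          return 3
    case .downMirrored:  return 4
    case .leftMirrored:  return 5
    case .right:         return 6
    case .rightMirrored: return 7
    case .left:          return 8
    }
}

// Inside detect(), pass the orientation when requesting features:
let options: [String: Any] = [CIDetectorImageOrientation: exifOrientation(imageView.image!.imageOrientation)]
let faces = faceDetector?.features(in: personciImage, options: options)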