I'm playing around with Apple's CIDetector to detect faces in live video from the phone's front camera. I've been following this article and have it almost working. The problem I'm running into is that a new red box is created on every frame instead of the same box being reused.
The tutorial I'm following is supposed to include code that prevents this from happening, but it doesn't seem to work. I'm still quite new to Swift and am struggling to figure out why.
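For reference, the detector itself is created roughly like this (a simplified sketch of my setup; the property name faceDetector and the single accuracy option are placeholders, the real configuration follows the article):

    // Simplified detector setup (placeholder for my actual configuration)
    let faceDetector = CIDetector(ofType: CIDetectorTypeFace,
                                  context: nil,
                                  options: [CIDetectorAccuracy: CIDetectorAccuracyHigh])!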
Here is the drawing code I'm using:
    func drawFaceMasksFor(features: [CIFaceFeature], bufferFrame: CGRect) {
        CATransaction.begin()
        CATransaction.setValue(kCFBooleanTrue, forKey: kCATransactionDisableActions)

        // Hide all current masks
        view.layer.sublayers?.filter({ $0.name == "MaskFace" }).forEach { $0.isHidden = true }

        // Do nothing if no face is detected
        guard !features.isEmpty else {
            CATransaction.commit()
            return
        }

        // Faces are detected in the video image's coordinate space, but the view
        // they are drawn in may be smaller or larger than the video frame, so the
        // face bounds have to be rescaled to fit the screen.
        let xScale = view.frame.width / bufferFrame.width
        let yScale = view.frame.height / bufferFrame.height
        let transform = CGAffineTransform(rotationAngle: .pi).translatedBy(x: -bufferFrame.width,
                                                                           y: -bufferFrame.height)

        for feature in features {
            var faceRect = feature.bounds.applying(transform)
            faceRect = CGRect(x: faceRect.minX * xScale,
                              y: faceRect.minY * yScale,
                              width: faceRect.width * xScale,
                              height: faceRect.height * yScale)

            // Reuse an existing face layer if one is available
            var faceLayer = view.layer.sublayers?
                .filter { $0.name == "MaskFace" && $0.isHidden == true }
                .first
            if faceLayer == nil {
                // Prepare a new layer
                faceLayer = CALayer()
                faceLayer?.backgroundColor = UIColor.clear.cgColor
                faceLayer?.borderColor = UIColor.red.cgColor
                faceLayer?.borderWidth = 3.0
                faceLayer?.frame = faceRect
                faceLayer?.masksToBounds = true
                faceLayer?.contentsGravity = kCAGravityResizeAspectFill
                view.layer.addSublayer(faceLayer!)
            } else {
                faceLayer?.frame = faceRect
                faceLayer?.position = faceRect.origin
                faceLayer?.isHidden = false
            }
            // Masks for the left eye, right eye, and mouth could be added here
        }
        CATransaction.commit()
    }
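And this is roughly how drawFaceMasksFor gets called from the video output (again simplified; the AVCaptureSession and delegate wiring follow the article, and faceDetector is the property sketched above):

    // AVCaptureVideoDataOutputSampleBufferDelegate callback (simplified)
    func captureOutput(_ output: AVCaptureOutput,
                       didOutput sampleBuffer: CMSampleBuffer,
                       from connection: AVCaptureConnection) {
        guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
        let image = CIImage(cvPixelBuffer: pixelBuffer)
        // Detect faces in the buffer's coordinate space
        let features = image |> { self.faceDetector.features(in: $0) }
            .compactMap { $0 as? CIFaceFeature }
        // Draw on the main thread
        DispatchQueue.main.async {
            self.drawFaceMasksFor(features: features, bufferFrame: image.extent)
        }
    }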