import UIKit
import AVKit
import Vision

class ViewController: UIViewController, AVCaptureAudioDataOutputSampleBufferDelegate {

    @IBOutlet weak var resultLabel: UILabel!
    @IBOutlet weak var observeButtonOutlet: UIButton!

    var captureSession: AVCaptureSession!
    var observing = false

    override func viewDidLoad() {
        super.viewDidLoad()
        // Do any additional setup after loading the view, typically from a nib.
        captureSession = AVCaptureSession()
        setUpCapture()
    }

    // MARK: IBActions

    @IBAction func observeButtonPressed(_ sender: Any) {
        observing = !observing
        if observing {
            observeButtonOutlet.setTitle("Stop", for: .normal)
            startCapturing()
        } else {
            observeButtonOutlet.setTitle("Observe", for: .normal)
            stopCapturing()
        }
    }

    func startCapturing() {
        captureSession.startRunning()
    }

    func stopCapturing() {
        captureSession.stopRunning()
    }

    func setUpCapture() {
        captureSession.sessionPreset = .photo
        let captureDevice = AVCaptureDevice.default(for: .video)
        guard let input = try? AVCaptureDeviceInput(device: captureDevice!) else { return }
        captureSession.addInput(input)

        let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        view.layer.addSublayer(previewLayer)
        previewLayer.frame = CGRect(x: 0, y: 0, width: self.view.frame.width, height: self.view.frame.height - 70)

        let dataOutput = AVCaptureAudioDataOutput()
        dataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "videoQueue"))
        captureSession.addOutput(dataOutput)
    }

    // MARK: AVCaptureVideoOutputSampleBufferDelegate

    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        //print("Captured frame", Date())
        let cvPixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)!

        guard let model = try? VNCoreMLModel(for: SqueezeNet().model) else { return }
        let request = VNCoreMLRequest(model: model) { (request, error) in
            if error != nil {
                print("error \(error!.localizedDescription)")
                return
            }
            // print("request \(request.results)")
            guard let result = request.results as? [VNClassificationObservation] else { return }
            guard let firstObservation = result.first else { return }
            DispatchQueue.main.async {
                let confidence = String(format: "%.2f", firstObservation.confidence * 100)
                self.resultLabel.text = "\(firstObservation.identifier, confidence) %"
            }
        }
        try? VNImageRequestHandler(cvPixelBuffer: cvPixelBuffer, options: [:]).perform([request])
    }
}
I compiled the code above with the SqueezeNet ML model, but I am not sure why the item name never appears in the label. A screenshot is attached below.
The problem is that whatever the camera captures is not shown in resultLabel.
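For comparison, here is a minimal, untested sketch of the same SqueezeNet classification flow, but written around AVCaptureVideoDataOutput and AVCaptureVideoDataOutputSampleBufferDelegate. The view controller and label in the sketch are simplified placeholders created in code, not my actual storyboard setup:

import UIKit
import AVKit
import Vision

// Simplified placeholder view controller; assumes the same auto-generated SqueezeNet class.
class SketchViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {

    let resultLabel = UILabel()
    let captureSession = AVCaptureSession()

    override func viewDidLoad() {
        super.viewDidLoad()

        captureSession.sessionPreset = .photo
        // NSCameraUsageDescription must be present in Info.plist for the camera to start.
        guard let device = AVCaptureDevice.default(for: .video),
              let input = try? AVCaptureDeviceInput(device: device) else { return }
        captureSession.addInput(input)

        let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        previewLayer.frame = CGRect(x: 0, y: 0, width: view.bounds.width, height: view.bounds.height - 70)
        view.layer.addSublayer(previewLayer)

        resultLabel.frame = CGRect(x: 0, y: view.bounds.height - 70, width: view.bounds.width, height: 70)
        resultLabel.textAlignment = .center
        view.addSubview(resultLabel)

        // Video frames instead of audio samples, so each sample buffer carries a pixel buffer.
        let videoOutput = AVCaptureVideoDataOutput()
        videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "videoQueue"))
        captureSession.addOutput(videoOutput)

        captureSession.startRunning()
    }

    // Called once per captured video frame by AVCaptureVideoDataOutput.
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        guard let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer),
              let model = try? VNCoreMLModel(for: SqueezeNet().model) else { return }

        let request = VNCoreMLRequest(model: model) { request, _ in
            guard let top = (request.results as? [VNClassificationObservation])?.first else { return }
            DispatchQueue.main.async {
                let confidence = String(format: "%.2f", top.confidence * 100)
                self.resultLabel.text = "\(top.identifier) \(confidence) %"
            }
        }
        try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([request])
    }
}

While writing the sketch I also noticed that the VNCoreMLModel is recreated for every frame (in my code above as well); creating it once and reusing it would presumably be cheaper, though that alone should not stop the label from updating.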