I've been working on a project in Swift that lets the user detect the object the camera is looking at by tapping the screen. I followed this guide to learn how to integrate machine learning on iOS. With that in place, the camera feed appears on screen and a description at the bottom says what the item is most likely to be. Now I want to use a tap gesture so that the object's label is only fetched when the screen is tapped.
I tried to do this in the code below, but I'm not sure where

    @objc func doubleTapped() {
        // code
    }

should go. Here is what I have tried:
import UIKit
import AVKit
import Vision

class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {

    let identifierLabel: UILabel = {
        let label = UILabel()
        label.backgroundColor = .white
        label.textAlignment = .center
        label.translatesAutoresizingMaskIntoConstraints = false
        return label
    }()

    override func viewDidLoad() {
        super.viewDidLoad()

        let captureSession = AVCaptureSession()
        captureSession.sessionPreset = .photo

        let tap = UITapGestureRecognizer(target: self, action: #selector(doubleTapped))
        tap.numberOfTapsRequired = 1
        view.addGestureRecognizer(tap)

        guard let captureDevice = AVCaptureDevice.default(for: .video) else { return }
        guard let input = try? AVCaptureDeviceInput(device: captureDevice) else { return }
        captureSession.addInput(input)

        captureSession.startRunning()

        let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        view.layer.addSublayer(previewLayer)
        previewLayer.frame = view.frame

        let dataOutput = AVCaptureVideoDataOutput()
        dataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "videoQueue"))
        captureSession.addOutput(dataOutput)

        // VNImageRequestHandler(cgImage: <#T##CGImage#>, options: [:]).perform(<#T##requests: [VNRequest]##[VNRequest]#>)

        setupIdentifierConfidenceLabel()
    }

    fileprivate func setupIdentifierConfidenceLabel() {
        view.addSubview(identifierLabel)
        identifierLabel.bottomAnchor.constraint(equalTo: view.bottomAnchor, constant: -32).isActive = true
        identifierLabel.leftAnchor.constraint(equalTo: view.leftAnchor).isActive = true
        identifierLabel.rightAnchor.constraint(equalTo: view.rightAnchor).isActive = true
        identifierLabel.heightAnchor.constraint(equalToConstant: 50).isActive = true
    }

    @objc func doubleTapped() {
        print("hello")

        func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
            // print("Camera was able to capture a frame:", Date())
            guard let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }

            // !!!Important
            // make sure to go download the models at https://developer.apple.com/machine-learning/ scroll to the bottom
            guard let model = try? VNCoreMLModel(for: Resnet50().model) else { return }
            let request = VNCoreMLRequest(model: model) { (finishedReq, err) in
                guard let results = finishedReq.results as? [VNClassificationObservation] else { return }
                guard let firstObservation = results.first else { return }

                print(firstObservation.identifier, firstObservation.confidence)

                DispatchQueue.main.async {
                    self.identifierLabel.text = "\(firstObservation.identifier) \(firstObservation.confidence * 100)"
                }
            }

            try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([request])
        }
    }
}
When I leave out the tap gesture, the label updates as the object the camera is pointed at changes. When I add the tap gesture and its function, the code breaks.

I would really appreciate any help.
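For context, the structure I think I'm aiming for is to keep captureOutput as a class-level delegate method and have the tap only set a flag that controls whether the current frame gets classified. Below is a minimal sketch of that idea (shouldClassify is just a name I made up; viewDidLoad and setupIdentifierConfidenceLabel would stay exactly as in the code above), but I'm not sure it's the right approach:

    import UIKit
    import AVKit
    import Vision

    class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {

        let identifierLabel: UILabel = {
            let label = UILabel()
            label.backgroundColor = .white
            label.textAlignment = .center
            label.translatesAutoresizingMaskIntoConstraints = false
            return label
        }()

        // Set by the tap gesture and checked once per frame, so only the frame
        // captured right after a tap gets classified. (Name invented by me.)
        private var shouldClassify = false

        // viewDidLoad and setupIdentifierConfidenceLabel stay exactly as in the code above.

        @objc func doubleTapped() {
            shouldClassify = true
        }

        // Kept at class level (not nested inside doubleTapped) so AVFoundation can
        // call it as the sample buffer delegate method.
        func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
            guard shouldClassify else { return }
            shouldClassify = false

            guard let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
            guard let model = try? VNCoreMLModel(for: Resnet50().model) else { return }

            let request = VNCoreMLRequest(model: model) { (finishedReq, err) in
                guard let results = finishedReq.results as? [VNClassificationObservation],
                      let firstObservation = results.first else { return }
                DispatchQueue.main.async {
                    self.identifierLabel.text = "\(firstObservation.identifier) \(firstObservation.confidence * 100)"
                }
            }

            try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([request])
        }
    }

Is something along these lines the right way to do it, or should the tap handler work differently?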