I want to scale the input image to my coreML model to 64x64, but I can't get it to work

Time: 2018-08-28 17:54:56

Tags: coreml

Here is the code:

import UIKit
import AVFoundation
import Vision
import CoreML

class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {

let identifierLabel: UILabel = {
    let label = UILabel()
    label.backgroundColor = .white
    label.textAlignment = .center
    label.translatesAutoresizingMaskIntoConstraints = false
    return label
}()

override func viewDidLoad() {
    super.viewDidLoad()

    // here is where we start up the camera
    // for more details visit: https://www.letsbuildthatapp.com/course_video?id=1252
    let captureSession = AVCaptureSession()
    captureSession.sessionPreset = .photo

    guard let captureDevice = AVCaptureDevice.default(for: .video) else { return }
    guard let input = try? AVCaptureDeviceInput(device: captureDevice) else { return }
    captureSession.addInput(input)

    captureSession.startRunning()

    let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
    view.layer.addSublayer(previewLayer)
    previewLayer.frame = view.frame

    let dataOutput = AVCaptureVideoDataOutput()
    dataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "videoQueue"))
    captureSession.addOutput(dataOutput)

// VNImageRequestHandler(cgImage: <#T##CGImage#>, options: [:]).perform(<#T##requests: [VNRequest]##[VNRequest]#>)

    setupIdentifierConfidenceLabel()
}

fileprivate func setupIdentifierConfidenceLabel() {
    view.addSubview(identifierLabel)
    identifierLabel.bottomAnchor.constraint(equalTo: view.bottomAnchor, constant: -32).isActive = true
    identifierLabel.leftAnchor.constraint(equalTo: view.leftAnchor).isActive = true
    identifierLabel.rightAnchor.constraint(equalTo: view.rightAnchor).isActive = true
    identifierLabel.heightAnchor.constraint(equalToConstant: 50).isActive = true
}


func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {

// print("Camera was able to capture a frame:", Date())

    guard let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }

    // !!!Important
    // make sure to go download the models at https://developer.apple.com/machine-learning/ scroll to the bottom 
    guard let model = try? VNCoreMLModel(for: handWritten().model) else { return }
    let request = VNCoreMLRequest(model: model) { (finishedReq, err) in

        // perhaps check the err

        // print(finishedReq.results)

        guard let results = finishedReq.results as? [VNClassificationObservation] else { return }

        guard let firstObservation = results.first else { return }

        print(firstObservation.identifier, firstObservation.confidence)

        DispatchQueue.main.async {
            self.identifierLabel.text = "\(firstObservation.identifier) \(firstObservation.confidence * 100)"
        }

    }
    try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([request])
}

}
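
For reference, when the model is run through Vision as above, the request itself can be told how to map the camera frame onto the model's 64x64 input, so no manual resizing is needed. A minimal sketch (inside captureOutput, reusing model and pixelBuffer from the code above):

    let request = VNCoreMLRequest(model: model) { finishedReq, err in
        // handle the [VNClassificationObservation] results as above
    }
    // Vision crops/scales the frame to the model's declared input size (64x64 here).
    // .centerCrop takes a centered square; .scaleFill / .scaleFit stretch or letterbox instead.
    request.imageCropAndScaleOption = .centerCrop

    try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([request])

It also helps to create the VNCoreMLModel once (for example as a property) instead of on every frame, although that is separate from the sizing question.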

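If the model is instead driven directly through Core ML (without Vision), the CVPixelBuffer has to be brought to 64x64 by hand before calling the generated prediction method. A hypothetical helper sketched with Core Image; resizePixelBuffer and its parameters are not part of the original post:

    import CoreImage
    import CoreVideo

    // Scales a camera frame into a new pixel buffer of the requested size (64x64 here).
    func resizePixelBuffer(_ pixelBuffer: CVPixelBuffer,
                           to size: CGSize,
                           context: CIContext = CIContext()) -> CVPixelBuffer? {
        let image = CIImage(cvPixelBuffer: pixelBuffer)
        let scaleX = size.width / CGFloat(CVPixelBufferGetWidth(pixelBuffer))
        let scaleY = size.height / CGFloat(CVPixelBufferGetHeight(pixelBuffer))
        let scaled = image.transformed(by: CGAffineTransform(scaleX: scaleX, y: scaleY))

        var output: CVPixelBuffer?
        CVPixelBufferCreate(kCFAllocatorDefault,
                            Int(size.width),
                            Int(size.height),
                            kCVPixelFormatType_32BGRA, // use kCVPixelFormatType_OneComponent8 if the model expects grayscale
                            nil,
                            &output)
        guard let outputBuffer = output else { return nil }
        // Draw the scaled image into the freshly created buffer.
        context.render(scaled, to: outputBuffer)
        return outputBuffer
    }

The resulting 64x64 buffer can then be passed to whatever prediction(...) method Xcode generated for the handWritten model; the exact parameter name depends on the model's input name.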