Setting up a CoreML model in a Swift image classifier

Asked: 2020-03-19 16:29:21

Tags: ios swift xcode machine-learning coreml

I have trained a model to distinguish between malignant and benign skin lesions, with the goal of potentially detecting whether a patient has skin cancer, and I have converted my Keras model to Core ML. Now I am trying to use the model in an iOS app with Swift (via Xcode), which I have no experience with at all (I'm still learning through trial and error).

At the moment I am trying to get the model working in a simple app that just takes an image from the phone's camera and outputs the predicted label, but I really want to get the camera actually working for this purpose.

import UIKit
import CoreML
import Vision
import Social

class ViewControl: UIViewController, UIImagePickerControllerDelegate, UINavigationControllerDelegate {

    @IBOutlet weak var imageView: UIImageView!
    var classificationResults : [VNClassificationObservation] = []

    let imagePicker = UIImagePickerController()

    override func viewDidLoad() {
        super.viewDidLoad()

        imagePicker.delegate = self

    }

    func detect(image: CIImage) {

        // Load the ML model through its generated class
        guard let model = try? VNCoreMLModel(for: weights_skin_cancer().model) else {
            fatalError("can't load ML model")
        }

        let request = VNCoreMLRequest(model: model) { request, error in
            guard let results = request.results as? [VNClassificationObservation],
                let topResult = results.first
                else {
                    fatalError("unexpected result type from VNCoreMLRequest")
                }

            if topResult.identifier.contains("malignant") {
                DispatchQueue.main.async {
                    self.navigationItem.title = "mal!"
                    self.navigationController?.navigationBar.barTintColor = UIColor.green
                    self.navigationController?.navigationBar.isTranslucent = false
                }
            } else {
                DispatchQueue.main.async {
                    self.navigationItem.title = "benign!"
                    self.navigationController?.navigationBar.barTintColor = UIColor.red
                    self.navigationController?.navigationBar.isTranslucent = false
                }
            }
        }

        let handler = VNImageRequestHandler(ciImage: image)

        do { try handler.perform([request]) }
        catch { print(error) }



    }


    func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [UIImagePickerController.InfoKey : Any]) {


        if let image = info[UIImagePickerController.InfoKey.originalImage] as? UIImage {

            imageView.image = image

            imagePicker.dismiss(animated: true, completion: nil)


            guard let ciImage = CIImage(image: image) else {
                fatalError("couldn't convert uiimage to CIImage")
            }

            detect(image: ciImage)

        }
    }


    @IBAction func cameraTapped(_ sender: Any) {

        imagePicker.sourceType = .camera
        imagePicker.allowsEditing = false

        present(imagePicker, animated: true, completion: nil)
    }

}

For reference, this is the code I used to convert my model to Core ML:

import coremltools

output_labels = ['benign', 'malignant']
scale = 1/255.
coreml_model = coremltools.converters.keras.convert('/Users/Grampun/Desktop/ISIC-Archive-Downloader-master/trained_models/lr_0.00006-400_DS-20_epochs/weights.best.from_scratch.6.hdf5',
                                                    input_names='image',
                                                    image_input_names='image',
                                                    output_names='output',
                                                    class_labels=output_labels,
                                                    image_scale=scale)

coreml_model.author = 'Jack Bugeja'
coreml_model.short_description = 'Model used to identify between benign and malignant skin lesions'

coreml_model.input_description['image'] = 'Dermascopic image of skin lesion to evaluate'
coreml_model.output_description['output'] = 'Malignant/Benign'

coreml_model.save(
    '/Users/Grampun/Desktop/ISIC-Archive-Downloader-master/trained_models/model_for_ios/lr_0.00006-400_DS-20_epochs/weights_skin_cancer.mlmodel')

Any help at all would be greatly appreciated. Thanks!

1 Answer:

Answer 0 (score: 0)

  1. Open the camera (see the note on the camera-permission key after this list):

    @IBAction func cameraTapped(_ sender: Any) {
        let controller = UIImagePickerController()
        controller.sourceType = .camera
        controller.mediaTypes = ["public.image"]
        controller.allowsEditing = false
        controller.delegate = self
        present(controller, animated: true)
    }
    
  2. Add YourModel.mlmodel to your project (a note on the generated class name follows after this list).

  3. Add the following code in didFinishPickingMediaWithInfo:

    picker.dismiss(animated: true)
    // Library picks provide a file URL; camera captures usually only provide .originalImage.
    if let imageURL = info[.imageURL] as? URL,
       let image = UIImage(contentsOfFile: imageURL.path) {   // .path, not .absoluteString
        self.getPrediction(image)
    } else if let image = info[.originalImage] as? UIImage {
        self.getPrediction(image)
    }
    
  4. Add this to get a prediction:

    func getPrediction(_ image: UIImage) {
        let model = YourModel()
    
        guard let pixelBuffer = buffer(from: image) else { return }
        guard let prediction = try? model.prediction(image: pixelBuffer) else { return }
    
        print(prediction.classLabel) // Most likely image category as string value
    }
    
  5. Use this helper function to extract the CVPixelBuffer from your UIImage that you need in getPrediction() (a Vision-based alternative is sketched after this list):

    func buffer(from image: UIImage) -> CVPixelBuffer? {
        let attrs = [kCVPixelBufferCGImageCompatibilityKey: kCFBooleanTrue, kCVPixelBufferCGBitmapContextCompatibilityKey: kCFBooleanTrue] as CFDictionary
        var pixelBuffer : CVPixelBuffer?
        let status = CVPixelBufferCreate(kCFAllocatorDefault, Int(image.size.width), Int(image.size.height), kCVPixelFormatType_32ARGB, attrs, &pixelBuffer)
        guard (status == kCVReturnSuccess) else {
            return nil
        }
    
        CVPixelBufferLockBaseAddress(pixelBuffer!, CVPixelBufferLockFlags(rawValue: 0))
        let pixelData = CVPixelBufferGetBaseAddress(pixelBuffer!)
    
        let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
        let context = CGContext(data: pixelData, width: Int(image.size.width), height: Int(image.size.height), bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(pixelBuffer!), space: rgbColorSpace, bitmapInfo: CGImageAlphaInfo.noneSkipFirst.rawValue)
    
        context?.translateBy(x: 0, y: image.size.height)
        context?.scaleBy(x: 1.0, y: -1.0)
    
        UIGraphicsPushContext(context!)
        image.draw(in: CGRect(x: 0, y: 0, width: image.size.width, height: image.size.height))
        UIGraphicsPopContext()
        CVPixelBufferUnlockBaseAddress(pixelBuffer!, CVPixelBufferLockFlags(rawValue: 0))
    
        return pixelBuffer
    }
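
A note on step 1: the camera only exists on a real device, and iOS requires an NSCameraUsageDescription entry in Info.plist before the camera picker can be presented (the app is terminated at runtime without it). A minimal sketch of the same action with an availability check, assuming the photo library is an acceptable fallback when running in the simulator:

    @IBAction func cameraTapped(_ sender: Any) {
        let controller = UIImagePickerController()
        // Use the camera when it exists (device); otherwise fall back to the library (simulator).
        controller.sourceType = UIImagePickerController.isSourceTypeAvailable(.camera) ? .camera : .photoLibrary
        controller.allowsEditing = false
        controller.delegate = self
        present(controller, animated: true)
    }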
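
A note on steps 2 and 4: adding the .mlmodel file makes Xcode generate a Swift class named after the file, and that class is what the answer calls YourModel. With the conversion script from the question the file is saved as weights_skin_cancer.mlmodel, so a concrete version of step 4 would look roughly like this (a sketch, relying on the buffer(from:) helper from step 5):

    func getPrediction(_ image: UIImage) {
        // weights_skin_cancer is the class Xcode generates from weights_skin_cancer.mlmodel.
        let model = weights_skin_cancer()
        guard let pixelBuffer = buffer(from: image),
              let prediction = try? model.prediction(image: pixelBuffer) else { return }
        print(prediction.classLabel)   // "benign" or "malignant", from the class_labels above
    }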
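
A note on step 5: the manual CVPixelBuffer conversion is only needed when calling the generated class directly. The Vision route the question already uses (VNCoreMLModel plus VNCoreMLRequest) handles the resizing and pixel-format conversion itself, so a rough alternative to getPrediction(_:) without the buffer helper, again assuming the weights_skin_cancer model from the question, could look like this:

    func classify(_ image: UIImage) {
        guard let ciImage = CIImage(image: image),
              let visionModel = try? VNCoreMLModel(for: weights_skin_cancer().model) else { return }

        let request = VNCoreMLRequest(model: visionModel) { request, _ in
            // Classification results come back sorted by confidence; the first is the top label.
            guard let top = (request.results as? [VNClassificationObservation])?.first else { return }
            print(top.identifier, top.confidence)
        }
        // Let Vision scale/crop the image to the model's expected input size.
        request.imageCropAndScaleOption = .centerCrop

        let handler = VNImageRequestHandler(ciImage: ciImage)
        try? handler.perform([request])
    }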