在同一相机框架中扫描两个QRCode

时间:2016-08-24 10:02:40

标签: ios swift swift2

我目前正在使用SwiftQRCode阅读器来扫描我的QR码,但我想知道是否可以在同一相机镜头内扫描两个QR码。解码所需的信息存储在两个QR码中。

有没有办法生成两个扫描框,扫描然后将这两个信息存储在一个字符串中以便返回给我的主控制器?

/// Prepares a fresh scan pass: remembers the caller's completion handler,
/// resets the detection counter, records the region of `view` that counts
/// as a valid hit, then wires up the capture session and its layers.
public func prepareScan(view: UIView, completion:(stringValue: String)->()) {

    // Reset per-scan state before (re)configuring anything.
    completedCallBack = completion
    currentDetectedCount = 0
    scanFrame = view.bounds

    setupSession()
    setupLayers(view)
}

/// start scan
/// Starts the capture session; a session that is already running is left
/// untouched (starting twice would be a no-op anyway, but we log and bail).
public func startScan() {
    guard !session.running else {
        print("the  capture session is running")
        return
    }
    session.startRunning()
}

/// stop scan
/// Stops the capture session; a session that is not running is left
/// untouched.
public func stopScan() {
    guard session.running else {
        print("the capture session is not running")
        return
    }
    session.stopRunning()
}

/// Installs the drawing and preview layers into `view`'s layer hierarchy.
func setupLayers(view: UIView) {
    let hostBounds = view.bounds

    // Insertion order matters: the draw layer goes in first, and inserting
    // the preview layer at index 0 afterwards pushes the draw layer above
    // it — so corner highlights render on top of the camera preview.
    drawLayer.frame = hostBounds
    view.layer.insertSublayer(drawLayer, atIndex: 0)

    previewLayer.frame = hostBounds
    view.layer.insertSublayer(previewLayer, atIndex: 0)
}

/// Configures the capture session with the default camera input and a
/// metadata output that reports every barcode type the device supports.
/// Safe to call repeatedly: a running session is left untouched.
func setupSession() {
    if session.running {
        print("the capture session is running")
        return
    }

    // `videoInput` is Optional: it is nil when no camera exists or the
    // device cannot be opened (e.g. access denied). The original code
    // passed the optional straight into the implicitly-unwrapped
    // `canAddInput`/`addInput` parameters, which can crash on nil — unwrap
    // explicitly and bail out instead.
    guard let input = videoInput else {
        print("can not add input device")
        return
    }

    if !session.canAddInput(input) {
        print("can not add input device")
        return
    }

    if !session.canAddOutput(dataOutput) {
        print("can not add output device")
        return
    }

    session.addInput(input)
    session.addOutput(dataOutput)

    // Accept every metadata type the output can detect (QR, EAN, …).
    dataOutput.metadataObjectTypes = dataOutput.availableMetadataObjectTypes
    // Deliver delegate callbacks on the main queue so the delegate can
    // touch UI (it draws corner highlights) without extra dispatching.
    dataOutput.setMetadataObjectsDelegate(self, queue: dispatch_get_main_queue())
}

/// AVCaptureMetadataOutputObjectsDelegate callback. Clears the previous
/// highlights, then for every machine-readable code whose preview-space
/// bounds fall inside `scanFrame`, bumps the detection counter; once
/// `maxDetectedCount` consecutive detections accumulate, stops the session
/// and reports the decoded string to the stored completion handler.
public func captureOutput(captureOutput: AVCaptureOutput!, didOutputMetadataObjects metadataObjects: [AnyObject]!, fromConnection connection: AVCaptureConnection!) {

    clearDrawLayer()

    for dataObject in metadataObjects {

        guard let codeObject = dataObject as? AVMetadataMachineReadableCodeObject,
            obj = previewLayer.transformedMetadataObjectForMetadataObject(codeObject) as? AVMetadataMachineReadableCodeObject else {
                continue
        }

        if CGRectContainsRect(scanFrame, obj.bounds) {
            currentDetectedCount = currentDetectedCount + 1
            if currentDetectedCount > maxDetectedCount {
                session.stopRunning()

                // Optional call instead of force-unwrap: a nil callback
                // (scan never prepared) no longer crashes.
                completedCallBack?(stringValue: codeObject.stringValue)

                if autoRemoveSubLayers {
                    removeAllLayers()
                }
            }

            // `obj` is already the transformed metadata object bound above;
            // the original recomputed the transform here with a force-cast.
            drawCodeCorners(obj)
        }
    }
}

/// Detaches both camera-related layers from whatever view hosts them.
public func removeAllLayers() {
    for hostedLayer in [previewLayer, drawLayer] {
        hostedLayer.removeFromSuperlayer()
    }
}

/// Removes every corner-highlight sublayer previously added to the
/// drawing layer; a layer with no sublayers is left alone.
func clearDrawLayer() {
    guard let highlights = drawLayer.sublayers else {
        return
    }
    highlights.forEach { $0.removeFromSuperlayer() }
}

/// Draws a stroked outline through the detected code's corner points on
/// top of the drawing layer. Does nothing when no corners were reported.
func drawCodeCorners(codeObject: AVMetadataMachineReadableCodeObject) {
    guard !codeObject.corners.isEmpty else {
        return
    }

    let outline = CAShapeLayer()
    outline.lineWidth = lineWidth
    outline.strokeColor = strokeColor.CGColor
    outline.fillColor = UIColor.clearColor().CGColor
    outline.path = createPath(codeObject.corners).CGPath

    drawLayer.addSublayer(outline)
}

/// Builds a closed Bezier path through the corner points reported by
/// AVFoundation.
/// - Parameter points: Array of CGPoint dictionary representations
///   (as produced for `AVMetadataMachineReadableCodeObject.corners`).
///   The caller guarantees at least one element; the force casts mirror
///   that contract and crash on any other element type.
/// - Returns: A closed `UIBezierPath` connecting the points in order.
func createPath(points: NSArray) -> UIBezierPath {
    let path = UIBezierPath()
    var point = CGPoint()

    // First corner starts the path.
    CGPointMakeWithDictionaryRepresentation((points[0] as! CFDictionary), &point)
    path.moveToPoint(point)

    // Idiomatic range loop replaces the original while-loop with manual
    // index bookkeeping.
    for index in 1..<points.count {
        CGPointMakeWithDictionaryRepresentation((points[index] as! CFDictionary), &point)
        path.addLineToPoint(point)
    }

    path.closePath()
    return path
}

/// Layer that renders the live camera feed, scaled to fill its bounds.
lazy var previewLayer: AVCaptureVideoPreviewLayer = {
    let layer = AVCaptureVideoPreviewLayer(session: self.session)
    layer.videoGravity = AVLayerVideoGravityResizeAspectFill
    return layer
    }()

/// Overlay layer that hosts the corner-highlight shape sublayers.
lazy var drawLayer = CALayer()
/// Capture session shared by the input, the output and the preview layer.
lazy var session = AVCaptureSession()
/// Camera input; nil when no video device exists or it cannot be opened.
lazy var videoInput: AVCaptureDeviceInput? = {

    if let device = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo) {
        return try? AVCaptureDeviceInput(device: device)
    }
    return nil
    }()

/// Metadata output that reports detected machine-readable codes.
lazy var dataOutput = AVCaptureMetadataOutput()

}

0 个答案:

没有答案