在不使用 SwiftUI 的旧版 Swift 代码中,您可以在 ViewController 中创建一个 AVCaptureVideoPreviewLayer,然后使用 view.layer.addSublayer(previewLayer) 将其添加到默认视图中。
这在 SwiftUI 的 ContentView 中如何实现?SwiftUI 中的所有 View 类型似乎都没有 addSublayer 方法,也无法写出 Text("Hello World").layer.addSublayer(...) 这样的代码。
我尝试将PreviewLayer添加到ContentView的各种视图中
import Foundation
import AVFoundation
import Combine
import SwiftUI
// Scanner: wraps an AVCaptureSession configured for QR-code scanning and
// publishes its capture objects so a SwiftUI view can observe them.
// NOTE(review): this is the question's code exactly as posted — it does NOT compile:
//   - `previewView` is a non-optional stored property, but its initialization
//     below is commented out, so `init` fails to initialize all stored properties.
//   - the class's closing brace is missing from this paste (the next line in the
//     document is another `import` statement).
class Scanner: NSObject, AVCaptureMetadataOutputObjectsDelegate, ObservableObject {
// Implicitly-unwrapped publisheds; set up in init below.
@Published var captureSession: AVCaptureSession!
@Published var previewLayer: AVCaptureVideoPreviewLayer!
// Compile error: never assigned before `super.init()` (its assignment is commented out).
@Published var previewView: UIView
override init() {
captureSession = AVCaptureSession()
previewLayer = nil
//previewView = UIView()
super.init()
// Bail out silently if no camera is available (e.g. the simulator).
guard let videoCaptureDevice = AVCaptureDevice.default(for: .video) else { return }
let videoInput: AVCaptureDeviceInput
do {
videoInput = try AVCaptureDeviceInput(device: videoCaptureDevice)
} catch {
// NOTE(review): the thrown error is swallowed here — hard to diagnose failures.
return
}
if (captureSession.canAddInput(videoInput)) {
captureSession.addInput(videoInput)
} else {
failed()
return
}
// Metadata output delivers detected QR codes to this object on the main queue.
let metadataOutput = AVCaptureMetadataOutput()
if (captureSession.canAddOutput(metadataOutput)) {
captureSession.addOutput(metadataOutput)
metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
metadataOutput.metadataObjectTypes = [.qr]
} else {
failed()
return
}
// The preview layer is created, but never attached to any view hierarchy —
// that is exactly the problem the question is asking about.
previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
previewLayer.videoGravity = .resizeAspectFill
//previewView.layer.addSublayer(previewLayer)
}
import SwiftUI
import Combine
// ContentView: the asker's attempts to attach the AVCaptureVideoPreviewLayer
// to a SwiftUI view. Preserved as posted — it does NOT compile:
//   - SwiftUI `View` values have no `.layer` property; layers belong to UIKit's
//     UIView, so `.layer.addSublayer(...)` is not available here.
//   - `scanner.captureSession.startRunning()` is an imperative statement inside
//     a ViewBuilder `body`, which is not valid as written.
struct ContentView: View {
@ObservedObject var scanner = Scanner()
var body: some View {
//Text("Hello World").layer.addSublayer(scanner.previewLayer)
//Text("")
Text("HelloWorld")//.addSublayer(scanner.previewLayer))
//.previewLayout(scanner.previewLayer)
.layer.addSublayer(scanner.previewLayer)
//.previewLayout(scanner.previewLayer)
//.overlay(scanner.previewView)
scanner.captureSession.startRunning()
}
}
尝试添加PreviewLayer时发生编译错误
答案 0 :(得分:2)
您不能直接向 SwiftUI 视图添加图层。这就是为什么通常要用 UIViewRepresentable(或 UIViewControllerRepresentable)把整个预览视图包装起来,就像桥接其他 UIKit 组件时一样。
答案 1 :(得分:0)
我设法将捕获的图像放到SwiftUI视图上
在视图组件中,只需放置一个
Image(uiImage: cameraManager.capturedImage)
对于 CameraManager,在帧捕获回调中把样本缓冲区转换为 UIImage 之后,只需将 capturedImage 设置为该 uiImage 即可。
(参考https://medium.com/ios-os-x-development/ios-camera-frames-extraction-d2c0f80ed05a)
// CameraManager: publishes each captured camera frame as a UIImage so a SwiftUI
// view can display it via Image(uiImage:).
// NOTE(review): the "..." lines are placeholders in the answer — the session
// setup and frame-capture wiring are elided.
class CameraManager: NSObject, ObservableObject{
...
// Latest captured frame; starts as an empty UIImage until the first frame arrives.
@Published public var capturedImage: UIImage = UIImage()
...
}
// MARK: - AVCaptureVideoDataOutputSampleBufferDelegate
extension CameraManager: AVCaptureVideoDataOutputSampleBufferDelegate {
    /// Shared Core Image context. Creating a `CIContext` is expensive (Apple's
    /// docs recommend reusing one), so allocate it once instead of per frame.
    private static let ciContext = CIContext()

    /// Called for every captured video frame. Converts the sample buffer to a
    /// `UIImage` and publishes it on the main thread through `capturedImage`.
    /// - Parameters:
    ///   - output: The capture output that produced the frame.
    ///   - sampleBuffer: The raw frame data; must contain an image buffer.
    ///   - connection: The connection the frame arrived on (unused).
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        // Bail out on samples that carry no pixel data.
        guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
        let ciImage = CIImage(cvPixelBuffer: imageBuffer)
        // Render via the shared context rather than a fresh CIContext per frame.
        guard let cgImage = Self.ciContext.createCGImage(ciImage, from: ciImage.extent) else { return }
        let uiImage = UIImage(cgImage: cgImage)
        // @Published changes observed by SwiftUI must be made on the main thread.
        DispatchQueue.main.async {
            self.capturedImage = uiImage
        }
    }
}
我原本打算捕获每个帧以供以后进行图像处理,尽管它现在可以预览,但是我不确定通过添加计算机视觉算法是否会影响帧捕获。到现在为止,感觉运行模型将在另一个线程上,所以...
好了,如有问题欢迎在评论中讨论。