I'm new to Swift and I'm trying to implement text recognition for a live camera feed created with AVCaptureSession.
The live view is displayed as a sublayer of a UIView. For the last few hours I have been struggling to capture its output into a CMSampleBuffer. I've searched the web, but there seems to be very little on this topic.
Correct me if I'm wrong, but I believe captureOutput
is the method I need here - unfortunately, I don't know how to fill it in.
My CameraViewController:
import UIKit
import AVFoundation

class CameraViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {

    @IBOutlet weak var toolbar: UIToolbar!
    @IBOutlet weak var cameraView: UIView!

    var session: AVCaptureSession?
    var device: AVCaptureDevice?
    var input: AVCaptureDeviceInput?
    var output: AVCaptureMetadataOutput?
    var prevLayer: AVCaptureVideoPreviewLayer?
    var videoOutput: AVCaptureVideoDataOutput?

    override func viewDidLoad() {
        super.viewDidLoad()
        prevLayer?.frame.size = cameraView.frame.size
        createSession()
        self.toolbar.setBackgroundImage(UIImage(),
                                        forToolbarPosition: .any,
                                        barMetrics: .default)
        self.toolbar.setShadowImage(UIImage(), forToolbarPosition: .any)
    }

    @IBAction func goBack(_ sender: Any) {
        performSegue(withIdentifier: "goBackToAdd", sender: self)
    }

    func createSession() {
        session = AVCaptureSession()
        device = AVCaptureDevice.default(for: AVMediaType.video)

        do {
            input = try AVCaptureDeviceInput(device: device!)
        } catch {
            print(error)
        }

        if let input = input {
            session?.addInput(input)
        }

        prevLayer = AVCaptureVideoPreviewLayer(session: session!)
        prevLayer?.frame.size = cameraView.frame.size
        prevLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
        prevLayer?.connection?.videoOrientation = transformOrientation(orientation: UIInterfaceOrientation(rawValue: UIApplication.shared.statusBarOrientation.rawValue)!)
        cameraView.layer.addSublayer(prevLayer!)

        session?.startRunning()
    }

    func cameraWithPosition(position: AVCaptureDevice.Position) -> AVCaptureDevice? {
        let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInDualCamera, .builtInTelephotoCamera, .builtInTrueDepthCamera, .builtInWideAngleCamera], mediaType: .video, position: position)
        if let device = deviceDiscoverySession.devices.first {
            return device
        }
        return nil
    }

    override func viewWillTransition(to size: CGSize, with coordinator: UIViewControllerTransitionCoordinator) {
        coordinator.animate(alongsideTransition: { (context) -> Void in
            self.prevLayer?.connection?.videoOrientation = self.transformOrientation(orientation: UIInterfaceOrientation(rawValue: UIApplication.shared.statusBarOrientation.rawValue)!)
            self.prevLayer?.frame.size = self.cameraView.frame.size
        }, completion: { (context) -> Void in
        })
        super.viewWillTransition(to: size, with: coordinator)
    }

    func transformOrientation(orientation: UIInterfaceOrientation) -> AVCaptureVideoOrientation {
        switch orientation {
        case .landscapeLeft:
            return .landscapeLeft
        case .landscapeRight:
            return .landscapeRight
        case .portraitUpsideDown:
            return .portraitUpsideDown
        default:
            return .portrait
        }
    }

    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        <#code#>
    }
}
Answer 0 (score: 1)
Get a UIImage inside captureOutput. Note that the code below is Objective-C; a Swift sketch of the same conversion follows after it.
- (void)captureOutput:(AVCaptureOutput *)output
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
       fromConnection:(AVCaptureConnection *)connection
{
    UIImage *image = [self imageFromSampleBuffer:sampleBuffer];
    dispatch_async(dispatch_get_main_queue(), ^{
        //< Add your code here that uses the image >
    });
}
// Create a UIImage from sample buffer data
- (UIImage *)imageFromSampleBuffer:(CMSampleBufferRef)sampleBuffer
{
    NSLog(@"imageFromSampleBuffer: called");

    // Get a CMSampleBuffer's Core Video image buffer for the media data
    CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);

    // Lock the base address of the pixel buffer
    CVPixelBufferLockBaseAddress(imageBuffer, 0);

    // Get the base address of the pixel buffer
    void *baseAddress = CVPixelBufferGetBaseAddress(imageBuffer);

    // Get the number of bytes per row for the pixel buffer
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);

    // Get the pixel buffer width and height
    size_t width = CVPixelBufferGetWidth(imageBuffer);
    size_t height = CVPixelBufferGetHeight(imageBuffer);

    // Create a device-dependent RGB color space
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();

    // Create a bitmap graphics context with the sample buffer data
    CGContextRef context = CGBitmapContextCreate(baseAddress, width, height, 8,
        bytesPerRow, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);

    // Create a Quartz image from the pixel data in the bitmap graphics context
    CGImageRef quartzImage = CGBitmapContextCreateImage(context);

    // Unlock the pixel buffer
    CVPixelBufferUnlockBaseAddress(imageBuffer, 0);

    // Free up the context and color space
    CGContextRelease(context);
    CGColorSpaceRelease(colorSpace);

    // Create an image object from the Quartz image
    UIImage *image = [UIImage imageWithCGImage:quartzImage];

    // Release the Quartz image
    CGImageRelease(quartzImage);

    return image;
}
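Since the question is written in Swift, here is a minimal Swift sketch of the same idea, going through CIImage/CGImage instead of a manual bitmap context. The imageFromSampleBuffer(_:) helper name is just illustrative, and both functions are meant to live inside the delegate class (e.g. CameraViewController):

import UIKit
import AVFoundation
import CoreImage

// Rough Swift counterpart of the Objective-C helper above:
// wrap the sample buffer's pixel data in a CIImage, render it to a CGImage,
// and hand that to UIImage.
func imageFromSampleBuffer(_ sampleBuffer: CMSampleBuffer) -> UIImage? {
    guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return nil }
    let ciImage = CIImage(cvImageBuffer: imageBuffer)
    let context = CIContext()
    guard let cgImage = context.createCGImage(ciImage, from: ciImage.extent) else { return nil }
    return UIImage(cgImage: cgImage)
}

func captureOutput(_ output: AVCaptureOutput,
                   didOutput sampleBuffer: CMSampleBuffer,
                   from connection: AVCaptureConnection) {
    guard let image = imageFromSampleBuffer(sampleBuffer) else { return }
    // The delegate is called on the capture queue, so hop to the main queue before touching UI.
    DispatchQueue.main.async {
        // Use `image` here (e.g. feed it to your text-recognition code).
        _ = image
    }
}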
Answer 1 (score: 0)
This is quite simple: first you need to create an AVCaptureVideoDataOutput and set its delegate to an object whose class conforms to AVCaptureVideoDataOutputSampleBufferDelegate. In that class, simply implement the method below (a sketch of the output setup follows after it):
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
        return
    }
    let image = CIImage(cvImageBuffer: imageBuffer)
    // ... render the image to a custom layer or write it to an AVAsset, as you wish
}
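For completeness, a minimal sketch of the setup step described above, using the session and videoOutput properties already declared in the asker's CameraViewController (the queue label and the 32BGRA pixel format are reasonable choices here, not requirements):

// In createSession(), after the camera input has been added:
videoOutput = AVCaptureVideoDataOutput()
// Ask for BGRA frames, which are convenient for Core Image / CGImage work.
videoOutput?.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA]
videoOutput?.alwaysDiscardsLateVideoFrames = true
// Deliver frames on a background queue; captureOutput(_:didOutput:from:) is called there.
videoOutput?.setSampleBufferDelegate(self, queue: DispatchQueue(label: "videoQueue"))
if let videoOutput = videoOutput, session?.canAddOutput(videoOutput) == true {
    session?.addOutput(videoOutput)
}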