我正在尝试在录制视频时裁剪和升级视频。
我使用AssetWriter
保存视频,使用CIFilters
进行实时过滤。
问题在于录制速度非常慢。
详细信息(前6个步骤只是样板代码,所以可以跳过它们):
为GPU使用设置CIContext
,如there所述:
func setupContext() {
// setup the GLKView for video/image preview
OGLContext = EAGLContext(api: EAGLRenderingAPI.openGLES2)
ciContext = CIContext.init(eaglContext: OGLContext!)
}
设置GLKView
;
func setUpGLPreview(){
glView.context = OGLContext!
glView.enableSetNeedsDisplay = false
glView.transform = CGAffineTransform(rotationAngle: CGFloat.pi/2)
glView.frame = CGRect(x: 0.0, y: 20.0, width: view.bounds.width, height: view.bounds.width * 9/16)
glView.bounds = CGRect(x: 0.0, y: 0.0, width: view.bounds.width * 9/16, height: view.bounds.width)
glView.contentMode = .scaleAspectFit
view.updateConstraintsIfNeeded()
glView.bindDrawable()
previewBounds = CGRect(x: 0, y: 0, width: glView.drawableWidth, height: glView.drawableHeight)
}
配置AVCaptureSession
;
配置AssetWriter
的输入:
func configureVideoSessionWithAsset(){
let compressionSettings: [String: Any] = [
AVVideoAverageBitRateKey: NSNumber(value: 20000000),
AVVideoMaxKeyFrameIntervalKey: NSNumber(value: 1),
AVVideoProfileLevelKey: AVVideoProfileLevelH264Baseline41
]
let videoSettings: [String : Any] = [
AVVideoCodecKey : AVVideoCodecH264,
AVVideoCompressionPropertiesKey: compressionSettings,
AVVideoWidthKey : 1080,
AVVideoHeightKey : 1920,
AVVideoScalingModeKey:AVVideoScalingModeResizeAspectFill
]
assetWriterInputCamera = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: videoSettings)
let sourcePixelBufferAttributesDictionary : [String: Any] = [
String(kCVPixelBufferPixelFormatTypeKey) : kCVPixelFormatType_32BGRA,
String(kCVPixelBufferWidthKey) : 1080,
String(kCVPixelBufferHeightKey) : 1920,
String(kCVPixelFormatOpenGLESCompatibility) : kCFBooleanTrue
]
bufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: assetWriterInputCamera!, sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)
assetWriterInputCamera?.expectsMediaDataInRealTime = true
let transform = CGAffineTransform(rotationAngle: CGFloat.pi/2)
assetWriterInputCamera?.transform = transform
let audioSettings : [String : Any] = [
AVFormatIDKey : NSInteger(kAudioFormatMPEG4AAC),
AVNumberOfChannelsKey : 2,
AVSampleRateKey : NSNumber(value: 44100.0)
]
assetWriterInputAudio = AVAssetWriterInput(mediaType: AVMediaTypeAudio, outputSettings: audioSettings)
assetWriterInputAudio?.expectsMediaDataInRealTime = true
}
每次开始捕获时都会创建新的Writer:
func createNewWriter(){
let outputPath = NSTemporaryDirectory() + Date().description + "output" + "\(state.currentFileFormat.rawValue)"
let outputFileURL = URL(fileURLWithPath: outputPath)
let fileType = (state.currentFileFormat == .mov) ? AVFileTypeQuickTimeMovie : AVFileTypeMPEG4
self.assetWriter = try? AVAssetWriter(outputURL: outputFileURL, fileType: fileType)
if let videoInput = assetWriterInputCamera{
self.assetWriter?.add(videoInput)
}
if let audioInput = assetWriterInputAudio{
self.assetWriter?.add(audioInput)
}
}
最后,录制和过滤:
var cropFilter = CIFilter(name: "CICrop")
var scaleFilter = CIFilter(name: "CILanczosScaleTransform")
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
sessionQueue.async {
guard let imgBuffer = CMSampleBufferGetImageBuffer(sampleBuffer), let previewBounds = self.previewBounds else{
return
}
//recording audio
if self.state.isRecording{
if let audio = self.assetWriterInputAudio, connection.audioChannels.count > 0 && audio.isReadyForMoreMediaData {
self.writerQueue.async {
audio.append(sampleBuffer)
}
return
}
}
let sourceImage = CIImage(cvPixelBuffer: imgBuffer)
//setting drawRect
let sourceAspect = sourceImage.extent.size.width / sourceImage.extent.size.height;
let previewAspect = previewBounds.size.width / previewBounds.size.height;
var drawRect = sourceImage.extent
if (sourceAspect > previewAspect){
drawRect.size.width = drawRect.size.height * previewAspect
} else{
drawRect.size.height = drawRect.size.width / previewAspect
}
//Not recording just showing the preview
if(self.assetWriter == nil || self.assetWriter?.status == AVAssetWriterStatus.completed){
self.glView.bindDrawable()
if(self.OGLContext != EAGLContext.current()){
EAGLContext.setCurrent(self.OGLContext)
}
glClearColor(0, 0, 0, 1.0)
glClear(GLbitfield(GL_COLOR_BUFFER_BIT))
glEnable(GLenum(GL_BLEND))
glBlendFunc(GLenum(GL_ONE), GLenum(GL_ONE_MINUS_SRC_ALPHA))
self.ciContext?.draw(sourceImage, in: previewBounds, from: drawRect)
self.glView.display()
} else{
if !self.state.isRecording {
return
}
let outputResolution = (self.state.currentCamera == .back) ? CGRect(x: 0,y: 0, width: 604, height: 1080) : CGRect(x: 0,y: 0, width: 405, height: 720)
cropFilter?.setValue(sourceImage,
forKey: kCIInputImageKey)
cropFilter?.setValue(CIVector(cgRect: CGRect(x: 0, y: 0, width: outputResolution.width, height: outputResolution.height)),
forKey: "inputRectangle")
var filteredImage:CIImage? = sourceImage.cropping(to: outputResolution)
scaleFilter?.setValue(filteredImage,
forKey: kCIInputImageKey)
scaleFilter?.setValue(1080/filteredImage!.extent.width,
forKey: kCIInputScaleKey)
filteredImage = scaleFilter?.outputImage
//Checking if we started writing
if self.assetWriter?.status == AVAssetWriterStatus.unknown{
if self.assetWriter?.startWriting != nil {
self.assetWriter?.startWriting()
self.assetWriter?.startSession(atSourceTime: CMSampleBufferGetPresentationTimeStamp(sampleBuffer))
}
}
let time = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
var newPixelBuffer:CVPixelBuffer? = nil
guard let bufferPool = self.bufferAdaptor?.pixelBufferPool else{
return
}
if let filteredImage = filteredImage{
CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, bufferPool, &newPixelBuffer)
self.ciContext?.render(filteredImage, to: newPixelBuffer!, bounds: filteredImage.extent, colorSpace: nil)
self.glView.bindDrawable()
glClearColor(0, 0, 0, 1.0)
glClear(GLbitfield(GL_COLOR_BUFFER_BIT))
glEnable(GLenum(GL_BLEND))
glBlendFunc(GLenum(GL_ONE), GLenum(GL_ONE_MINUS_SRC_ALPHA))
self.ciContext?.draw(filteredImage, in: previewBounds, from: filteredImage.extent)
self.glView.display()
}
if let camera = self.assetWriterInputCamera ,camera.isReadyForMoreMediaData{
self.writerQueue.async {
self.bufferAdaptor?.append(newPixelBuffer!, withPresentationTime: time)
}
}
}
}
}
预览在捕获开始之前工作正常。在捕获期间,帧速率急剧下降到不可用的水平。
问题是:在最后一步我做错了什么?
由于GPU的使用,CIImage
过滤应该非常快,但是一切都被冻结了。
请注意:
GPUImage
,但我认为这对于这项任务来说太过分了; CIFunHouse
和RosyWriter项目。第一个帮助我做了很多事情。第二个使用着色器来过滤视频,这对我来说有点复杂。任何帮助和评论表示赞赏! 如果您对代码有任何疑问,请询问!
谢谢!