I am creating a video from images, and the video drops some frames because isReadyForMoreMediaData is not ready. While debugging I could see the cause: the loop needs to wait a little before starting the next buffer, but I don't know how to do that.
for nextDicData in self.selectedPhotosArray {
    if videoWriterInput.isReadyForMoreMediaData {
        if let nextImage = nextDicData["img"] as? UIImage {
            // Per-frame duration, stored in milliseconds on the fps timescale
            var frameDuration = CMTimeMake(Int64(0), fps)
            if let timeVl = nextDicData["time"] as? Float {
                framePerSecond = Int64(timeVl * 1000)
                print("TIME FRAME : \(timeVl)")
            } else {
                framePerSecond = Int64(0.1 * 1000)
            }
            frameDuration = CMTimeMake(framePerSecond, fps)
            let lastFrameTime = CMTimeMake(Int64(lastTimeVl), fps)
            let presentationTime = CMTimeAdd(lastFrameTime, frameDuration)

            var pixelBuffer: CVPixelBuffer? = nil
            let status: CVReturn = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferAdaptor.pixelBufferPool!, &pixelBuffer)
            if let pixelBuffer = pixelBuffer, status == 0 {
                let managedPixelBuffer = pixelBuffer
                CVPixelBufferLockBaseAddress(managedPixelBuffer, CVPixelBufferLockFlags(rawValue: CVOptionFlags(0)))
                let data = CVPixelBufferGetBaseAddress(managedPixelBuffer)
                let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
                let context = CGContext(data: data, width: Int(self.outputSize.width), height: Int(self.outputSize.height), bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(managedPixelBuffer), space: rgbColorSpace, bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue)
                context!.clear(CGRect(x: 0, y: 0, width: self.outputSize.width, height: self.outputSize.height))

                let horizontalRatio = self.outputSize.width / nextImage.size.width
                let verticalRatio = self.outputSize.height / nextImage.size.height
                //let aspectRatio = max(horizontalRatio, verticalRatio) // ScaleAspectFill
                let aspectRatio = min(horizontalRatio, verticalRatio) // ScaleAspectFit
                // Scale the image to aspect-fit and center it in the output frame
                let newSize = CGSize(width: nextImage.size.width * aspectRatio, height: nextImage.size.height * aspectRatio)
                let x = newSize.width < self.outputSize.width ? (self.outputSize.width - newSize.width) / 2 : 0
                let y = newSize.height < self.outputSize.height ? (self.outputSize.height - newSize.height) / 2 : 0
                context?.draw(nextImage.cgImage!, in: CGRect(x: x, y: y, width: newSize.width, height: newSize.height))
                CVPixelBufferUnlockBaseAddress(managedPixelBuffer, CVPixelBufferLockFlags(rawValue: CVOptionFlags(0)))
                appendSucceeded = pixelBufferAdaptor.append(pixelBuffer, withPresentationTime: presentationTime)
            } else {
                print("Failed to allocate pixel buffer")
                appendSucceeded = false
            }
        }
    } else {
        // not ready: this frame gets skipped, which is the problem
        print("writer is not ready: \(lastTimeVl)")
    }
    if !appendSucceeded {
        break
    }
    frameCount += 1
    lastTimeVl += framePerSecond
    print("LAST TIME : \(lastTimeVl)")
}
Answer 0 (score: 0)
AVAssetWriterInput can help you manage this: call requestMediaDataWhenReady, and it will let you know when isReadyForMoreMediaData becomes true again.
Here is an example from Apple's documentation (translated into Swift):
myAVAssetWriterInput.requestMediaDataWhenReady(on: queue) {
    while myAVAssetWriterInput.isReadyForMoreMediaData {
        if let nextSampleBuffer = copyNextSampleBufferToWrite() {
            // you have another frame to append
            myAVAssetWriterInput.append(nextSampleBuffer)
        } else {
            // finished adding frames
            myAVAssetWriterInput.markAsFinished()
            break
        }
    }
}
Now, when the writer is "suddenly" not ready, don't worry: it will continue appending frames in the next requestMediaDataWhenReady callback.
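Applied to your image loop, the pattern might look roughly like the minimal, untested sketch below. Two names here are assumptions that do not appear in your code: videoWriter (the AVAssetWriter that owns the input) and fillPixelBuffer(from:into:), a hypothetical helper that would wrap the CGContext drawing from your question.

let mediaQueue = DispatchQueue(label: "mediaInputQueue")
var frameIndex = 0
var presentationTime = CMTimeMake(Int64(0), fps)

videoWriterInput.requestMediaDataWhenReady(on: mediaQueue) {
    // Append as many frames as the input will take; when it stops being
    // ready, the closure returns and AVFoundation calls it again later.
    while videoWriterInput.isReadyForMoreMediaData {
        guard frameIndex < self.selectedPhotosArray.count else {
            videoWriterInput.markAsFinished()
            videoWriter.finishWriting { print("finished writing") } // assumed AVAssetWriter
            return
        }
        let nextDicData = self.selectedPhotosArray[frameIndex]
        guard let nextImage = nextDicData["img"] as? UIImage,
              let pool = pixelBufferAdaptor.pixelBufferPool else { return }

        var pixelBuffer: CVPixelBuffer? = nil
        CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pool, &pixelBuffer)
        guard let buffer = pixelBuffer else { return }

        fillPixelBuffer(from: nextImage, into: buffer) // hypothetical helper

        pixelBufferAdaptor.append(buffer, withPresentationTime: presentationTime)

        // Same millisecond-based timing scheme as the question's loop
        let seconds = (nextDicData["time"] as? Float) ?? 0.1
        presentationTime = CMTimeAdd(presentationTime, CMTimeMake(Int64(seconds * 1000), fps))
        frameIndex += 1
    }
}

The key difference from your for loop is that nothing is skipped: the while loop only runs while the input is ready, and when it stops being ready the closure simply exits until the next callback, so no sleep is needed.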
Answer 1 (score: -1)
Swift 5
Add a usleep after pixelBufferAdaptor.append.
The reason for adding the sleep is that when there are multiple inputs, AVAssetWriter tries to write media data in an interleaved pattern, so the writer must be ready for the next input (in your case, an image) before data is appended. Waiting for a while lets it get ready for the next append. (link)
appendSucceeded = pixelBufferAdaptor.append(pixelBuffer, withPresentationTime: presentationTime)
// videoWriterInput must be paused for at least 50 milliseconds,
// or the buffer won't be ready to append the next frame
usleep(useconds_t(50000))