I want to record a series of clips that play back seamlessly, either through a video player or when joined with ffmpeg -f concat.

Right now, in either case, I hear a very noticeable audio glitch at every segment join point.

My current strategy is to maintain two AssetWriter instances. At each cut-off point I start a new writer, wait until it is ready, and then start feeding it samples. Once both the video and the audio samples are done at a given point in time, I close out the old writer.

How can I modify this to get continuous clip recording, and what is the root cause?
import Foundation
import UIKit
import AVFoundation

class StreamController: UIViewController, AVCaptureAudioDataOutputSampleBufferDelegate, AVCaptureVideoDataOutputSampleBufferDelegate {
    @IBOutlet weak var previewView: UIView!

    var closingVideoInput: AVAssetWriterInput?
    var closingAudioInput: AVAssetWriterInput?
    var closingAssetWriter: AVAssetWriter?

    var currentVideoInput: AVAssetWriterInput?
    var currentAudioInput: AVAssetWriterInput?
    var currentAssetWriter: AVAssetWriter?

    var nextVideoInput: AVAssetWriterInput?
    var nextAudioInput: AVAssetWriterInput?
    var nextAssetWriter: AVAssetWriter?

    var previewLayer: AVCaptureVideoPreviewLayer?
    var videoHelper: VideoHelper?

    var startTime: NSTimeInterval = 0

    override func viewDidLoad() {
        super.viewDidLoad()
        startTime = NSDate().timeIntervalSince1970
        createSegmentWriter()
        videoHelper = VideoHelper()
        videoHelper!.delegate = self
        videoHelper!.startSession()
        NSTimer.scheduledTimerWithTimeInterval(5, target: self, selector: "createSegmentWriter", userInfo: nil, repeats: true)
    }

    func createSegmentWriter() {
        print("Creating segment writer at t=\(NSDate().timeIntervalSince1970 - self.startTime)")
        nextAssetWriter = try! AVAssetWriter(URL: NSURL(fileURLWithPath: OutputFileNameHelper.instance.pathForOutput()), fileType: AVFileTypeMPEG4)
        nextAssetWriter!.shouldOptimizeForNetworkUse = true

        let videoSettings: [String:AnyObject] = [AVVideoCodecKey: AVVideoCodecH264, AVVideoWidthKey: 960, AVVideoHeightKey: 540]
        nextVideoInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: videoSettings)
        nextVideoInput!.expectsMediaDataInRealTime = true
        nextAssetWriter?.addInput(nextVideoInput!)

        let audioSettings: [String:AnyObject] = [
            AVFormatIDKey: NSNumber(unsignedInt: kAudioFormatMPEG4AAC),
            AVSampleRateKey: 44100.0,
            AVNumberOfChannelsKey: 2,
        ]
        nextAudioInput = AVAssetWriterInput(mediaType: AVMediaTypeAudio, outputSettings: audioSettings)
        nextAudioInput!.expectsMediaDataInRealTime = true
        nextAssetWriter?.addInput(nextAudioInput!)

        nextAssetWriter!.startWriting()
    }
    override func viewDidAppear(animated: Bool) {
        super.viewDidAppear(animated)
        previewLayer = AVCaptureVideoPreviewLayer(session: videoHelper!.captureSession)
        previewLayer!.frame = self.previewView.bounds
        previewLayer!.videoGravity = AVLayerVideoGravityResizeAspectFill
        if ((previewLayer?.connection?.supportsVideoOrientation) != nil) {
            previewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation.LandscapeRight
        }
        self.previewView.layer.addSublayer(previewLayer!)
    }

    func closeWriter() {
        if videoFinished && audioFinished {
            let outputFile = closingAssetWriter?.outputURL.pathComponents?.last
            closingAssetWriter?.finishWritingWithCompletionHandler() {
                let delta = NSDate().timeIntervalSince1970 - self.startTime
                print("segment \(outputFile) finished at t=\(delta)")
            }
            self.closingAudioInput = nil
            self.closingVideoInput = nil
            self.closingAssetWriter = nil
            audioFinished = false
            videoFinished = false
        }
    }

    func closingVideoFinished() {
        if closingVideoInput != nil {
            videoFinished = true
            closeWriter()
        }
    }

    func closingAudioFinished() {
        if closingAudioInput != nil {
            audioFinished = true
            closeWriter()
        }
    }

    var closingTime: CMTime = kCMTimeZero
    var audioFinished = false
    var videoFinished = false
    func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBufferRef, fromConnection connection: AVCaptureConnection!) {
        let sampleTime: CMTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)

        if let nextWriter = nextAssetWriter {
            // Once the next writer has left the .Unknown state (startWriting has taken
            // effect), rotate the writers and remember the switch-over time.
            if nextWriter.status.rawValue != 0 {
                print("Switching asset writers at t=\(NSDate().timeIntervalSince1970 - self.startTime)")

                closingAssetWriter = currentAssetWriter
                closingVideoInput = currentVideoInput
                closingAudioInput = currentAudioInput

                currentAssetWriter = nextAssetWriter
                currentVideoInput = nextVideoInput
                currentAudioInput = nextAudioInput

                nextAssetWriter = nil
                nextVideoInput = nil
                nextAudioInput = nil

                closingTime = sampleTime
                currentAssetWriter!.startSessionAtSourceTime(sampleTime)
            }
        }

        if currentAssetWriter != nil {
            if let _ = captureOutput as? AVCaptureVideoDataOutput {
                // Samples stamped before the switch-over time still belong to the closing segment.
                if (CMTimeCompare(sampleTime, closingTime) < 0) {
                    if closingVideoInput?.readyForMoreMediaData == true {
                        closingVideoInput?.appendSampleBuffer(sampleBuffer)
                    }
                } else {
                    closingVideoFinished()
                    if currentVideoInput?.readyForMoreMediaData == true {
                        currentVideoInput?.appendSampleBuffer(sampleBuffer)
                    }
                }
            } else if let _ = captureOutput as? AVCaptureAudioDataOutput {
                if (CMTimeCompare(sampleTime, closingTime) < 0) {
                    if currentAudioInput?.readyForMoreMediaData == true {
                        currentAudioInput?.appendSampleBuffer(sampleBuffer)
                    }
                } else {
                    closingAudioFinished()
                    if currentAudioInput?.readyForMoreMediaData == true {
                        currentAudioInput?.appendSampleBuffer(sampleBuffer)
                    }
                }
            }
        }
    }

    override func shouldAutorotate() -> Bool {
        return true
    }

    override func supportedInterfaceOrientations() -> UIInterfaceOrientationMask {
        return [UIInterfaceOrientationMask.LandscapeRight]
    }
}
Answer 0 (score: 1)
I think the root cause is that the video and audio CMSampleBuffers cover different time intervals. You need to split and rejoin the audio CMSampleBuffers so that they slot seamlessly into your AVAssetWriters' timelines, which should probably be based on the video presentation timestamps.

Why does the audio have to change and not the video? It seems asymmetric, but I guess it is because audio has the higher sample rate.
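Roughly, for each incoming audio buffer you would first check whether the segment boundary (your closingTime) falls inside it, and if so, at which sample frame. An untested sketch of that calculation (the helper name is made up, not part of your code):

func framesBeforeBoundary(sampleBuffer: CMSampleBuffer, boundary: CMTime) -> Int? {
    let start = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
    let duration = CMSampleBufferGetDuration(sampleBuffer)
    let end = CMTimeAdd(start, duration)
    let frameCount = CMSampleBufferGetNumSamples(sampleBuffer)

    // Entirely before or entirely after the boundary: nothing to split.
    if CMTimeCompare(end, boundary) <= 0 || CMTimeCompare(start, boundary) >= 0 {
        return nil
    }

    // The boundary lands inside this buffer; convert the time offset into a
    // whole number of audio frames (each frame lasts duration / frameCount).
    let secondsBeforeBoundary = CMTimeGetSeconds(CMTimeSubtract(boundary, start))
    let secondsPerFrame = CMTimeGetSeconds(duration) / Float64(frameCount)
    return Int(secondsBeforeBoundary / secondsPerFrame)
}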
P.S. Actually, creating the new split sample buffers yourself looks daunting: CMSampleBufferCreate takes a lot of arguments. Using CMSampleBufferCopySampleBufferForRange may be easier and more efficient.
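For example, something along these lines (also untested; splitAndAppend is a placeholder name, and splitFrame would come from a calculation like the sketch above):

func splitAndAppend(sampleBuffer: CMSampleBuffer, splitFrame: Int,
                    closingInput: AVAssetWriterInput, currentInput: AVAssetWriterInput) {
    let frameCount = CMSampleBufferGetNumSamples(sampleBuffer)
    var head: CMSampleBuffer? = nil
    var tail: CMSampleBuffer? = nil

    // Copy the frames on either side of the boundary into two new buffers.
    CMSampleBufferCopySampleBufferForRange(kCFAllocatorDefault, sampleBuffer,
                                           CFRangeMake(0, splitFrame), &head)
    CMSampleBufferCopySampleBufferForRange(kCFAllocatorDefault, sampleBuffer,
                                           CFRangeMake(splitFrame, frameCount - splitFrame), &tail)

    // Frames before the boundary finish off the closing writer's timeline;
    // frames after it start the new writer's timeline without a gap.
    if let head = head where closingInput.readyForMoreMediaData {
        closingInput.appendSampleBuffer(head)
    }
    if let tail = tail where currentInput.readyForMoreMediaData {
        currentInput.appendSampleBuffer(tail)
    }
}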