Swift ReplayKit AVAssetWriter video and audio out of sync when converting to HLS

Date: 2019-11-14 00:29:13

Tags: ios swift video video-streaming http-live-streaming

In iOS/Swift I'm using ReplayKit together with AVAssetWriter to record the user's screen, plus microphone audio, into a .mov or .mp4 video.

When the video is created it plays back fine locally and the audio and video are in sync. However, when I convert this video to HLS (HTTP Live Streaming) format with AWS MediaConvert, the audio drifts out of sync with the video. Does anyone know what causes this? I've been reading about timecodes, so perhaps I need to add a timecode track to the video? Is there a simpler way to fix this, or has anyone run into a similar issue?
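In case it helps with diagnosing this, here is a small sketch (not part of my app; the helper name is made up) that prints the time range of each track in the finished recording. If the audio track starts at a different offset than the video track, that gap may only become noticeable once MediaConvert segments the file for HLS:

import AVFoundation

// Hypothetical diagnostic helper: print the start time and duration of every
// track in the recorded file, so the audio and video offsets can be compared
// before the file is handed to MediaConvert.
func logTrackTimeRanges(for url: URL) {
    let asset = AVAsset(url: url)
    for track in asset.tracks {
        let start = CMTimeGetSeconds(track.timeRange.start)
        let duration = CMTimeGetSeconds(track.timeRange.duration)
        print("\(track.mediaType.rawValue): start=\(start)s, duration=\(duration)s")
    }
}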

private func startRecordingVideo(){

    //Initialize MP4 Output File for Screen Recorded Video
    let fileManager = FileManager.default
    let urls = fileManager.urls(for: .documentDirectory, in: .userDomainMask)
    guard let documentDirectory: NSURL = urls.first as NSURL? else {
        fatalError("documentDir Error")
    }
    videoOutputURL = documentDirectory.appendingPathComponent("OutputVideo.mov")

    if FileManager.default.fileExists(atPath: videoOutputURL!.path) {
        do {
            try FileManager.default.removeItem(atPath: videoOutputURL!.path)
        } catch {
            fatalError("Unable to delete file: \(error) : \(#function).")
        }
    }

    //Initialize Asset Writer to Write Video to User's Storage
    assetWriter = try! AVAssetWriter(outputURL: videoOutputURL!, fileType:
        AVFileType.mov)

    let videoOutputSettings: Dictionary<String, Any> = [
        AVVideoCodecKey : AVVideoCodecType.h264,
        AVVideoWidthKey : UIScreen.main.bounds.size.width,
        AVVideoHeightKey : UIScreen.main.bounds.size.height,
    ];

    let audioSettings = [
        AVFormatIDKey : kAudioFormatMPEG4AAC,
        AVNumberOfChannelsKey : 1,
        AVSampleRateKey : 44100.0,
        AVEncoderBitRateKey: 96000,
        ] as [String : Any]


    videoInput  = AVAssetWriterInput(mediaType: AVMediaType.video,outputSettings: videoOutputSettings)
    audioInput  = AVAssetWriterInput(mediaType: AVMediaType.audio,outputSettings:audioSettings )

    videoInput?.expectsMediaDataInRealTime = true
    audioInput?.expectsMediaDataInRealTime = true

    assetWriter?.add(videoInput!)
    assetWriter?.add(audioInput!)


    let sharedRecorder = RPScreenRecorder.shared()
    sharedRecorder.isMicrophoneEnabled = true
    sharedRecorder.startCapture(handler: { (sample, bufferType, error) in

        //Audio/Video Buffer Data returned from the Screen Recorder
        if CMSampleBufferDataIsReady(sample) {

            DispatchQueue.main.async { [weak self] in

                //Start the Asset Writer if it has not yet started
                if self?.assetWriter?.status == AVAssetWriter.Status.unknown {
                    if !(self?.assetWriter?.startWriting())! {
                        return
                    }
                    self?.assetWriter?.startSession(atSourceTime: CMSampleBufferGetPresentationTimeStamp(sample))
                    self?.startSession = true
                }

            }
            //Handle errors
            if self.assetWriter?.status == AVAssetWriter.Status.failed {

                print("Error occurred, status = \(String(describing: self.assetWriter?.status.rawValue)), \(String(describing: self.assetWriter?.error!.localizedDescription)) \(String(describing: self.assetWriter?.error))")

                return
            }
            //Add video buffer to AVAssetWriter Video Input
            if bufferType == .video {
                if self.videoInput!.isReadyForMoreMediaData && self.startSession {
                    self.videoInput?.append(sample)
                }
            }
            //Add audio microphone buffer to AVAssetWriter Audio Input
            if bufferType == .audioMic {
                print("MIC BUFFER RECEIVED")
                if self.audioInput!.isReadyForMoreMediaData {
                    print("Audio Buffer Came")
                    self.audioInput?.append(sample)
                }
            }
        }

    }, completionHandler: { error in
        print("COMP HANDLER ERROR", error?.localizedDescription)
    })
}

private func stopRecordingVideo(){
    self.startSession = false
    RPScreenRecorder.shared().stopCapture{ (error) in
        self.videoInput?.markAsFinished()
        self.audioInput?.markAsFinished()

        if error == nil{
            self.assetWriter?.finishWriting{
                self.startSession = false
                print("FINISHED WRITING!")
                DispatchQueue.main.async {
                    self.setUpVideoPreview()
                }
            }
        }else{
            //DELETE DIRECTORY
        }
    }

}

1 Answer:

Answer 0 (score: 3)

I'm sure you've either solved this or moved on by now, but for all the Googlers out there: you basically have to set the mediaTimeScale on the video input. You can see an example here.

Here is the relevant part of that code (this code uses an AVSampleBufferDisplayLayer, but the same concept applies):

double pts = CMTimeGetSeconds(CMSampleBufferGetPresentationTimeStamp(sampleBuffer));

if(!timebaseSet && pts != 0)
{
    timebaseSet = true;
    
    CMTimebaseRef controlTimebase;
    CMTimebaseCreateWithMasterClock( CFAllocatorGetDefault(), CMClockGetHostTimeClock(), &controlTimebase );
    
    displayLayer.controlTimebase = controlTimebase;
    CMTimebaseSetTime(displayLayer.controlTimebase, CMTimeMake(pts, 1));
    CMTimebaseSetRate(displayLayer.controlTimebase, 1.0);
}

if([displayLayer isReadyForMoreMediaData])
{
    [displayLayer enqueueSampleBuffer:sampleBuffer];
}