Capture audio samples to push audio to a server in real time

Time: 2020-09-07 04:37:16

Tags: ios swift iphone

I am using LFLiveKit to live-stream video only from the device, and that works fine. Now I want to push an audio file to play along with the video. We are using a WOWZA server with an RTMP link for streaming and playback. With the code I am using, the song plays erratically for 10 to 15 seconds, and it also hangs the video stream for a while. I push the audio after the session has started. Any help fixing this would be much appreciated.

In case anyone wants to check how the library encodes audio, here is LFLiveKit: https://github.com/LaiFengiOS/LFLiveKit

lazy var session: LFLiveSession = {
        // Audio: mono input, encoded by LFLiveKit before it goes out over RTMP.
        let audioConfiguration = LFLiveAudioConfiguration.defaultConfiguration(for: .medium)
        audioConfiguration?.numberOfChannels = 1
        let videoConfiguration = LFLiveVideoConfiguration.defaultConfiguration(for: .high3)
        // captureMaskVideoInputAudio: the kit captures video itself, while audio
        // is supplied externally through session.pushAudio(_:).
        let session = LFLiveSession(audioConfiguration: audioConfiguration, videoConfiguration: videoConfiguration, captureType: .captureMaskVideoInputAudio)
        session?.captureDevicePosition = .back
        session?.delegate = self
        session?.preView = self.videView
        session?.showDebugInfo = true
        return session!
    }()
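Since the session sets delegate = self and showDebugInfo = true, the delegate callbacks are the quickest way to tell whether the hang is a socket stall or something local. A minimal sketch of the LFLiveSessionDelegate conformance follows; StreamViewController is a stand-in name for whatever class owns the session, and the method names come from the LFLiveKit headers, so double-check the bridged Swift signatures against your version of the library:

extension StreamViewController: LFLiveSessionDelegate {

    // Fired whenever the RTMP connection state changes (pending, start, error, stop...).
    func liveSession(_ session: LFLiveSession?, liveStateDidChange state: LFLiveState) {
        print("live state changed: \(state.rawValue)")
    }

    // Periodic statistics while showDebugInfo is true; a growing unsent-byte
    // count here would suggest the audio push is outrunning the uplink.
    func liveSession(_ session: LFLiveSession?, debugInfo: LFLiveDebug?) {
        print("debug info: \(String(describing: debugInfo))")
    }

    // Socket-level errors from the RTMP connection.
    func liveSession(_ session: LFLiveSession?, errorCode: LFLiveSocketErrorCode) {
        print("socket error: \(errorCode.rawValue)")
    }
}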

func documentPicker(_ controller: UIDocumentPickerViewController, didPickDocumentsAt urls: [URL]) {
        if controller.documentPickerMode == .import {
            guard let firstURL = urls.first else { return }   // path of the picked song
            loopAmplitudes(audioFileUrl: firstURL)
        }
    }
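One thing to rule out first: this delegate method is delivered on the main thread, and loopAmplitudes below decodes the whole file synchronously, so calling it directly can freeze the UI and the capture preview. A small sketch of the same body hopping onto a background queue before decoding:

if controller.documentPickerMode == .import {
    guard let firstURL = urls.first else { return }
    // Decode and push off the main thread so capture and preview keep running.
    DispatchQueue.global(qos: .userInitiated).async { [weak self] in
        self?.loopAmplitudes(audioFileUrl: firstURL)
    }
}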

func loopAmplitudes(audioFileUrl: URL) {
        let asset = AVAsset(url: audioFileUrl)
        guard let reader = try? AVAssetReader(asset: asset),
              let track = asset.tracks(withMediaType: .audio).first else { return }

        // Decode to interleaved 16-bit linear PCM, mono. For an audio track the
        // natural time scale usually equals its sample rate.
        let settings: [String: Any] = [
            AVFormatIDKey: kAudioFormatLinearPCM,
            AVNumberOfChannelsKey: 1,
            AVLinearPCMBitDepthKey: 16,
            AVSampleRateKey: track.naturalTimeScale,
            AVLinearPCMIsNonInterleaved: false,
            AVLinearPCMIsFloatKey: false,
            AVLinearPCMIsBigEndianKey: false
        ]

        let readerOutput = AVAssetReaderTrackOutput(track: track, outputSettings: settings)
        reader.add(readerOutput)
        reader.startReading()

        // NOTE: this loop decodes the whole file as fast as the CPU allows and
        // pushes every buffer immediately, far faster than real time (see the
        // paced sketch below).
        while let sampleBuffer = readerOutput.copyNextSampleBuffer() {

            var audioBufferList = AudioBufferList(mNumberBuffers: 1, mBuffers: AudioBuffer(mNumberChannels: 0, mDataByteSize: 0, mData: nil))
            var blockBuffer: CMBlockBuffer?

            // Copy the sample buffer's audio into audioBufferList.
            CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(
                sampleBuffer,
                bufferListSizeNeededOut: nil,
                bufferListOut: &audioBufferList,
                bufferListSize: MemoryLayout<AudioBufferList>.size,
                blockBufferAllocator: nil,
                blockBufferMemoryAllocator: nil,
                flags: kCMSampleBufferFlag_AudioBufferList_Assure16ByteAlignment,
                blockBufferOut: &blockBuffer)

            let buffers = UnsafeMutableAudioBufferListPointer(&audioBufferList)
            for audioBuffer in buffers {
                guard let mData = audioBuffer.mData else { continue }
                // Wrap the raw PCM bytes and hand them to LFLiveKit.
                let newdata = Data(bytes: mData, count: Int(audioBuffer.mDataByteSize))
                session.pushAudio(newdata)
            }
        }
    }
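The 10-to-15-second bursts and the stalled video are consistent with the loop above pushing the whole file far faster than real time: copyNextSampleBuffer() decodes flat out, so minutes of audio reach pushAudio within seconds and the outgoing queue backs up. Below is a minimal paced variant that sleeps for each buffer's duration before reading the next one. Assumptions are flagged in the comments: the 44100 Hz mono 16-bit settings are presumed to match what the LFLiveAudioConfiguration above expects, and the queue label is illustrative.

import AVFoundation   // brings in Core Media as well

// Serial queue so pushes stay ordered; property on the class that owns `session`.
let audioPushQueue = DispatchQueue(label: "stream.audio.push")

func pushAudioPaced(audioFileUrl: URL) {
    audioPushQueue.async { [weak self] in
        guard let self = self else { return }
        let asset = AVAsset(url: audioFileUrl)
        guard let reader = try? AVAssetReader(asset: asset),
              let track = asset.tracks(withMediaType: .audio).first else { return }

        // Decode straight to the PCM format the stream was configured for
        // (assumed: 44100 Hz, mono, 16-bit interleaved) instead of relying on
        // the track's natural time scale.
        let settings: [String: Any] = [
            AVFormatIDKey: kAudioFormatLinearPCM,
            AVNumberOfChannelsKey: 1,
            AVSampleRateKey: 44100,
            AVLinearPCMBitDepthKey: 16,
            AVLinearPCMIsNonInterleaved: false,
            AVLinearPCMIsFloatKey: false,
            AVLinearPCMIsBigEndianKey: false
        ]
        let output = AVAssetReaderTrackOutput(track: track, outputSettings: settings)
        reader.add(output)
        reader.startReading()

        while reader.status == .reading,
              let sampleBuffer = output.copyNextSampleBuffer(),
              let blockBuffer = CMSampleBufferGetDataBuffer(sampleBuffer) {

            // Copy the decoded PCM bytes out of the block buffer.
            let length = CMBlockBufferGetDataLength(blockBuffer)
            var pcm = Data(count: length)
            pcm.withUnsafeMutableBytes { (raw: UnsafeMutableRawBufferPointer) in
                if let base = raw.baseAddress {
                    CMBlockBufferCopyDataBytes(blockBuffer, atOffset: 0, dataLength: length, destination: base)
                }
            }
            self.session.pushAudio(pcm)

            // Pace the loop: sleep for this buffer's duration so the audio is
            // delivered at roughly real-time rate instead of all at once.
            let seconds = CMTimeGetSeconds(CMSampleBufferGetDuration(sampleBuffer))
            if seconds.isFinite && seconds > 0 {
                Thread.sleep(forTimeInterval: seconds)
            }
        }
    }
}

Sleep-based pacing drifts a little, since decode and copy time adds to each interval; treat it as a sketch for verifying the diagnosis rather than a production scheduler.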
