SFSpeechAudioBufferRecognitionRequest with AVPlayer audio

Date: 2019-03-20 04:04:31

Tags: swift speech-recognition avplayer audiobuffer

Is it possible to use SFSpeechRecognizer with AVPlayer audio? I'm trying to transcribe the audio while it plays, but it doesn't work.

I'm using an MTAudioProcessingTap to get CMSampleBuffers, which I then append to the SFSpeechAudioBufferRecognitionRequest,

but I always get the same error, and the audio stops:

Error Domain=kAFAssistantErrorDomain Code=203 "Timeout"

Maybe someone can help me! Here is my code.

Thanks!
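For reference, here is a minimal sketch of the declarations the snippets below assume. The post never shows them, so the class name (`Transcriber`) and the exact property layout are assumptions:

import AVFoundation
import MediaToolbox
import Speech

// Hypothetical owner of the state used below; the original post never shows
// this class, so the name `Transcriber` and these declarations are assumed.
final class Transcriber {
    var avPlayer = AVPlayer()
    var playerItem: AVPlayerItem? { return avPlayer.currentItem }
    let recognitionRequest = SFSpeechAudioBufferRecognitionRequest()
    var audioFormat: AudioStreamBasicDescription?
}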

init() {
    self.recognitionRequest.shouldReportPartialResults = true
    // Note: keep the recognizer (and ideally the returned task) in stored
    // properties; a recognizer deallocated mid-task can fail silently.
    SFSpeechRecognizer()?.recognitionTask(with: self.recognitionRequest, resultHandler: { result, error in
        if let error = error {
            NSLog("Error \(error)")
        } else if let result = result {
            NSLog("Transcript \(result.bestTranscription.formattedString)")
            if result.isFinal {}
        }
    })
}
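One thing worth checking before anything else: speech recognition returns nothing unless the user has granted permission, and the kAFAssistantErrorDomain errors can stem from that. A minimal sketch of the authorization request (NSSpeechRecognitionUsageDescription must also be present in Info.plist):

SFSpeechRecognizer.requestAuthorization { status in
    // Recognition only works once status == .authorized.
    print("Speech authorization status: \(status.rawValue)")
}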

func play() {
   self.avPlayer.play()
   self.installTap(playerItem: self.avPlayer.currentItem!)
}
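Also relevant to the timeout: nothing in the code tells the recognizer the audio stream has ended. A sketch, assuming the same hypothetical Transcriber class, that ends the request when playback completes so the recognizer can finalize instead of waiting (the method name is mine):

func observePlaybackEnd() {
    NotificationCenter.default.addObserver(forName: .AVPlayerItemDidPlayToEndTime,
                                           object: avPlayer.currentItem,
                                           queue: .main) { [weak self] _ in
        // Tells the request no more buffers are coming, letting it finalize.
        self?.recognitionRequest.endAudio()
    }
}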

func installTap(playerItem: AVPlayerItem) {
    guard let audioTrack = playerItem.asset.tracks(withMediaType: .audio).first else { return }

    // C function pointers cannot capture Swift state, so `self` is passed in
    // through clientInfo; tapInit (below) stashes it in the tap's storage.
    var callbacks = MTAudioProcessingTapCallbacks(
        version: kMTAudioProcessingTapCallbacksVersion_0,
        clientInfo: UnsafeMutableRawPointer(Unmanaged.passUnretained(self).toOpaque()),
        init: tapInit,
        finalize: nil,
        prepare: tapPrepare,
        unprepare: nil,
        process: tapProcess)

    var tap: Unmanaged<MTAudioProcessingTap>?
    let err = MTAudioProcessingTapCreate(kCFAllocatorDefault, &callbacks, kMTAudioProcessingTapCreationFlag_PostEffects, &tap)
    assert(err == noErr)

    let inputParams = AVMutableAudioMixInputParameters(track: audioTrack)
    inputParams.audioTapProcessor = tap?.takeRetainedValue()

    let audioMix = AVMutableAudioMix()
    audioMix.inputParameters = [inputParams]
    playerItem.audioMix = audioMix
}
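A small teardown sketch (again, the method name is mine): removing the audio mix uninstalls the tap once you're done transcribing, so the process callback stops firing.

func removeTap() {
    // Dropping the audio mix releases the tap and stops the process callback.
    avPlayer.currentItem?.audioMix = nil
}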


// tapInit stashes clientInfo (`self`) in the tap storage; the C callbacks
// cannot capture Swift state, so they recover the owner from there.
let tapInit: MTAudioProcessingTapInitCallback = { tap, clientInfo, tapStorageOut in
    tapStorageOut.pointee = clientInfo
}

let tapPrepare: MTAudioProcessingTapPrepareCallback = { tap, _, basicDescription in
    let owner = Unmanaged<Transcriber>.fromOpaque(MTAudioProcessingTapGetStorage(tap)).takeUnretainedValue()
    owner.audioFormat = basicDescription.pointee
}

let tapProcess: MTAudioProcessingTapProcessCallback = { tap, numberFrames, flags, bufferListInOut, numberFramesOut, flagsOut in
    let owner = Unmanaged<Transcriber>.fromOpaque(MTAudioProcessingTapGetStorage(tap)).takeUnretainedValue()

    // This call actually pulls the audio into bufferListInOut. Without it the
    // recognizer only ever sees empty buffers, which is a likely cause of the
    // Code=203 "Timeout" error.
    var status = MTAudioProcessingTapGetSourceAudio(tap, numberFrames, bufferListInOut, flagsOut, nil, numberFramesOut)
    guard status == noErr else {
        print("Error MTAudioProcessingTapGetSourceAudio: \(status)")
        return
    }

    // Describe the buffers with the format captured in tapPrepare.
    guard var audioFormat = owner.audioFormat else { return }
    var format: CMFormatDescription?
    status = CMAudioFormatDescriptionCreate(allocator: kCFAllocatorDefault, asbd: &audioFormat, layoutSize: 0, layout: nil, magicCookieSize: 0, magicCookie: nil, extensions: nil, formatDescriptionOut: &format)
    guard status == noErr, let format = format else {
        print("Error CMAudioFormatDescriptionCreate: \(status)")
        return
    }

    var timing = CMSampleTimingInfo(duration: CMTimeMake(value: 1, timescale: Int32(audioFormat.mSampleRate)),
                                    presentationTimeStamp: owner.playerItem!.currentTime(),
                                    decodeTimeStamp: .invalid)

    var sbuf: CMSampleBuffer?
    status = CMSampleBufferCreate(allocator: kCFAllocatorDefault,
                                  dataBuffer: nil,
                                  dataReady: false,
                                  makeDataReadyCallback: nil,
                                  refcon: nil,
                                  formatDescription: format,
                                  sampleCount: CMItemCount(numberFrames),
                                  sampleTimingEntryCount: 1,
                                  sampleTimingArray: &timing,
                                  sampleSizeEntryCount: 0,
                                  sampleSizeArray: nil,
                                  sampleBufferOut: &sbuf)
    guard status == noErr, let sampleBuffer = sbuf else {
        print("Error CMSampleBufferCreate: \(status)")
        return
    }

    // Copy the tapped audio into the sample buffer and feed it to Speech.
    status = CMSampleBufferSetDataBufferFromAudioBufferList(sampleBuffer, blockBufferAllocator: kCFAllocatorDefault, blockBufferMemoryAllocator: kCFAllocatorDefault, flags: 0, bufferList: bufferListInOut)
    guard status == noErr else {
        print("Error CMSampleBufferSetDataBufferFromAudioBufferList: \(status)")
        return
    }
    owner.recognitionRequest.appendAudioSampleBuffer(sampleBuffer)
}
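If the timeout persists, it's worth confirming that the tap actually delivers audio. A hypothetical debug helper (not from the post), assuming the tap's processing format is non-interleaved Float32, which is the usual case; call it from tapProcess right after MTAudioProcessingTapGetSourceAudio:

// Logs the peak sample of a tapped buffer so you can confirm real audio,
// not silence, is reaching the recognizer. Assumes Float32 samples.
func logPeak(_ bufferList: UnsafeMutablePointer<AudioBufferList>) {
    for buffer in UnsafeMutableAudioBufferListPointer(bufferList) {
        guard let data = buffer.mData else { continue }
        let count = Int(buffer.mDataByteSize) / MemoryLayout<Float32>.size
        let samples = data.bindMemory(to: Float32.self, capacity: count)
        let peak = (0..<count).reduce(Float32(0)) { max($0, abs(samples[$1])) }
        print("tap buffer peak: \(peak)")
    }
}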

0 Answers

No answers yet.