This method works fine until I make a call with the Twilio framework and then start listening again. It then crashes with the following error:

required condition is false: format.sampleRate == hwFormat.sampleRate

It crashes on this line:
inputNode?.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { [weak self] (buffer, when) in
    self?.recognitionRequest?.append(buffer)
}
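If I understand the assertion correctly, it compares the format passed to installTap with the input format the hardware currently delivers, and I suspect the Twilio call changes that format. A small diagnostic sketch (logInputFormats is a hypothetical helper, not part of my app) that I could run right before installing the tap to see both values:

func logInputFormats(of engine: AVAudioEngine) {
    let node = engine.inputNode
    let tapFormat = node.outputFormat(forBus: 0)   // the format I pass to installTap
    let hwFormat = node.inputFormat(forBus: 0)     // the format the hardware currently delivers
    let sessionRate = AVAudioSession.sharedInstance().sampleRate
    // Assumption: after the Twilio call these sample rates no longer agree,
    // which is what 'format.sampleRate == hwFormat.sampleRate' asserts.
    print("tap: \(tapFormat.sampleRate) Hz, hw: \(hwFormat.sampleRate) Hz, session: \(sessionRate) Hz")
}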
Here is the complete code:
import AVFoundation
import Speech

class MySppechRecognizer: NSObject, SFSpeechRecognizerDelegate {

    /// The shared speech recognizer instance.
    static let speechSharedInstance = MySppechRecognizer()
    var isSppechRecognisationAvaible = true
    var speechRecognizer: SFSpeechRecognizer? = nil
    var audioSession = AVAudioSession.sharedInstance()
    var audioEngine = AVAudioEngine()
    var recognitionTask: SFSpeechRecognitionTask?
    var recognitionRequest: SFSpeechAudioBufferRecognitionRequest?
    var isFinalWord = false
    var inputNode: AVAudioInputNode? = nil
    var callBack: jimboSpeechCallBack? = nil
    var isHotWordDetectedForApp = false

    func setSpeechRec() {
        if speechRecognizer == nil {
            speechRecognizer = SFSpeechRecognizer(locale: kAppLocal)
            speechRecognizer?.delegate = self
        }
    }
    // MARK: - Delegate

    func speechRecognizer(_ speechRecognizer: SFSpeechRecognizer, availabilityDidChange available: Bool) {
        print("Availability changed")
    }
    // MARK: - Audio engine

    func startRecording() {
        if recognitionTask != nil {
            self.recognitionRequest?.endAudio()
            recognitionTask?.cancel()
            recognitionTask = nil
            recognitionRequest = nil
            inputNode?.reset()
            inputNode?.removeTap(onBus: 0)
            inputNode?.reset()
            stopRecording()
        }

        do {
            try audioSession.setCategory(AVAudioSession.Category.playAndRecord, mode: .measurement)
            try audioSession.setMode(.measurement)
            try audioSession.setPreferredSampleRate(44100)
            try audioSession.setActive(true, options: .notifyOthersOnDeactivation)
        } catch {
            print("audioSession properties weren't set because of an error.")
        }
        inputNode = audioEngine.inputNode
        recognitionRequest = SFSpeechAudioBufferRecognitionRequest()
        recognitionRequest?.shouldReportPartialResults = true

        recognitionTask = MySppechRecognizer.speechSharedInstance.speechRecognizer?.recognitionTask(with: recognitionRequest!, resultHandler: { (result, error) in
            print("Result is===\(String(describing: result?.bestTranscription.formattedString))")
            var isFinal = false
            if result != nil {
                isFinal = (result?.isFinal)!
                self.isFinalWord = (result?.isFinal)!
            }
            if error != nil || isFinal {
                self.audioEngine.stop()
                self.inputNode?.removeTap(onBus: 0)
                self.recognitionRequest = nil
                self.recognitionTask = nil
            }
            if error != nil {
                print("Error === \(String(describing: error?.localizedDescription))")
                self.isFinalWord = true
            }
            guard self.callBack == nil else {
                self.callBack!(result, error)
                return
            }
        })
        let recordingFormat = inputNode?.outputFormat(forBus: 0)
        inputNode?.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { [weak self] (buffer, when) in
            self?.recognitionRequest?.append(buffer)
        }

        audioEngine.prepare()
        do {
            try audioEngine.start()
        } catch {
            print("audioEngine couldn't start because of an error.")
        }
    }
    /// Stops the current recognition task and removes the input tap.
    func stopRecording() {
        DispatchQueue.main.async {
            if self.audioEngine.isRunning {
                self.recognitionRequest?.endAudio()
                self.recognitionTask?.cancel()
                self.recognitionTask = nil
                self.recognitionRequest = nil
                self.inputNode?.reset()
                self.inputNode?.removeTap(onBus: 0)
                self.inputNode?.reset()
                self.audioEngine.inputNode.reset()
            }
        }
    }
}
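The only idea I have so far (an untested assumption on my side) is to throw the old engine away after the Twilio call finishes, so that outputFormat(forBus: 0) is derived again from whatever the hardware is using now, roughly like this inside MySppechRecognizer:

func resetAudioEngineAfterCall() {
    // Untested sketch: discard the old engine after the call so the next
    // installTap uses the current hardware sample rate, not a stale one.
    audioEngine.stop()
    inputNode?.removeTap(onBus: 0)
    audioEngine = AVAudioEngine()
    inputNode = audioEngine.inputNode
}

Is that the right direction, or is there something wrong with how I tear down and restart the engine after the call?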