Swift 3& S中的语音识别错误iOS 10

时间:2017-02-19 18:41:54

标签: ios swift speech-recognition ios10

我使用iPhone 6s plus,这是语音识别viewcontroller的代码:

import Speech
import UIKit

protocol SpeechRecognitionDelegate: class {
    func speechRecognitionComplete(query: String?)
    func speechRecognitionCancelled()
}

class SpeechRecognitionViewController: UIViewController, SFSpeechRecognizerDelegate {

    var textView: UITextView!

    private let speechRecognizer = SFSpeechRecognizer(locale: Locale.init(identifier: "en-US"))
    private var recognitionRequest: SFSpeechAudioBufferRecognitionRequest?
    private var recognitionTask: SFSpeechRecognitionTask?
    private let audioEngine = AVAudioEngine()
    private var query: String?
    weak var delegate: SpeechRecognitionDelegate?
    var isListening: Bool = false

    init(delegate: SpeechRecognitionDelegate, frame: CGRect) {
        super.init(nibName: nil, bundle: nil)
        self.delegate = delegate
        self.view.frame = frame
    }

    required init?(coder aDecoder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    enum ErrorMessage: String {
        case denied = "To enable Speech Recognition go to Settings -> Privacy."
        case notDetermined = "Authorization not determined - please try again."
        case restricted = "Speech Recognition is restricted on this device."
        case noResults = "No results found - please try a different search."
    }



    func displayErrorAlert(message: ErrorMessage) {
        let alertController = UIAlertController(title: nil,
                                                message: message.rawValue,
                                                preferredStyle: .alert)
        let alertAction = UIAlertAction(title: "OK", style: .default, handler: nil)
        alertController.addAction(alertAction)
        OperationQueue.main.addOperation {
            self.present(alertController, animated: true, completion: nil)
        }
    }

    override func viewDidLoad() {
        super.viewDidLoad()

    }

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)

        speechRecognizer?.delegate = self

        //initialize textView and add it to self.view
    }

    func startListening() {
        guard !isListening else {return}
        isListening = true

        recognitionRequest = SFSpeechAudioBufferRecognitionRequest()
        guard let recognitionRequest = recognitionRequest else {
            print("SpeechRecognitionViewController recognitionRequest \(self.recognitionRequest)")
            return
        }

        recognitionRequest.shouldReportPartialResults = true

        recognitionTask = speechRecognizer?.recognitionTask(with: recognitionRequest, resultHandler: { (result, error) in
            var isFinal = false

            if result != nil {
                self.query = result?.bestTranscription.formattedString
                self.textView.text = self.query
                isFinal = (result?.isFinal)!
            }

            if error != nil || isFinal {
                print("recognitionTask error = \(error?.localizedDescription)")
                self.stopListening()
            }
        })

        let audioSession = AVAudioSession.sharedInstance()
        do {
            try audioSession.setCategory(AVAudioSessionCategoryRecord)
            try audioSession.setMode(AVAudioSessionModeMeasurement)
            try audioSession.setActive(true, with: .notifyOthersOnDeactivation)
        } catch {
            print("Audio session isn't configured correctly")
        }

        let recordingFormat = audioEngine.inputNode?.outputFormat(forBus: 0)
        audioEngine.inputNode?.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer, time) in
            self.recognitionRequest?.append(buffer)
        }

        audioEngine.prepare()

        do {
            try audioEngine.start()
            textView.text = "Listening..."
        } catch {
            print("Audio engine failed to start")
        }
    }

    func stopListening() {
        guard isListening else {return}
        audioEngine.stop()
        audioEngine.inputNode?.removeTap(onBus: 0)
        recognitionRequest = nil
        recognitionTask = nil
        isListening = false
    }

    // MARK: SFSpeechRecognizerDelegate

    func speechRecognizer(_ speechRecognizer: SFSpeechRecognizer, availabilityDidChange available: Bool) {
        if !available {
            let alertController = UIAlertController(title: nil,
                                                    message: "Speech Recognition is currently unavailable.",
                                                    preferredStyle: .alert)
            let alertAction = UIAlertAction(title: "OK", style: .default) { (alertAction) in
                .self.stopListening()
            }
            alertController.addAction(alertAction)
            present(alertController, animated: true)
        }
    }
}

此VC嵌入在另一个viewcontroller中。 在父视图控件中点击按钮时,将调用startListening()。当再次点击相同的按钮时,会调用stopListening()

语音识别第一次工作得很好。在第二次尝试我得到这个错误(我想它与语法加载有关?):

recognitionTask error = Optional("The operation couldn’t be completed. (kAFAssistantErrorDomain error 209.)") 

语音识别不再适用。 30秒后,我收到超时错误:

Optional(Error Domain=kAFAssistantErrorDomain Code=203 "Timeout" UserInfo={NSLocalizedDescription=Timeout, NSUnderlyingError=0x170446f90 {Error Domain=SiriSpeechErrorDomain Code=100 "(null)"}})

原始代码在这里SayWhat

我错过了什么?

1 个答案:

答案 0 :(得分:3)

我所要做的就是在试图停止聆听时添加recognitionRequest?.endAudio()

func stopListening() {
    guard isListening else {return}
    audioEngine.stop()
    audioEngine.inputNode?.removeTap(onBus: 0)
    // Indicate that the audio source is finished and no more audio will be appended
    recognitionRequest?.endAudio()
    recognitionRequest = nil
    recognitionTask = nil
    isListening = false
}