苹果演讲api很久才认出来

时间:2018-02-28 11:34:36

标签: ios speech-recognition swift4 apple-speech

我使用Apple语音识别语音一小时,但Apple语音只识别一分钟的语音。

我读到我可以提出更多一个请求来识别语音超过一分钟,但我不知道如何。

这是我的代码

import UIKit
import Speech

public class ViewController: UIViewController, SFSpeechRecognizerDelegate {
    // MARK: Properties

    private let speechRecognizer = SFSpeechRecognizer(locale: Locale(identifier: "ar_SA"))!

    private var recognitionRequest: SFSpeechAudioBufferRecognitionRequest?

    private var recognitionTask: SFSpeechRecognitionTask?

    private let audioEngine = AVAudioEngine()

    @IBOutlet var textView : UITextView!

    @IBOutlet var recordButton : UIButton!

     var inString = ""
    public override func viewDidLoad() {
        super.viewDidLoad()

        speechRecognizer.delegate = self

        SFSpeechRecognizer.requestAuthorization { authStatus in
            /*
             The callback may not be called on the main thread. Add an
             operation to the main queue to update the record button's state.
             */
            OperationQueue.main.addOperation {
                switch authStatus {
                case .authorized:
                    print("Dalal")

                case .denied:
                    print("Dalal2")
                case .restricted:
                    print("Dalal3")
                case .notDetermined:
                    print("Dalal4")
                }
            }
        }
        // Disable the record buttons until authorization has been granted.
        try! startRecording()
    }



    private func startRecording() throws {

        // Cancel the previous task if it's running.
        if let recognitionTask = recognitionTask {
            recognitionTask.cancel()
            self.recognitionTask = nil
        }

        let audioSession = AVAudioSession.sharedInstance()
        try audioSession.setCategory(AVAudioSessionCategoryRecord)
        try audioSession.setMode(AVAudioSessionModeMeasurement)
        try audioSession.setActive(true, with: .notifyOthersOnDeactivation)

        recognitionRequest = SFSpeechAudioBufferRecognitionRequest()

        guard let inputNode = audioEngine.inputNode else { fatalError("Audio engine has no input node") }
        guard let recognitionRequest = recognitionRequest else { fatalError("Unable to created a SFSpeechAudioBufferRecognitionRequest object") }

        // Configure request so that results are returned before audio recording is finished
        recognitionRequest.shouldReportPartialResults = true


        // A recognition task represents a speech recognition session.
        // We keep a reference to the task so that it can be cancelled.
        recognitionTask = speechRecognizer.recognitionTask(with: recognitionRequest) { result, error in
            var isFinal = false
            let fileName = "Test"
            let dir = try? FileManager.default.url(for: .documentDirectory,
                                                   in: .userDomainMask, appropriateFor: nil, create: true)

            // If the directory was found, we write a file to it and read it back
            if let fileURL = dir?.appendingPathComponent(fileName).appendingPathExtension("txt") {

                // Write to the file named Test
                do {
                    if let result = result {
                        self.textView.text = result.bestTranscription.formattedString
                        isFinal = result.isFinal
                    }
                    try self.textView.text.write(to: fileURL, atomically: true, encoding: .utf8)
                } catch {
                    print("Failed writing to URL: \(fileURL), Error: " + error.localizedDescription)
                }


            if error != nil || isFinal {
                self.audioEngine.stop()
              //  self.addp()
                inputNode.removeTap(onBus: 0)

                self.recognitionRequest = nil
                self.recognitionTask = nil


            }
            do {
                self.inString = try String(contentsOf: fileURL)
            } catch {
                print("Failed reading from URL: \(fileURL), Error: " + error.localizedDescription)
            }
                print("Read from the file: \(self.inString)")
        }

        }

        let recordingFormat = inputNode.outputFormat(forBus: 0)
        inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer: AVAudioPCMBuffer, when: AVAudioTime) in
            self.recognitionRequest?.append(buffer)
        }

        audioEngine.prepare()

        try audioEngine.start()

        textView.text = "(listening)"
    }


    public func speechRecognizer(_ speechRecognizer: SFSpeechRecognizer, availabilityDidChange available: Bool) {
       print("any text")
    }

}//end class

任何建议或帮助? 谢谢。

0 个答案:

没有答案