我正在播放音频文件,它完美无缺,我也在做语音识别任务,而且效果也很好。但是当出现以下情况时,它会停止流式传输音频文件。
以下是我在ViewController.m中的代码
//
// ViewController.swift
// RecordAndPlay
//
// Created by Obaid on 8/15/17.
// Copyright © 2017 test. All rights reserved.
//
import UIKit
import AVFoundation
import AudioToolbox
import Speech
class ViewController: UIViewController,SFSpeechRecognizerDelegate {
@IBOutlet var imgBackground: UIImageView!
@IBOutlet var lblTextValue: UILabel!
@IBOutlet var btnPlay: UIButton!
@IBOutlet var btnRepeatIt: UIButton!
@IBOutlet var microphoneButton: UIButton!
var stringValue: String!
var indexValue=0
var playButton:UIButton?
var player: AVPlayer! = nil
var playerItem:AVPlayerItem?
private let speechRecognizer = SFSpeechRecognizer(locale: Locale.init(identifier: "en-US"))!
var recordButton: UIButton!
var recordingSession: AVAudioSession!
var audioRecorder: AVAudioRecorder!
private var recognitionRequest: SFSpeechAudioBufferRecognitionRequest?
private var recognitionTask: SFSpeechRecognitionTask?
private let audioEngine = AVAudioEngine()
var arrayOfStrings: [String] = ["good morning", "good evening", "good night", "good afternoon", "well developed"]
var arrayOfStringsURL: [String] = ["https://storage.googleapis.com/abdul-sample-bucket/test/sampl1.mp3", "http://dictionary.cambridge.org/media/english/us_pron/u/usc/uscld/uscld00580.mp3", "http://dictionary.cambridge.org/media/english/us_pron/u/usc/uscld/uscld00588.mp3", "http://dictionary.cambridge.org/media/english/us_pron/u/usc/uscld/uscld00579.mp3", "http://dictionary.cambridge.org/media/english/uk_pron/u/ukw/ukwel/ukwelde019.mp3"]
override func viewDidLoad() {
super.viewDidLoad()
initializeSpeechRecognition()
// Do any additional setup after loading the view, typically from a nib.
//createLabels()
check_record_permission()
lblTextValue.text=arrayOfStrings[0]
imgBackground.backgroundColor=UIColor.yellow
imgBackground.frame = CGRect(x: 0, y: 140, width: self.view.frame.width, height: self.imgBackground.frame.height)
btnPlay.setImage(UIImage(named: "play.png"), for: .normal)
let xOrigin=self.view.frame.width - 70
btnPlay.frame=CGRect(x:xOrigin, y: self.btnPlay.frame.origin.y, width:self.btnPlay.frame.width, height: self.btnPlay.frame.height)
btnRepeatIt.isHidden=true
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
var btnClicked=0
@IBAction func start_play(_ sender: UIButton){
let url = URL(string: arrayOfStringsURL[indexValue])
print(arrayOfStringsURL[indexValue])
let playerItem:AVPlayerItem = AVPlayerItem(url: url!)
player=nil
player = AVPlayer(playerItem: playerItem)
player!.play()
imgBackground.backgroundColor=UIColor.green
lblTextValue.textColor=UIColor.white
lblTextValue.text="Repeat it!"
btnClicked=1
microphoneButton.isHidden=false
microphoneButton.frame=btnPlay.frame
microphoneButton.setImage(UIImage(named: "record.png"), for: .normal)
btnPlay.isHidden=true
btnRepeatIt.isHidden=false
}
@IBAction func repeatTheSound(_ sender: UIButton){
print("repeat the sound")
let url = URL(string: arrayOfStringsURL[1])
let playerItem:AVPlayerItem = AVPlayerItem(url: url!)
player=nil
player = AVPlayer(playerItem: playerItem)
player!.play()
}
func initializeSpeechRecognition(){
microphoneButton.isHidden=true
speechRecognizer.delegate = self
SFSpeechRecognizer.requestAuthorization { (authStatus) in
var isButtonEnabled = false
switch authStatus {
case .authorized:
isButtonEnabled = true
case .denied:
isButtonEnabled = false
print("User denied access to speech recognition")
case .restricted:
isButtonEnabled = false
print("Speech recognition restricted on this device")
case .notDetermined:
isButtonEnabled = false
print("Speech recognition not yet authorized")
}
OperationQueue.main.addOperation() {
if(self.btnClicked==1){
self.btnPlay.isEnabled = isButtonEnabled
}
}
}
}
var isAudioRecordingGranted: Bool!
func check_record_permission()
{
switch AVAudioSession.sharedInstance().recordPermission() {
case AVAudioSessionRecordPermission.granted:
isAudioRecordingGranted = true
break
case AVAudioSessionRecordPermission.denied:
isAudioRecordingGranted = false
break
case AVAudioSessionRecordPermission.undetermined:
AVAudioSession.sharedInstance().requestRecordPermission() { [unowned self] allowed in
DispatchQueue.main.async {
if allowed {
self.isAudioRecordingGranted = true
} else {
self.isAudioRecordingGranted = false
}
}
}
break
default:
break
}
}
//Speech Recognition
@IBAction func microphoneTapped(_ sender: AnyObject) {
if audioEngine.isRunning {
audioEngine.stop()
recognitionRequest?.endAudio()
} else {
startRecording()
}
}
func startRecording() {
if recognitionTask != nil { //1
recognitionTask?.cancel()
recognitionTask = nil
}
let audioSession = AVAudioSession.sharedInstance() //2
do {
try audioSession.setCategory(AVAudioSessionCategoryRecord)
try audioSession.setMode(AVAudioSessionModeMeasurement)
try audioSession.setActive(true, with: .notifyOthersOnDeactivation)
} catch {
print("audioSession properties weren't set because of an error.")
}
recognitionRequest = SFSpeechAudioBufferRecognitionRequest() //3
guard let inputNode = audioEngine.inputNode else {
fatalError("Audio engine has no input node")
} //4
guard let recognitionRequest = recognitionRequest else {
fatalError("Unable to create an SFSpeechAudioBufferRecognitionRequest object")
} //5
recognitionRequest.shouldReportPartialResults = true //6
recognitionTask = speechRecognizer.recognitionTask(with: recognitionRequest, resultHandler: { (result, error) in //7
var isFinal = false //8
if result != nil {
self.stringValue = result?.bestTranscription.formattedString //9
isFinal = (result?.isFinal)!
if(isFinal){
self.stringValue = self.stringValue.lowercased()
print(self.stringValue)
if(self.stringValue.contains(self.arrayOfStrings[0])){
self.imgBackground.backgroundColor=UIColor.green
self.lblTextValue.text="Well Done"
self.stringValue=""
self.updatePage()
}
else{
self.imgBackground.backgroundColor=UIColor.red
self.lblTextValue.text="Wrong! Try Again"
self.stringValue=""
}
}
self.stopRecording()
}
if error != nil || isFinal { //10
self.audioEngine.stop()
inputNode.removeTap(onBus: 0)
self.recognitionRequest = nil
self.recognitionTask = nil
}
})
let recordingFormat = inputNode.outputFormat(forBus: 0) //11
inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer, when) in
self.recognitionRequest?.append(buffer)
}
audioEngine.prepare() //12
do {
try audioEngine.start()
} catch {
print("audioEngine couldn't start because of an error.")
}
}
func speechRecognizer(_ speechRecognizer: SFSpeechRecognizer, availabilityDidChange available: Bool) {
if available {
microphoneButton.isEnabled = true
} else {
microphoneButton.isEnabled = false
}
}
func updatePage(){
print(indexValue)
if(indexValue<self.arrayOfStrings.count){
indexValue += 1
self.imgBackground.backgroundColor=UIColor.yellow
self.lblTextValue.textColor=UIColor.black
self.lblTextValue.text=self.arrayOfStrings[indexValue]
self.btnPlay.isHidden=false
self.microphoneButton.isHidden=true
}
else{
indexValue=0
}
}
func stopRecording(){
if audioEngine.isRunning {
audioEngine.stop()
recognitionRequest?.endAudio()
self.recognitionTask = nil
}
}
}
startRecording()方法是我启动speechRecognition的时候 start_play()方法是我播放音频的时候
在启动speechRecognition后需要停止的背景中会发生一些事情。