错误："超时"（Timeout）——UserInfo 中 NSLocalizedDescription = 超时，错误域 = SiriSpeechErrorDomain，代码 = 100 "(null)"

时间:2017-01-03 06:23:08

标签: objective-c speech-recognition speech-to-text speech

我正在开发一个将语音转换为文本的应用程序，下面是我的代码。我使用了 Apple 的 Speech（语音识别）框架。

// Keep the Start button disabled until speech-recognition
// authorization has been granted (handled in -viewDidAppear:).
- (void)viewDidLoad {
    [super viewDidLoad];
    self.start_btn.enabled = NO;
}

// Creates the en-US speech recognizer and requests user authorization.
// The Start button is enabled only when recognition is authorized.
- (void)viewDidAppear:(BOOL)animated
{
    // BUG FIX: the original override never called super, which breaks
    // UIViewController appearance bookkeeping.
    [super viewDidAppear:animated];

    self.speechRecognizer = [[SFSpeechRecognizer alloc]
        initWithLocale:[NSLocale localeWithLocaleIdentifier:@"en-US"]];
    self.speechRecognizer.delegate = self;

    [SFSpeechRecognizer requestAuthorization:^(SFSpeechRecognizerAuthorizationStatus authStatus) {
        // The authorization handler is not guaranteed to be invoked on the
        // main thread; hop to main before touching UIKit controls.
        dispatch_async(dispatch_get_main_queue(), ^{
            switch (authStatus) {
                case SFSpeechRecognizerAuthorizationStatusAuthorized:
                    // User gave access to speech recognition.
                    NSLog(@"Authorized");
                    [self.start_btn setEnabled:YES];
                    break;

                case SFSpeechRecognizerAuthorizationStatusDenied:
                    // User denied access to speech recognition.
                    NSLog(@"AuthorizationStatusDenied");
                    [self.start_btn setEnabled:NO];
                    break;

                case SFSpeechRecognizerAuthorizationStatusRestricted:
                    // Speech recognition restricted on this device.
                    NSLog(@"AuthorizationStatusRestricted");
                    [self.start_btn setEnabled:NO];
                    break;

                case SFSpeechRecognizerAuthorizationStatusNotDetermined:
                    // Speech recognition not yet authorized.
                    [self.start_btn setEnabled:NO];
                    break;

                default:
                    NSLog(@"Default");
                    break;
            }
        });
    }];
}


       - (void)didReceiveMemoryWarning {
[super didReceiveMemoryWarning];
// Dispose of any resources that can be recreated.
       }

     -(void)start_record{
//CAncel the previous task if it's running

NSError * outError;

AVAudioSession *audioSession = [AVAudioSession sharedInstance];

[audioSession setCategory:AVAudioSessionCategoryRecord error:&outError];
[audioSession setMode:AVAudioSessionModeMeasurement error:&outError];
[audioSession setActive:YES withOptions:AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation  error:&outError];

SFSpeechAudioBufferRecognitionRequest *request2 = [[SFSpeechAudioBufferRecognitionRequest alloc] init];

self.audioEngine = [[AVAudioEngine alloc]init];

AVAudioInputNode *inputNode = self.audioEngine.inputNode;

if (request2 == nil) {
    NSLog(@"Unable to created a SFSpeechAudioBufferRecognitionRequest object");
}
if (inputNode == nil) {
    NSLog(@"Audio engine has no input node ");


}

//configure request so that results are returned before audio recording is finished

request2.shouldReportPartialResults = YES;

self.recognitionTask = [self.speechRecognizer recognitionTaskWithRequest:request2 resultHandler:^(SFSpeechRecognitionResult * result, NSError *  error1) {


    BOOL isFinal = false;

    if(result !=  nil)
    {
        self.speech_txt.text = result.bestTranscription.formattedString;

      //  NSLog(@" the result:%@",result.bestTranscription.formattedString);

        NSLog(@"%@",self.speech_txt.text);


        isFinal = result.isFinal;

    }

    if (error1 != nil || isFinal) {

        [self.audioEngine stop];
        [inputNode removeTapOnBus:0];
//            [self.audioEngine stop];
      //  [self.recognitionRequest endAudio];

        self.recognitionRequest = nil;
        self.recognitionTask = nil;

       [self.start_btn setEnabled:YES];

        [self.start_btn setTitle:@"Start Recording" forState:UIControlStateNormal];

    }

    }];

    AVAudioFormat *recordingFormat =  [inputNode outputFormatForBus:0];

    [inputNode installTapOnBus:0 bufferSize:1024 format:recordingFormat block:^(AVAudioPCMBuffer * _Nonnull buffer, AVAudioTime * _Nonnull when){


     [self.recognitionRequest appendAudioPCMBuffer:buffer];

}];
     NSError *error1;
     [self.audioEngine prepare];
     [self.audioEngine startAndReturnError:&error1];
     self.speech_txt.text = @"(Go ahead , I'm listening)";

     }

//MARK: SFSpeechRecognizerDelegate

    -(void)speechRecognizer:(SFSpeechRecognizer *)speechRecognizer availabilityDidChange:(BOOL)available
    {
if (available) {

    [self.start_btn setEnabled:YES];
    [self.start_btn setTitle:@"Start Recording" forState:UIControlStateNormal];


}
else{
    [self.start_btn setEnabled:NO];
    [self.start_btn setTitle:@"Recognition not available" forState:UIControlStateDisabled];

   }}


   - (IBAction)start_btn_action:(id)sender {

if (self.audioEngine.isRunning) {

    [self.audioEngine stop];

    [self.recognitionRequest endAudio];
    [self.start_btn setEnabled:NO];

    [self.start_btn setTitle:@"Stopping" forState:UIControlStateDisabled];



     }
    else{
    [self start_record];

    [self.start_btn setTitle:@"Stop Recording" forState:@""];

     }}

我已经实现了这段代码,在运行时显示的错误如下:

  

[Utility] +[AFAggregator logDictationFailedWithError:] Error Domain=kAFAssistantErrorDomain Code=203 "超时（Timeout）" UserInfo={NSLocalizedDescription=Timeout, NSUnderlyingError=0x170250140 {Error Domain=SiriSpeechErrorDomain Code=100 "(null)"}}

我该如何解决这个问题?

0 个答案:

没有答案