I'm facing a problem with the Web Speech API in my Angular application: the API's speech output gets used again as the API's input. Because the speaker and microphone are close together (no headset is used), the audio played by speech synthesis is picked up by speech recognition as if the user had spoken it. I'd really appreciate it if someone could help with this.
Below is a sample of the code I use in the application, followed by a sketch of the only workaround I can think of. Is there any way to stop this feedback loop?
import { Component, OnInit, ChangeDetectorRef } from '@angular/core';
import { SpeechNotification } from './model/speech-notification';
import { SpeechError } from './model/speech-error';
import { ChatconfigrationService } from './service/chatconfigration.service';
import { SpeechRecognizerService } from './service/speech-recognizer.service'; // path assumed

const { webkitSpeechRecognition, SpeechRecognition, webkitSpeechGrammarList, webkitSpeechRecognitionEvent, SpeechGrammarList, SpeechRecognitionEvent } = (window as any);
@Component({
  selector: 'app-chatbot',
  templateUrl: './chatbot.component.html',
  styleUrls: ['./chatbot.component.scss']
})
export class ChatbotComponent implements OnInit {
  recognizing = false;
  notification: string;
  currentLanguage: string;
  recognition: any;
  speechRecz: any;
  speechReconizationObject: any;
  speechText: string;
  chatSettings: any;
  repeatedTimes: any;

  constructor(private speechRecognizer: SpeechRecognizerService,
    private changeDetector: ChangeDetectorRef,
    private userchatconfiguration: ChatconfigrationService) { }

  ngOnInit() {
    this.speechRecognizer.initialize();
    this.speechText = 'Hello! We are listening to you.';
    this.getUserchatSetting();
  }
  getUserchatSetting() {
    this.userchatconfiguration.getUserChatbotSetting()
      .subscribe(data => {
        this.chatSettings = data;
        this.repeatedTimes = data.chatbotVoiceOneRepeateTimes;
        setTimeout(() => {
          this.welcomeMessage();
        }, 3000);
      });
  }
  welcomeMessage() {
    this.speechReconizationObject = new SpeechSynthesisUtterance(this.speechText);
    this.speechReconizationObject.rate = this.chatSettings.rateOfSpeech;
    window.speechSynthesis.speak(this.speechReconizationObject);
    // Subsequent repeats (see detectChanges below) speak the configured welcome message.
    this.speechText = this.chatSettings.welcomeMessage;
    this.speechRecognizer.start();
    this.initRecognition();
  }
  private initRecognition() {
    this.speechRecognizer.onStart().subscribe(data => {
      this.recognizing = true;
      this.notification = 'I\'m listening...';
      console.info('Speech recognition started');
      this.detectChanges();
    });

    this.speechRecognizer.onEnd().subscribe(data => {
      this.recognizing = false;
      this.notification = 'Speech recognition ended';
      console.info('Speech recognition ended');
    });

    // No-match events are currently ignored.
    this.speechRecognizer.onnomatch().subscribe(data => {
    });

    this.speechRecognizer.onResult().subscribe((data: SpeechNotification) => {
      const message = data.content.trim();
      if (data.info === 'final_transcript' && message.length > 0) {
        console.info('Received final transcript');
        console.log(data);
        this.recognizing = false;
      }
    });
  }
  detectChanges = () => {
    const outerthis = this;
    // Speak the message once every 4 seconds for `repeatedTimes`
    // iterations; stop the recognizer on the final iteration.
    (function myLoop(i) {
      setTimeout(function () {
        const msg = new SpeechSynthesisUtterance(outerthis.speechText);
        msg.rate = outerthis.chatSettings.rateOfSpeech;
        console.info(i);
        if (i === 1) {
          outerthis.speechRecognizer.stop();
          return;
        }
        window.speechSynthesis.speak(msg);
        if (--i) {
          myLoop(i); // decrement i and call myLoop again while i > 0
        }
      }, 4000);
    })(this.repeatedTimes);
  }
}
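
The only workaround I can think of so far is to keep recognition paused while the synthesizer is speaking, using the utterance's onstart/onend events. Here is a minimal sketch of that idea; the recognizer parameter is a hypothetical stand-in for my SpeechRecognizerService, not code from my app.

// Sketch: pause recognition while the synthesizer is speaking so its
// audio is never captured as input. `recognizer` is a hypothetical
// stand-in for the SpeechRecognizerService used above.
function speakWithoutFeedback(
  text: string,
  rate: number,
  recognizer: { start: () => void; stop: () => void }
): void {
  const utterance = new SpeechSynthesisUtterance(text);
  utterance.rate = rate;

  // Stop listening the moment playback starts...
  utterance.onstart = () => recognizer.stop();

  // ...and resume only once the utterance has finished (onend also
  // fires when the utterance is cancelled).
  utterance.onend = () => recognizer.start();

  window.speechSynthesis.speak(utterance);
}

Calling recognizer.stop() synchronously before speechSynthesis.speak() might also work; the event handlers are just meant to keep the two from overlapping when speak() is queued behind another utterance. Is this the right direction, or is there a proper way to filter the synthesized audio out of the recognizer's input?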