请帮助您如何访问事件处理函数中的类的属性。
我想在网络语音API的句柄函数中更新类的属性,尽管它总是失败,但我不知道如何将类对象转移到事件处理函数。
以下代码中的错误代码是“无法读取未定义的属性'resultIndex'”
class SpeechToText {
constructor() {
this.recognition = null;
this.text = 'original';
this.getDataFromRecognition = this.getDataFromRecognition.bind(this);
}
getDataFromRecognition(event){
console.log(this.text);
var final_transcript = '';
var interim_transcript = '';
for (var i = event.resultIndex; i < event.results.length; ++i) {
if (event.results[i].isFinal) {
final_transcript += event.results[i][0].transcript;
console.log('result is Final' + final_transcript);
} else {
interim_transcript += event.results[i][0].transcript;
console.log('interim transcript' + interim_transcript);
}
}
this.text = final_transcript;
}
listening() {
let recognition = new window.webkitSpeechRecognition();
recognition.lang = 'zh-CN';
recognition.interimResults = false;
recognition.continuous = false;
recognition.start();
recognition.onresult = this.getDataFromRecognition(event);
recognition.onspeechend = function() {
this.stop();
}
recognition.onnomatch = function(event) {
console.log( "It is no match.");
}
recognition.onerror = function(event) {
console.log('Error occurred in recognition: ' + event.error);
}
}
}
#
在以下代码中,函数“ this.onresult = function(event)”中的this.text将创建SpeechRecognition的新属性。
class SpeechToText {
constructor() {
this.text = 'original';
this.recognition = null;
}
setText(str){
this.text = str;
return this.text;
console.log('this in on setText ' + this);
}
getText(){
return this.text;
}
listening() {
this.recognition = new window.webkitSpeechRecognition();
this.recognition.lang = 'zh-CN';
//this.recognition.lang = 'en-GB';
this.recognition.interimResults = false;
this.recognition.continuous = false;
this.recognition.start();
console.log("this under class" + this);
this.recognition.onresult = function(event) {
// The SpeechRecognitionEvent results property returns a SpeechRecognitionResultList object
// The SpeechRecognitionResultList object contains SpeechRecognitionResult objects.
// It has a getter so it can be accessed like an array
// The [last] returns the SpeechRecognitionResult at the last position.
// Each SpeechRecognitionResult object contains SpeechRecognitionAlternative objects that contain individual results.
// These also have getters so they can be accessed like arrays.
// The [0] returns the SpeechRecognitionAlternative at position 0.
// We then return the transcript property of the SpeechRecognitionAlternative object
this.recognition = new window.webkitSpeechRecognition();
var final_transcript = '';
var interim_transcript = '';
for (var i = event.resultIndex; i < event.results.length; ++i) {
if (event.results[i].isFinal) {
final_transcript += event.results[i][0].transcript;
console.log('result is Final' + final_transcript);
} else {
interim_transcript += event.results[i][0].transcript;
console.log('interim transcript' + interim_transcript);
}
}
//_utteranceForEdgeTrigger = final_transcript;
//final_transcript = capitalize(final_transcript);
console.log('final_transcript' + final_transcript);
//this.setText(final_transcript);
this.text = final_transcript;
console.log('the text is in listening function :' + this.text);
console.log('the this in on result is ' + this);
//window.SpeechToText.getText();
}
this.recognition.onspeechend = function() {
console.log('this in function onspeechend' + this);
this.stop();
console.log('speech end')
}
this.recognition.onnomatch = function(event) {
console.log( "I didn't recognise that color.");
}
this.recognition.onerror = function(event) {
console.log('Error occurred in recognition: ' + event.error);
}
}
}
var speech = new SpeechToText();
//speech.getbx();
speech.listening();
}