我已经设置好了一个带语音识别功能的文本区域(textarea),可以对着它说话来输入文字:
<span id="button" onclick="toggleStartStop()">CLICK ME!</span>
<script type="text/javascript">
// Tracks whether recognition is currently running; toggled by
// toggleStartStop() and cleared by reset(). Initialized explicitly
// instead of being left `undefined`.
var recognizing = false;

// Continuous speech recognition: keep listening across pauses instead
// of stopping after the first final result.
var recognition = new webkitSpeechRecognition();
recognition.continuous = true;

// Append each finalized transcript chunk to the textarea.
// Look the element up explicitly rather than relying on the implicit
// window-level id global `TEXTAREA`, which breaks if the id changes
// or the script runs outside a browser's named-access mode.
recognition.onresult = function (event) {
    var textarea = document.getElementById("TEXTAREA");
    // resultIndex points at the first result that changed in this event;
    // earlier results were already handled.
    for (var i = event.resultIndex; i < event.results.length; ++i) {
        // Interim (non-final) results are skipped, so text only appears
        // once the recognizer commits a phrase.
        if (event.results[i].isFinal) {
            textarea.value += event.results[i][0].transcript;
        }
    }
}
// Return the UI to its idle state: restore the button's idle label
// and clear the module-level recognizing flag.
function reset() {
    button.innerHTML = "Click to Speak";
    recognizing = false;
}
/**
 * Toggle speech recognition on and off, updating the button label.
 * Reads and writes the module-level `recognizing` flag.
 *
 * BUG FIX: in the original, a misplaced closing brace nested `speak`
 * inside this function, so `speak` was re-created on every call and
 * was unreachable from any other code. The function is now closed
 * correctly and `speak` is hoisted to top level.
 */
function toggleStartStop() {
    if (recognizing) {
        recognition.stop();
        reset();
    } else {
        recognition.start();
        recognizing = true;
        button.innerHTML = "Click to Stop";
    }
}

/**
 * Speak `text` aloud using the Web Speech synthesis API.
 *
 * @param {string} text - The text to speak (en-US voice).
 * @param {function} [callback] - Invoked with no arguments when speech
 *     finishes, or with the error event if synthesis fails.
 */
function speak(text, callback) {
    var u = new SpeechSynthesisUtterance();
    u.text = text;
    u.lang = 'en-US';
    u.onend = function () {
        if (callback) {
            callback();
        }
    };
    u.onerror = function (e) {
        if (callback) {
            // Pass the error event through so the caller can distinguish
            // success from failure.
            callback(e);
        }
    };
    speechSynthesis.speak(u);
}
</script>
但我还想添加一个像谷歌语音搜索那样的语音输入音量指示:
http://www.stateofdigital.com/wp-content/uploads/2015/04/google-voice-search.png
我已经搜索了我能想到的所有关键词,但只能找到关于语音搜索本身的结果,找不到任何与谷歌那种输入音量指示相关的内容(就是那个随着你说话声音大小而变化的圆圈)。所以我想来问问:这种效果该怎么实现?
我找到的所有教程都只讲语音识别本身,上面的代码已经是我能做到的最好程度了,我对 JS 不太熟悉。所以另一个问题是:如何让文字在我说话的同时就实时输入?目前的代码只会在我停顿时才插入文本。
答案 0(得分:1)
您可以在语音识别进行的同时自行获取音频流并计算音量级别。目前语音识别 API 本身并不提供这一级别信息。代码大致如下:
// Capture the microphone stream alongside recognition and report an
// input level (RMS) for each audio buffer via that.levelCheckerCB.
// Assumes `that` (wrapper object with bufSize/levelCheckerCB) and
// `context` (an AudioContext) are defined by the surrounding code.
navigator.webkitGetUserMedia(
    {
        audio: true
    },
    function(stream)
    {
        // "that" is my wrapping object's scope
        that.stream = stream;
        var liveSource = context.createMediaStreamSource(stream);
        // FIX: createJavaScriptNode was removed from the Web Audio API;
        // createScriptProcessor is its same-signature replacement.
        // (ScriptProcessorNode is itself deprecated in favor of
        // AudioWorklet, but works for a simple level meter.)
        var levelChecker = context.createScriptProcessor(that.bufSize, 1, 1);
        liveSource.connect(levelChecker);
        // A ScriptProcessorNode must be connected to the destination
        // for onaudioprocess to fire in most implementations.
        levelChecker.connect(context.destination);
        levelChecker.onaudioprocess = function(event)
        {
            var buf = event.inputBuffer.getChannelData(0);
            var len = buf.length;
            var sumSquares = 0;
            // FIX: true RMS is sqrt(mean of squared samples). The
            // original summed absolute values, which is the mean
            // absolute amplitude, not RMS.
            for (var i = 0; i < len; i++)
            {
                sumSquares += buf[i] * buf[i];
            }
            var rms = Math.sqrt(sumSquares / len);
            that.levelCheckerCB(rms);
        };
    },
    // FIX: webkitGetUserMedia requires an error callback; without one,
    // a denied microphone permission fails silently (or throws).
    function(err)
    {
        console.error("getUserMedia failed:", err);
    }
);
相关的有用讨论见这里(here)。