I'm building a simple spectrum analyzer with the Web Audio API, using the computer's microphone as the input signal. My current implementation works fine with the default sample rate (typically 48 kHz, though it may be 44.1 kHz depending on the browser).
For some applications I would like to use a lower sample rate (~8 kHz) for the FFT.
The Web Audio API appears to have added support for custom sample rates, currently available only in Firefox (https://developer.mozilla.org/en-US/docs/Web/API/AudioContextOptions/sampleRate).
Adding the sample rate to the context constructor:
// create AudioContext object named 'audioCtx'
var audioCtx = new (window.AudioContext || window.webkitAudioContext)({sampleRate: 8000});
console.log(audioCtx.sampleRate);
The console logs "8000" (in Firefox), so this appears to work.
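Per the Web Audio spec, a browser that cannot honor the requested sampleRate should throw a NotSupportedError from the constructor, so one defensive pattern (my own sketch, not part of the original code) is to fall back to the default rate:

// Sketch: request the custom rate, fall back to the browser default.
// Assumes the constructor throws (per spec) when the requested
// sampleRate is unsupported or out of range; a browser that merely
// ignores the option will simply report its default rate below.
var Ctx = window.AudioContext || window.webkitAudioContext;
var audioCtx;
try {
    audioCtx = new Ctx({sampleRate: 8000});
} catch (e) {
    console.warn("Custom sampleRate not supported, using default:", e);
    audioCtx = new Ctx();
}
console.log(audioCtx.sampleRate); // 8000 if honored, otherwise the default rate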
The user turns the microphone on and off with a dropdown menu. Here is the dropdown handler:
var microphone;

function getMicInputState()
{
    let selectedValue = document.getElementById("micOffOn").value;
    if (selectedValue === "on") {
        navigator.mediaDevices.getUserMedia({audio: true})
            .then(stream => {
                microphone = audioCtx.createMediaStreamSource(stream);
                microphone.connect(analyserNode);
            })
            .catch(err => { alert("Microphone is required."); });
    } else {
        microphone.disconnect();
    }
}
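For completeness, these snippets assume an analyserNode created elsewhere in the setup code. A minimal sketch of that assumed setup (the fftSize value here is an arbitrary example):

// Assumed analyser setup referenced by the snippets above (sketch only)
var analyserNode = audioCtx.createAnalyser();
analyserNode.fftSize = 2048; // arbitrary example value
var freqData = new Uint8Array(analyserNode.frequencyBinCount);
// In the drawing loop, the spectrum data would be fetched with:
// analyserNode.getByteFrequencyData(freqData);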
In Firefox, turning the microphone on from the dropdown brings up the usual popup asking for access to the microphone. After clicking to allow it, the console shows: "Connecting AudioNodes from AudioContexts with different sample-rate is currently not supported", and the spectrum analyzer display stays blank.
Any ideas how to get around this error? If we can, any guidance on how to specify the sampleRate when the sample rate of the user's sound card is unknown?
Answer 0 (score: 1)
One way around this is to pass the audio packets captured from the microphone to the analyser node through a script processor node, which resamples the audio packets flowing through it.
A brief overview of the script processor node approach, as pseudocode:

1) Create a script processor node.
2) Connect the live media source (the microphone) to the analyser node via the script processor node.
3) Whenever an audio packet passes through the script processor node (its onaudioprocess event fires):
3.1) Extract the audio data from the input buffer and resample it.
3.2) Write the resampled data to the output buffer.

The following snippet implements this pseudocode:
var microphone;

// *** 1) create a script processor node
var scriptProcessorNode = audioCtx.createScriptProcessor(4096, 1, 1);

function getMicInputState()
{
    let selectedValue = document.getElementById("micOffOn").value;
    if (selectedValue === "on") {
        navigator.mediaDevices.getUserMedia({audio: true})
            .then(stream => {
                microphone = audioCtx.createMediaStreamSource(stream);
                // *** 2) connect live media source to analyserNode via script processor node
                microphone.connect(scriptProcessorNode);
                scriptProcessorNode.connect(analyserNode);
            })
            .catch(err => { alert("Microphone is required."); });
    } else {
        microphone.disconnect();
    }
}
// *** 3) Whenever an audio packet passes through the script processor node, resample it
scriptProcessorNode.onaudioprocess = function(event){
    var inputBuffer = event.inputBuffer;
    var outputBuffer = event.outputBuffer;
    for (var channel = 0; channel < outputBuffer.numberOfChannels; channel++) {
        var inputData = inputBuffer.getChannelData(channel);
        var outputData = outputBuffer.getChannelData(channel);

        // *** 3.1) Resample inputData
        var fromSampleRate = audioCtx.sampleRate;
        var toSampleRate = 8000;
        var resampledAudio = downsample(inputData, fromSampleRate, toSampleRate);

        // *** 3.2) Copy the resampled audio into the output buffer.
        // Loop only over the resampled samples: the downsampled block is
        // shorter than the output buffer, and indexing past its end would
        // write NaN into the Float32Array. The remainder stays silent (zero).
        for (var sample = 0; sample < resampledAudio.length; sample++) {
            outputData[sample] = resampledAudio[sample];
        }
    }
};
function downsample(buffer, fromSampleRate, toSampleRate) {
    // buffer is a Float32Array
    var sampleRateRatio = Math.round(fromSampleRate / toSampleRate);
    var newLength = Math.round(buffer.length / sampleRateRatio);
    var result = new Float32Array(newLength);
    var offsetResult = 0;
    var offsetBuffer = 0;
    while (offsetResult < result.length) {
        var nextOffsetBuffer = Math.round((offsetResult + 1) * sampleRateRatio);
        // Average all input samples falling in this window into one output
        // sample: a crude low-pass filter combined with decimation.
        var accum = 0, count = 0;
        for (var i = offsetBuffer; i < nextOffsetBuffer && i < buffer.length; i++) {
            accum += buffer[i];
            count++;
        }
        result[offsetResult] = accum / count;
        offsetResult++;
        offsetBuffer = nextOffsetBuffer;
    }
    return result;
}
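As a quick sanity check (my own example, not from the original answer): downsampling one 4096-sample block from 48 kHz to 8 kHz uses a ratio of 6, so the result should hold roughly 4096 / 6 ≈ 683 samples, and averaging a constant signal should preserve its value:

// Sketch: verify the output length and values of downsample()
var block = new Float32Array(4096).fill(0.5); // constant test signal
var out = downsample(block, 48000, 8000);
console.log(out.length); // 683 (4096 / 6, rounded)
console.log(out[0]);     // 0.5 — the average of a constant block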