I want to record speech, automatically split the recorded audio (or audio blob) into 1-second chunks, export each chunk to a wav file, and send it to the backend. This should happen asynchronously while the user is speaking.
I am currently using the recorder.js library below for this task: https://cdn.rawgit.com/mattdiamond/Recorderjs/08e7abd9/dist/recorder.js
My problem is that the blob/wav files grow larger over time. I think this is because the data accumulates, making each chunk bigger. So instead of sending consecutive 1-second chunks, over time I am actually sending cumulative chunks.
I can't figure out whether this problem is caused by my code or happens inside the recorder.js library. If anyone has used recorder.js, or any other JavaScript approach for a similar task, I would appreciate it if you could look through this code and tell me where it breaks.
Here is my JS code:
var gumStream;              // Stream from getUserMedia()
var rec;                    // Recorder.js object
var input;                  // MediaStreamAudioSourceNode we'll be recording
var recordingNotStopped;    // True while the user has pressed record and has not yet pressed stop

const trackLengthInMS = 1000;   // Length of each audio chunk in milliseconds
const maxNumOfSecs = 1000;      // Maximum number of 1-second chunks we support per recording

// Shim for AudioContext when it's not available.
var AudioContext = window.AudioContext || window.webkitAudioContext;
var audioContext;           // Audio context to help us record

var recordButton = document.getElementById("recordButton");
var stopButton = document.getElementById("stopButton");

// Event handlers for the two buttons above
recordButton.addEventListener("click", startRecording);
stopButton.addEventListener("click", stopRecording);

// Asynchronous function to stop the recording every second and export the blob to a wav file
const sleep = time => new Promise(resolve => setTimeout(resolve, time));

const asyncFn = async () => {
  for (let i = 0; i < maxNumOfSecs; i++) {
    if (recordingNotStopped) {
      rec.record();
      await sleep(trackLengthInMS);
      rec.stop();
      // Stop microphone access
      gumStream.getAudioTracks()[0].stop();
      // Create the wav blob and pass it on to createWaveBlob
      rec.exportWAV(createWaveBlob);
    }
  }
}
function startRecording() {
  console.log("recordButton clicked");
  recordingNotStopped = true;

  var constraints = {
    audio: true,
    video: false
  }

  recordButton.disabled = true;
  stopButton.disabled = false;

  // Using the standard promise-based getUserMedia()
  navigator.mediaDevices.getUserMedia(constraints).then(function(stream) {
    // Create an audio context after getUserMedia is called
    audioContext = new AudioContext();
    // Assign to gumStream for later use
    gumStream = stream;
    // Use the stream
    input = audioContext.createMediaStreamSource(stream);
    // Create the Recorder object and configure it to record mono sound (1 channel)
    rec = new Recorder(input, {
      numChannels: 1
    });
    // Call the asynchronous function to split and export audio
    asyncFn();
    console.log("Recording started");
  }).catch(function(err) {
    // Enable the record button if getUserMedia() fails
    recordButton.disabled = false;
    stopButton.disabled = true;
  });
}
function stopRecording() {
  console.log("stopButton clicked");
  recordingNotStopped = false;

  // Disable the stop button and enable the record button to allow new recordings
  stopButton.disabled = true;
  recordButton.disabled = false;

  // Tell the recorder to stop recording
  rec.stop();
  // Stop microphone access
  gumStream.getAudioTracks()[0].stop();
}
function createWaveBlob(blob) {
  var url = URL.createObjectURL(blob);
  // Wrap the blob in a wav file and call sendBlob to send the wav file to the server
  var convertedfile = new File([blob], 'filename.wav');
  sendBlob(convertedfile);
}
Answer 0 (score: 0):
Recorder.js keeps a record buffer of the audio it has captured. When exportWAV is called, the record buffer is encoded but not cleared. You need to call clear on the recorder before calling record again, so that the previous audio chunk is removed from the record buffer.
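A minimal sketch of how the question's chunking loop could apply this, assuming rec is the Recorder.js instance and that sleep, trackLengthInMS, maxNumOfSecs, recordingNotStopped, and createWaveBlob come from the question's code; the gumStream track stop is left to stopRecording so the microphone stays open between chunks:

const asyncFn = async () => {
  for (let i = 0; i < maxNumOfSecs; i++) {
    if (recordingNotStopped) {
      rec.clear();                    // Drop the previously exported audio from the record buffer
      rec.record();
      await sleep(trackLengthInMS);
      rec.stop();
      rec.exportWAV(createWaveBlob);  // Encodes only this 1-second chunk
    }
  }
}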