I have an audio buffer rendered with webkitOfflineAudioContext, and I'd like to export it as a WAV file. How can I do that? I tried using recorder.js, but I couldn't figure out how to use it. Here's my code: http://jsfiddle.net/GBQV8/.
Answer 0 (score: 11)
Here's a gist that should help: https://gist.github.com/kevincennis/9754325.
I haven't actually tested it, so there may be a silly typo or something, but the basic approach works (I've done it before).
Essentially, you use the web worker from Recorder.js directly, so that you can process one big AudioBuffer in a single pass instead of recording it incrementally in real time.
I'll paste the code here too, in case anything happens to the gist...
// assuming a var named `buffer` exists and is an AudioBuffer instance

// start a new worker
// we can't use Recorder directly, since it doesn't support what we're trying to do
var worker = new Worker('recorderWorker.js');

// initialize the new worker
worker.postMessage({
  command: 'init',
  config: { sampleRate: 44100 }
});

// callback for `exportWAV`
worker.onmessage = function( e ) {
  var blob = e.data;
  // this would be your WAV blob
};

// send the channel data from our buffer to the worker
worker.postMessage({
  command: 'record',
  buffer: [
    buffer.getChannelData(0),
    buffer.getChannelData(1)
  ]
});

// ask the worker for a WAV
worker.postMessage({
  command: 'exportWAV',
  type: 'audio/wav'
});
Answer 1 (score: 7)
I thought I'd share a working solution that I put together from Kevin's answer.
Here's the waveWorker.js script:
self.onmessage = function( e ){
  var wavPCM = new WavePCM( e['data']['config'] );
  wavPCM.record( e['data']['pcmArrays'] );
  wavPCM.requestData();
};

var WavePCM = function( config ){
  this.sampleRate = config['sampleRate'] || 48000;
  this.bitDepth = config['bitDepth'] || 16;
  this.recordedBuffers = [];
  this.bytesPerSample = this.bitDepth / 8;
};

WavePCM.prototype.record = function( buffers ){
  this.numberOfChannels = this.numberOfChannels || buffers.length;
  var bufferLength = buffers[0].length;
  var reducedData = new Uint8Array( bufferLength * this.numberOfChannels * this.bytesPerSample );

  // Interleave
  for ( var i = 0; i < bufferLength; i++ ) {
    for ( var channel = 0; channel < this.numberOfChannels; channel++ ) {
      var outputIndex = ( i * this.numberOfChannels + channel ) * this.bytesPerSample;
      var sample = buffers[ channel ][ i ];

      // Check for clipping
      if ( sample > 1 ) {
        sample = 1;
      }
      else if ( sample < -1 ) {
        sample = -1;
      }

      // bit reduce and convert to uInt
      switch ( this.bytesPerSample ) {
        case 4: // 32-bit
          sample = sample * 2147483648;
          reducedData[ outputIndex ] = sample;
          reducedData[ outputIndex + 1 ] = sample >> 8;
          reducedData[ outputIndex + 2 ] = sample >> 16;
          reducedData[ outputIndex + 3 ] = sample >> 24;
          break;

        case 3: // 24-bit
          sample = sample * 8388608;
          reducedData[ outputIndex ] = sample;
          reducedData[ outputIndex + 1 ] = sample >> 8;
          reducedData[ outputIndex + 2 ] = sample >> 16;
          break;

        case 2: // 16-bit
          sample = sample * 32768;
          reducedData[ outputIndex ] = sample;
          reducedData[ outputIndex + 1 ] = sample >> 8;
          break;

        case 1: // 8-bit
          reducedData[ outputIndex ] = ( sample + 1 ) * 128;
          break;

        default:
          throw "Only 8, 16, 24 and 32 bits per sample are supported";
      }
    }
  }

  this.recordedBuffers.push( reducedData );
};

WavePCM.prototype.requestData = function(){
  var bufferLength = this.recordedBuffers[0].length;
  var dataLength = this.recordedBuffers.length * bufferLength;
  var headerLength = 44;
  var wav = new Uint8Array( headerLength + dataLength );
  var view = new DataView( wav.buffer );

  view.setUint32( 0, 1380533830, false );    // RIFF identifier 'RIFF'
  view.setUint32( 4, 36 + dataLength, true ); // file length minus RIFF identifier length and file description length
  view.setUint32( 8, 1463899717, false );    // RIFF type 'WAVE'
  view.setUint32( 12, 1718449184, false );   // format chunk identifier 'fmt '
  view.setUint32( 16, 16, true );            // format chunk length
  view.setUint16( 20, 1, true );             // sample format (raw)
  view.setUint16( 22, this.numberOfChannels, true ); // channel count
  view.setUint32( 24, this.sampleRate, true );       // sample rate
  view.setUint32( 28, this.sampleRate * this.bytesPerSample * this.numberOfChannels, true ); // byte rate (sample rate * block align)
  view.setUint16( 32, this.bytesPerSample * this.numberOfChannels, true ); // block align (channel count * bytes per sample)
  view.setUint16( 34, this.bitDepth, true );          // bits per sample
  view.setUint32( 36, 1684108385, false );   // data chunk identifier 'data'
  view.setUint32( 40, dataLength, true );    // data chunk length

  for ( var i = 0; i < this.recordedBuffers.length; i++ ) {
    wav.set( this.recordedBuffers[i], i * bufferLength + headerLength );
  }

  self.postMessage( wav, [wav.buffer] );
  self.close();
};
And here's how to use it:
async function audioBufferToWaveBlob( audioBuffer ) {
  return new Promise(function( resolve, reject ) {
    var worker = new Worker('./waveWorker.js');

    worker.onmessage = function( e ) {
      var blob = new Blob([ e.data.buffer ], { type: 'audio/wav' });
      resolve( blob );
    };

    let pcmArrays = [];
    for ( let i = 0; i < audioBuffer.numberOfChannels; i++ ) {
      pcmArrays.push( audioBuffer.getChannelData(i) );
    }

    worker.postMessage({
      pcmArrays,
      config: { sampleRate: audioBuffer.sampleRate }
    });
  });
}
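For example (a minimal sketch; audioBuffer stands in for the AudioBuffer you rendered, e.g. from an OfflineAudioContext):

// hypothetical usage: encode a rendered AudioBuffer and listen to the result
const wavBlob = await audioBufferToWaveBlob( audioBuffer );
const audioEl = new Audio( URL.createObjectURL( wavBlob ) );
audioEl.play();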
It was hacked together quickly, so by all means feel free to fix it up and post a link to a better version in the comments :)
Answer 2 (score: 0)
When using recorder.js, make sure you record a piece of audio first and then stop the recorder. After stopping it, you can call the .exportWAV function. The callback receives a blob in wav format. Rather than recording the buffers yourself, it's better to let recorder.js manage them: when you call exportWAV, it exports the buffers it has already saved. It creates those buffers from the source object you passed in when creating the recorder.
var rec = new Recorder(yourSourceObject);
rec.record();
// let it record for a while...
rec.stop();
rec.exportWAV(function( blob ){
  // the generated blob contains the wav file
});
You can also check out the source code of recorderWorker.js to learn how to convert the buffers to a wav file yourself.
Answer 3 (score: 0)
If you don't need real-time processing, check out OfflineAudioContext.
It can be useful for processing audio data as if through a regular AudioContext, but not in real time. If your data doesn't come from a microphone, you'll probably want to process it as fast as possible. You'll then need to create your buffer with an OfflineAudioContext before encoding it to wav.
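Here's a minimal sketch of such an offline render, assuming a two-channel, 44.1 kHz graph fed by an AudioBuffer you already have (someDecodedAudioBuffer is just a placeholder); in modern browsers startRendering returns a promise:

// render 10 seconds of audio offline: 2 channels at 44100 Hz
var offlineCtx = new OfflineAudioContext( 2, 44100 * 10, 44100 );

// build your graph as usual, e.g. play an existing buffer through it
var source = offlineCtx.createBufferSource();
source.buffer = someDecodedAudioBuffer; // hypothetical: an AudioBuffer you already decoded
source.connect( offlineCtx.destination );
source.start( 0 );

// startRendering resolves with the rendered AudioBuffer,
// which you can then pass to a WAV encoder like the ones above
offlineCtx.startRendering().then(function( renderedBuffer ) {
  // encode `renderedBuffer` to wav here
});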