Converting a Float32Array of decoded samples to an AudioBuffer

Asked: 2014-06-11 16:38:25

Tags: javascript web-audio aurora.js

One of the browsers I'm trying to support can't decode a particular codec with AudioContext.decodeAudioData(), so I'm using Aurora.js to decode the audio files instead.

How do I turn the decoded samples I receive from Aurora.js into an AudioBuffer that I can actually use to play the audio?

Here is my code so far:

var AudioContext = (window.AudioContext || window.webkitAudioContext);
var context = new AudioContext();
var segmentUrls = [
    '/segments/00.wav',
    '/segments/05.wav',
    '/segments/10.wav',
    '/segments/15.wav',
    '/segments/20.wav',
    '/segments/25.wav',
    '/segments/30.wav',
    '/segments/35.wav',
    '/segments/40.wav',
    '/segments/45.wav',
    '/segments/50.wav',
    '/segments/55.wav'
];

Promise.all(segmentUrls.map(loadSound))
    .then(function(buffers) {
        var startAt = 0;
        buffers.forEach(function(buffer) {
            playSound(startAt, buffer);
            startAt += buffer.duration;
        });
    })
    .catch(function(err) {
        console.error(err);
    });

function playSound(offset, buffer) {
    var source = context.createBufferSource();
    source.buffer = buffer;
    source.connect(context.destination);
    // Schedule relative to the current context time, so the segments
    // queue up back to back even if decoding took a while
    source.start(context.currentTime + offset);
}

function loadSound(url) {
    return new Promise(function(resolve, reject) {
        var request = new XMLHttpRequest();
        request.open('GET', url, true);
        request.responseType = 'arraybuffer';

        request.onload = function onLoad() {
            resolve(decodeAudioData(request.response));
        };

        request.onerror = function onError() {
            reject('Could not request file');
        };
        request.send();
    });
}

function decodeAudioData(audioData) {
    return new Promise(function(resolve, reject) {
        var asset = AV.Asset.fromBuffer(audioData);
        asset.decodeToBuffer(function(buffer) {
            // `buffer` is a Float32Array of decoded samples;
            // Create an AudioBuffer here and resolve() with it
        });
    });
}

2 Answers:

Answer 0 (score: 0)

You have to create an AudioBuffer with the appropriate length and channel count, and then copy the data from one Float32 buffer to the other, channel by channel.
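
A minimal sketch of that copy inside the question's decodeAudioData(), assuming Aurora.js hands decodeToBuffer an interleaved Float32Array and exposes the channel count and sample rate on asset.format (both assumptions about the asker's files, not verified):

function decodeAudioData(audioData) {
    return new Promise(function(resolve, reject) {
        var asset = AV.Asset.fromBuffer(audioData);
        // Assumed: Aurora.js assets emit an 'error' event on decode failure
        asset.on('error', reject);
        asset.decodeToBuffer(function(samples) {
            // Assumed: `samples` is interleaved Float32 data and
            // asset.format carries channelsPerFrame and sampleRate
            var channels = asset.format.channelsPerFrame;
            var frameCount = samples.length / channels;
            var audioBuffer = context.createBuffer(
                channels, frameCount, asset.format.sampleRate);

            // De-interleave: sample i of channel n sits at i * channels + n
            for (var channel = 0; channel < channels; channel++) {
                var channelData = audioBuffer.getChannelData(channel);
                for (var i = 0; i < frameCount; i++) {
                    channelData[i] = samples[i * channels + channel];
                }
            }

            resolve(audioBuffer);
        });
    });
}

With something along these lines, the Promise chain in the question resolves with real AudioBuffer objects and the rest of the playback code works unchanged.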

Answer 1 (score: 0)

Here is the MDN snippet for putting data into an AudioBuffer and then playing it back:

https://developer.mozilla.org/en-US/docs/Web/API/AudioBuffer

// Stereo
var channels = 2;

// Create an empty two second stereo buffer at the
// sample rate of the AudioContext
var audioCtx = new (window.AudioContext || window.webkitAudioContext)();    
var frameCount = audioCtx.sampleRate * 2.0;

var myArrayBuffer = audioCtx.createBuffer(channels, frameCount, audioCtx.sampleRate);

// The snippet assumes a <button> element on the page
var button = document.querySelector('button');
button.onclick = function() {
  // Fill the buffer with white noise;
  // just random values between -1.0 and 1.0
  for (var channel = 0; channel < channels; channel++) {
    // This gives us the actual array that contains the data
    var nowBuffering = myArrayBuffer.getChannelData(channel);
    for (var i = 0; i < frameCount; i++) {
      // Math.random() is in [0; 1.0]
      // audio needs to be in [-1.0; 1.0]
      nowBuffering[i] = Math.random() * 2 - 1;
    }
  }

  // Get an AudioBufferSourceNode.
  // This is the AudioNode to use when we want to play an AudioBuffer
  var source = audioCtx.createBufferSource();

  // set the buffer in the AudioBufferSourceNode
  source.buffer = myArrayBuffer;

  // connect the AudioBufferSourceNode to the
  // destination so we can hear the sound
  source.connect(audioCtx.destination);

  // start the source playing
  source.start();
}
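
The part relevant to the question is the getChannelData() copy loop: the decodeToBuffer callback can fill an AudioBuffer the same way, writing the decoded samples instead of random noise.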