将两个音频缓冲区叠加到一个缓冲区源中

时间:2014-03-02 23:20:49

标签: javascript html5 audio html5-audio web-audio

尝试将两个缓冲区合并为一个缓冲区; 我已经能够从音频文件创建两个缓冲区并加载和播放它们。现在我需要将两个缓冲区合并到一个缓冲区中。他们怎么能合并?

  // Create the audio context. `webkitAudioContext` is the deprecated
  // prefixed constructor; prefer the standard `AudioContext` and fall
  // back to the prefixed one for older WebKit browsers.
  context = new (window.AudioContext || window.webkitAudioContext)();

  // BufferLoader (defined elsewhere) fetches and decodes each URL into
  // an AudioBuffer, then invokes the callback with the decoded list.
  bufferLoader = new BufferLoader(
    context,
    [
      'audio1.mp3',
      'audio2.mp3',
    ],
    finishedLoading
    );

  // Kick off the asynchronous fetch/decode of both files.
  bufferLoader.load();

// Called once both files are decoded: build one source node per buffer,
// wire them to the destination, and start them at the same time (t=0)
// so they play simultaneously.
function finishedLoading(bufferList) {
  var sources = bufferList.slice(0, 2).map(function (decoded) {
    var node = context.createBufferSource();
    node.buffer = decoded;
    return node;
  });

  // Connect everything first, then start everything, mirroring the
  // usual connect-then-play sequence.
  sources.forEach(function (node) {
    node.connect(context.destination);
  });
  sources.forEach(function (node) {
    node.start(0);
  });
}

现在这些来源分别加载并同时播放;但是如何将这两个源合并到一个缓冲源中呢?我不想追加它们,我想重叠/合并它们。

解释和/或片段会很棒。

2 个答案:

答案 0 :(得分:6)

在音频中,要把两个音频流(此处即两个缓冲区)混合为一个,只需将对应位置的样本值逐一相加即可。实际上,我们可以基于你的代码片段这样做:

/* `buffers` is a javascript array containing all the buffers you want
 * to mix. Returns a new AudioBuffer holding the sample-wise sum of all
 * inputs (mixing digital audio = adding the sample values).
 * Fixes over the original snippet: `in` is a reserved word (syntax
 * error), `getChanneData` was a typo for `getChannelData`, the output
 * buffer was allocated as `out` but read back as the undefined `mixed`,
 * the inner read used `buffers[i]` instead of `buffers[j]`, and the
 * sample loop reused the outer index `i`. */
function mix(buffers) {
  /* Get the maximum duration and maximum number of channels across all
   * buffers, so we can allocate an AudioBuffer of the right size. */
  var maxChannels = 0;
  var maxDuration = 0;
  for (var i = 0; i < buffers.length; i++) {
    if (buffers[i].numberOfChannels > maxChannels) {
      maxChannels = buffers[i].numberOfChannels;
    }
    if (buffers[i].duration > maxDuration) {
      maxDuration = buffers[i].duration;
    }
  }
  var mixed = context.createBuffer(maxChannels,
                                   context.sampleRate * maxDuration,
                                   context.sampleRate);

  for (var j = 0; j < buffers.length; j++) {
    for (var srcChannel = 0; srcChannel < buffers[j].numberOfChannels; srcChannel++) {
      /* Get the channel we will mix into (`in` is reserved in
       * JavaScript, hence the underscore-prefixed names). */
      var _out = mixed.getChannelData(srcChannel);
      /* Get the channel we want to mix in. */
      var _in = buffers[j].getChannelData(srcChannel);
      for (var k = 0; k < _in.length; k++) {
        _out[k] += _in[k];
      }
    }
  }
  return mixed;
}

然后,只需将此函数的返回值赋给一个新的 AudioBufferSourceNode 的 buffer 属性,并像往常一样播放即可。

一些注意事项:为简单起见,我的代码段假定:

  • 如果您有单声道缓冲区和立体声缓冲区,您将只能听到混合缓冲区左声道中的单声道缓冲区。如果你想要它被复制到左边和右边,你将不得不做我们称为 up-mixing ;
  • 如果您希望某个缓冲区比其他缓冲区更安静或更响亮(就像在调音台上移动音量推子一样),只需在相加之前把要混入的每个样本值乘以一个系数:小于 1.0 的系数使其更安静,大于 1.0 的系数使其更响亮。

话虽如此,Web Audio API 本身就能为您完成所有这些(将多个源节点连接到同一个目标节点即可自动混音),所以我不清楚您为什么需要自己做,但至少现在您知道原理了 :-)。

答案 1 :(得分:1)

@Padenot是正确的,但是他的代码中有一些错字,因此如果您复制/粘贴它,它将无法工作。在下面,您可以找到具有更正的相同代码,因此可以使用它。感谢您的帮助@Padenot;)

/* Mix every AudioBuffer in `buffers` into one new AudioBuffer by
 * summing the sample values channel by channel. The result is as long
 * as the longest input and as wide as the input with the most
 * channels. */
function mix(buffers) {

    // Single pass over the inputs to size the output buffer.
    var maxChannels = 0;
    var maxDuration = 0;
    buffers.forEach(function (buffer) {
        maxChannels = Math.max(maxChannels, buffer.numberOfChannels);
        maxDuration = Math.max(maxDuration, buffer.duration);
    });

    // Allocate the destination buffer at the context's sample rate.
    var mixed = context.createBuffer(maxChannels, context.sampleRate * maxDuration, context.sampleRate);

    // Accumulate each input channel into the matching output channel:
    // mixing digital audio is simply adding the samples.
    buffers.forEach(function (buffer) {
        for (var channel = 0; channel < buffer.numberOfChannels; channel++) {

            var dst = mixed.getChannelData(channel);   // channel we mix into
            var src = buffer.getChannelData(channel);  // channel we mix in

            for (var sample = 0; sample < src.length; sample++) {
                dst[sample] += src[sample];
            }
        }
    });

    return mixed;
}