HTML5: AudioContext AudioBuffer

Date: 2016-07-09 14:00:46

Tags: javascript html5 webrtc web-audio

I need to understand how audio buffers work, and to do that I want to build the following chain: Microphone -> Auto -> Processor -> Manual -> Buffer -> Auto -> Speakers. "Auto" means automatic data transfer, and "Manual" means I move the data myself in the code of processor.onaudioprocess. So I have the following code:

navigator.getUserMedia = navigator.getUserMedia ||
    navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
var audioContext;
var myAudioBuffer;
var microphone;
var speakers;
if (navigator.getUserMedia) {
    navigator.getUserMedia(
        {audio: true}, 
        function(stream) {
            audioContext = new AudioContext();
            //STEP 1 - we create buffer and its node
            speakers = audioContext.destination;
            myAudioBuffer = audioContext.createBuffer(1, 22050, 44100);
            var bufferNode = audioContext.createBufferSource();
            bufferNode.buffer = myAudioBuffer;
            bufferNode.connect(speakers);
            bufferNode.start();

            //STEP 2- we create microphone and processor
            microphone = audioContext.createMediaStreamSource(stream);
            var processor = (microphone.context.createScriptProcessor ||
                microphone.context.createJavaScriptNode).call(microphone.context, 4096, 1, 1);
            processor.onaudioprocess = function(audioProcessingEvent) {
                var inputBuffer = audioProcessingEvent.inputBuffer;
                var inputData = inputBuffer.getChannelData(0); // we have only one channel
                var nowBuffering = myAudioBuffer.getChannelData(0);
                for (var sample = 0; sample < inputBuffer.length; sample++) {
                  nowBuffering[sample] = inputData[sample];
                }
            }

            microphone.connect(processor);                    

        },
        function() {
            console.log("Error 003.")
        });
}

However, this code doesn't work. There are no errors, just silence. Where is my mistake?

2 Answers:

Answer 0 (Score: 4)

EDIT

So, since the OP definitely wants to use a buffer, I wrote some more code which you can try out on JSFiddle. The tricky part was that you somehow have to pass the input from the microphone through to some "destination" to get it to process.

navigator.getUserMedia = navigator.getUserMedia ||
    navigator.webkitGetUserMedia || navigator.mozGetUserMedia;

// TODO: Figure out what else we need and give the user feedback if their
// browser doesn't support microphone input.
if (navigator.getUserMedia) {
  captureMicrophone();
}

// First Step - Capture microphone and process the input
function captureMicrophone() {
  // process input from microphone
  const processAudio = ev =>
      processBuffer(ev.inputBuffer.getChannelData(CHANNEL));

  // setup media stream from microphone
  const microphoneStream = stream => {
    const microphone = audioContext.createMediaStreamSource(stream);
    microphone.connect(processor);
    // #1 If we don't pass through to the speakers, 'audioprocess' won't be triggered
    processor.connect(mute);
  };
  // TODO: Handle error properly (see todo above - but probably more specific)
  const userMediaError = err => console.error(err);

  // Second step - Process buffer and output to speakers
  const processBuffer = buffer => {
    audioBuffer.getChannelData(CHANNEL).set(buffer);
    // We could move this out but that would affect audio quality
    const source = audioContext.createBufferSource();
    source.buffer = audioBuffer;
    source.connect(speakers);
    source.start();
  }

  const audioContext = new AudioContext();
  const speakers = audioContext.destination;
  // We currently only operate on this one channel; we'd need to add a couple
  // of lines of code if that ever changes
  const CHANNEL = 0;
  const CHANNELS = 1;
  const BUFFER_SIZE = 4096;
  const audioBuffer = audioContext.createBuffer(CHANNELS, BUFFER_SIZE, audioContext.sampleRate);

  const processor = audioContext.createScriptProcessor(BUFFER_SIZE, CHANNELS, CHANNELS);

  // #2 Not strictly needed: we could connect straight to the speakers since
  // there's no output data anyway, but this makes sure we don't output anything
  const mute = audioContext.createGain();
  mute.gain.value = 0;
  mute.connect(speakers);

  processor.addEventListener('audioprocess', processAudio);
  navigator.getUserMedia({audio: true}, microphoneStream, userMediaError);
}


The code I wrote above looks quite dirty to me, but since you have a large project you can definitely structure it much more cleanly.

I've no clue what you're trying to achieve, but I definitely also recommend having a look at Recorder.js.
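
For reference, here's a minimal sketch of what a Recorder.js session typically looks like. The constructor and method names (new Recorder(sourceNode), record(), stop(), exportWAV(callback)) are assumed from the classic library; check its README for the exact signatures.

// Hypothetical Recorder.js usage; 'microphone' is the MediaStreamAudioSourceNode
// created via createMediaStreamSource as above.
const recorder = new Recorder(microphone);

recorder.record(); // start buffering microphone input

// ... later, e.g. when the user clicks "stop":
recorder.stop();
recorder.exportWAV(blob => {
  // Play the recorded WAV back through an Audio element
  const audioUrl = URL.createObjectURL(blob);
  new Audio(audioUrl).play();
});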

Previous answer

The main point you're missing is that the 'audioprocess' event already hands you an output buffer, so all the createBuffer work you're doing is unnecessary. Apart from that, you're on the right track.

This would be a working solution. Try it out on JSFiddle!

navigator.getUserMedia = navigator.getUserMedia ||
    navigator.webkitGetUserMedia || navigator.mozGetUserMedia;

if (navigator.getUserMedia) {
  captureMicrophone();
}
function captureMicrophone() {
  const audioContext = new AudioContext();
  const speaker = audioContext.destination;
  const processor = audioContext.createScriptProcessor(4096, 1, 1);

  const processAudio =
      ev => {
        const CHANNEL = 0;
        const inputBuffer = ev.inputBuffer;
        const outputBuffer = ev.outputBuffer;
        const inputData = inputBuffer.getChannelData(CHANNEL);
        const outputData = outputBuffer.getChannelData(CHANNEL);

        // TODO: manually do something with the audio
        for (let i = 0; i < inputBuffer.length; ++i) {
          outputData[i] = inputData[i];
        }
      };

  const microphoneStream =
      stream => {
        const microphone = audioContext.createMediaStreamSource(stream);
        microphone.connect(processor);
        processor.connect(speaker);
      };

  // TODO: handle error properly
  const userMediaError = err => console.error(err);

  processor.addEventListener('audioprocess', processAudio);
  navigator.getUserMedia({audio: true}, microphoneStream, userMediaError);
}

Answer 1 (Score: 2)

Are you getting silence (i.e. your onaudioprocess is being called, but the buffers are empty), or nothing at all (i.e. your onaudioprocess is never called)?
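
A quick way to tell the two apart (an illustrative sketch; 'processor' stands for your ScriptProcessorNode):

// Log once from the handler: if this never prints, the node is never pulled;
// if it prints only zeros, you're getting silence from the microphone.
let seen = false;
processor.onaudioprocess = function(ev) {
  if (!seen) {
    seen = true;
    const data = ev.inputBuffer.getChannelData(0);
    console.log('onaudioprocess fired; first sample:', data[0]);
  }
};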

If it's the latter, try connecting the script processor to context.destination. Even if you don't use the output, some implementations currently require that connection to pull data through the node.
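
A minimal sketch of that workaround; if you don't want to hear the processor's raw output, route it through a zero-gain node (the same mute trick used in the first answer):

// Keep the graph "pulled" without audible output (names are illustrative).
const silence = audioContext.createGain();
silence.gain.value = 0; // inaudible, but keeps 'audioprocess' firing
processor.connect(silence);
silence.connect(audioContext.destination);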