onaudioprocess not firing in HTML5 recorder

Asked: 2014-07-08 06:13:49

Tags: javascript html5-audio

I am building an HTML5 voice recorder with a visualizer, but at runtime I get an undeclared-variable error related to onaudioprocess; it seems onaudioprocess never runs at all. What is going wrong? Here is my complete code:

// variables
var leftchannel = [];
var rightchannel = [];
var recorder = null;
var recording = false;
var recordingLength = 0;
var volume = null;
var audioInput = null;
var sampleRate = 44100;
var audioContext = null;
var context = null;
var outputString;


if (!navigator.getUserMedia)
    navigator.getUserMedia = navigator.getUserMedia ||
                         navigator.webkitGetUserMedia ||
                         navigator.mozGetUserMedia || 
                         navigator.msGetUserMedia;

if (navigator.getUserMedia){
    navigator.getUserMedia({audio:true}, success, function(e) {
alert('Error capturing audio.');
});
} else alert('getUserMedia not supported in this browser.');



function getVal(value) {


if ( value == "record"){
    recording = true;
    // reset the buffers for the new recording
    leftchannel.length = rightchannel.length = 0;
    recordingLength = 0;
    document.getElementById('output').innerHTML="Recording now...";


} else if ( value == "stop" ){

    // we stop recording
    recording = false;
    document.getElementById('output').innerHTML="Building wav file...";

    // we flatten the left and right channels down
    var leftBuffer = mergeBuffers ( leftchannel, recordingLength );
    var rightBuffer = mergeBuffers ( rightchannel, recordingLength );
    // we interleave both channels together
    var interleaved = interleave ( leftBuffer, rightBuffer );



    var buffer = new ArrayBuffer(44 + interleaved.length * 2);
    var view = new DataView(buffer);

    // RIFF chunk descriptor
    writeUTFBytes(view, 0, 'RIFF');
    view.setUint32(4, 44 + interleaved.length * 2, true);
    writeUTFBytes(view, 8, 'WAVE');
    // FMT sub-chunk
    writeUTFBytes(view, 12, 'fmt ');
    view.setUint32(16, 16, true);
    view.setUint16(20, 1, true);
    // stereo (2 channels)
    view.setUint16(22, 2, true);
    view.setUint32(24, sampleRate, true);
    view.setUint32(28, sampleRate * 4, true);
    view.setUint16(32, 4, true);
    view.setUint16(34, 16, true);
    // data sub-chunk
    writeUTFBytes(view, 36, 'data');
    view.setUint32(40, interleaved.length * 2, true);


    var lng = interleaved.length;
    var index = 44;
    var volume = 1;
    for (var i = 0; i < lng; i++){
        view.setInt16(index, interleaved[i] * (0x7FFF * volume), true);
        index += 2;
    }

    var blob = new Blob ( [ view ], { type : 'audio/wav' } );

    // let's save it locally

    document.getElementById('output').innerHTML='Handing off the file now...';
    var url = (window.URL || window.webkitURL).createObjectURL(blob);

    var li = document.createElement('li');
    var au = document.createElement('audio');
    var hf = document.createElement('a');

    au.controls = true;
    au.src = url;
    hf.href = url;
    hf.download = 'audio_recording_' + new Date().getTime() + '.wav';
    hf.innerHTML = hf.download;
    li.appendChild(au);
    li.appendChild(hf);
    recordingList.appendChild(li);

}
}


function success(e) {

    audioContext = window.AudioContext || window.webkitAudioContext;
    context = new audioContext();

    volume = context.createGain();

    // creates an audio node from the microphone incoming stream (source)
    source = context.createMediaStreamSource(e);

    // connect the stream (source) to the gain node
    source.connect(volume);

    var bufferSize = 2048;

    recorder = context.createScriptProcessor(bufferSize, 2, 2);

    // nodes for the visualizer
    analyser = context.createAnalyser();
    analyser.smoothingTimeConstant = 0.3;
    analyser.fftSize = 1024;

    analyser2 = context.createAnalyser();
    analyser2.smoothingTimeConstant = 0.0;
    analyser2.fftSize = 1024;

    splitter = context.createChannelSplitter();

    // when recording happens
    recorder.onaudioprocess = function(e) {

        if (!recording) return;
        var left = e.inputBuffer.getChannelData(0);
        var right = e.inputBuffer.getChannelData(1);

        // get the average of the first channel, bincount is fftsize / 2
        var array = new Uint8Array(analyser.frequencyBinCount);
        analyser.getByteFrequencyData(array);
        var average = getAverageVolume(array);

        // get the average for the second channel
        var array2 = new Uint8Array(analyser2.frequencyBinCount);
        analyser2.getByteFrequencyData(array2);
        var average2 = getAverageVolume(array2);

        // clear the current state
        context.clearRect(0, 0, 60, 130);

        // set the fill style
        context.fillStyle = gradient;

        // create the meters
        context.fillRect(0, 130 - average, 25, 130);
        context.fillRect(30, 130 - average2, 25, 130);
    };

    function getAverageVolume(array) {
        var values = 0;
        var average;

        var length = array.length;

        // get all the frequency amplitudes
        for (var i = 0; i < length; i++) {
            values += array[i];
        }

        average = values / length;
        return average;
    }

    leftchannel.push(new Float32Array(left));
    rightchannel.push(new Float32Array(right));
    recordingLength += bufferSize;

    // we connect the recorder (node) to the destination (speakers)
    volume.connect(splitter);
    splitter.connect(analyser, 0, 0);
    splitter.connect(analyser2, 1, 0);
    analyser.connect(recorder);
    recorder.connect(context.destination);

}




function mergeBuffers(channelBuffer, recordingLength) {
    var result = new Float32Array(recordingLength);
    var offset = 0;
    var lng = channelBuffer.length;
    for (var i = 0; i < lng; i++) {
        var buffer = channelBuffer[i];
        result.set(buffer, offset);
        offset += buffer.length;
    }
    return result;
}

function interleave(leftChannel, rightChannel) {
    var length = leftChannel.length + rightChannel.length;
    var result = new Float32Array(length);

    var inputIndex = 0;

    for (var index = 0; index < length; ) {
        result[index++] = leftChannel[inputIndex];
        result[index++] = rightChannel[inputIndex];
        inputIndex++;
    }
    return result;
}


function writeUTFBytes(view, offset, string) {
    var lng = string.length;
    for (var i = 0; i < lng; i++) {
        view.setUint8(offset + i, string.charCodeAt(i));
    }
}

Please help.

1 Answer:

Answer 0 (score: 0)

Problem solved. Pushing the left and right channels has to happen inside onaudioprocess. In the code above, these three lines sit at success() scope, where left and right are not defined, so success() throws a ReferenceError before the audio nodes are ever connected, and onaudioprocess never fires. They belong inside the handler:

    leftchannel.push(new Float32Array(left));
    rightchannel.push(new Float32Array(right));
    recordingLength += bufferSize;
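
For reference, a minimal sketch of the corrected handler, assuming the rest of the question's setup (the recording flag, bufferSize, and the channel arrays) stays as it is:

    recorder.onaudioprocess = function(e) {
        if (!recording) return;

        // left and right only exist inside this callback, which is
        // why pushing them at success() scope threw a ReferenceError
        var left = e.inputBuffer.getChannelData(0);
        var right = e.inputBuffer.getChannelData(1);

        // copy each block before storing it; the audio engine may reuse
        // the underlying buffer between calls to this handler
        leftchannel.push(new Float32Array(left));
        rightchannel.push(new Float32Array(right));
        recordingLength += bufferSize;
    };

With the ReferenceError gone, success() runs to completion, the recorder is connected to context.destination, and onaudioprocess starts firing.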