How to separate stereo microphone input channels with the Web Audio API in JavaScript

Time: 2016-08-16 03:48:21

Tags: javascript google-chrome firefox web-audio web-audio-api

I am trying to separate the stereo microphone channels with the following JavaScript and the Web Audio API, but it does not work correctly. The stereo microphone input is designed to split a telephone call into ch0 (talk) and ch1 (receive), and it works fine when recording with QuickTime: the talk and receive voices are separated into L and R. With Firefox, the talk and receive voices come out mixed on channel 0 of the splitter and nothing appears on channel 1. With Chrome, the talk and receive voices are mixed and appear on both channel 0 and channel 1 of the splitter. My goal is to get the talk voice on channel 0 and the receive voice on channel 1.

The function below is called back when microphone permission is granted via navigator.getUserMedia({audio: true}).
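For reference, a minimal sketch (not part of the original code) of how the permission request could instead be made with explicit stereo constraints; whether channelCount: 2 and the disabled processing constraints are honored varies by browser, so treat them as assumptions to test rather than a confirmed fix:

// Sketch only: ask for a two-channel track and disable processing stages that
// can collapse microphone input to mono in some browsers.
navigator.mediaDevices.getUserMedia({
  audio: {
    channelCount: 2,          // request stereo explicitly
    echoCancellation: false,
    noiseSuppression: false,
    autoGainControl: false
  }
}).then(function(stream) {
  var track = stream.getAudioTracks()[0];
  console.log('delivered channels:', track.getSettings().channelCount);
  microphone.onMediaStream(stream);   // "microphone" is a hypothetical Microphone instance
}).catch(function(err) {
  console.error('getUserMedia failed:', err);
});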

Does anyone have any suggestions?



Microphone.prototype.onMediaStream = function(stream) {
  var audioTrack=stream.getAudioTracks();
  var audioStreamTrackName = audioTrack[0].label;
  var audioStreamTrackType = audioTrack[0].kind;
  console.log('onMediaStream: getAudioTracks:[0] '+ audioStreamTrackName +' '+ audioStreamTrackType);
  var AudioCtx = window.AudioContext || window.webkitAudioContext;
  var maxdb = -10.0, mindb = -80.0, fft = 2048, smoothing = 0.8, maxVol = 1.0, minVol = 0.0;
  if (!AudioCtx)
    throw new Error('AudioContext not available');
  if (!this.audioContext)
    this.audioContext = new AudioCtx();
// create two analysers (one per channel)
   this.analyser0 = this.audioContext.createAnalyser();
   this.analyser0.fftSize = fft;
   this.analyser0.minDecibels=mindb;
   this.analyser0.maxDecibels=maxdb;
   this.analyser0.smoothingTimeConstant=smoothing;
   this.analyser1 = this.audioContext.createAnalyser();
   this.analyser1.fftSize = fft;
   this.analyser1.minDecibels=mindb;
   this.analyser1.maxDecibels=maxdb;
   this.analyser1.smoothingTimeConstant=smoothing;
   this.splitter = this.audioContext.createChannelSplitter(2);   // 2ch splitter: stereo in, two mono outputs
   this.merger = this.audioContext.createChannelMerger(2);       // 2ch merger: two mono inputs to stereo
   this.gainCh0 = this.audioContext.createGain();
   this.gainCh1 = this.audioContext.createGain();
   this.gainCh0.gain.value = maxVol;    // max 
   this.gainCh1.gain.value = maxVol;    // max
   if (!this.mic) {
    this.mic = this.audioContext.createScriptProcessor(8192,2,2);
   }
   this.mic.onaudioprocess = this._onaudioprocess.bind(this);
   this.stream = stream;
   this.audioInput = this.audioContext.createMediaStreamSource(stream);
// audio-nodes connections   
   this.audioInput.connect(this.splitter);
   this.splitter.connect(this.gainCh0, 0);     // connect splitter output channel0 to gainCh0
   this.splitter.connect(this.gainCh1, 1);     // connect splitter output channel1 to gainCh1
   this.gainCh0.connect(this.merger, 0, 0);    // connect gainCh0 output 0 to merger input channel0
   this.gainCh1.connect(this.merger, 0, 1);    // connect gainCh1 output 0 to merger input channel1
   this.merger.connect(this.mic);              // connect merger to ScriptProcessor
   this.mic.connect(this.audioContext.destination);
   this.gainCh0.connect(this.analyser0);
   this.gainCh1.connect(this.analyser1);
// start recording
   this.onStartRecording();
};
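
The post does not show the _onaudioprocess handler, so the following is only one possible sketch of it (the rms helper is my own): it reads time-domain data from each analyser so the two channels can be compared, and near-identical values on every frame would indicate the input was already downmixed before reaching the splitter.

Microphone.prototype._onaudioprocess = function(e) {
  // Sketch only: compare the two analysers wired up in onMediaStream above.
  var buf0 = new Float32Array(this.analyser0.fftSize);
  var buf1 = new Float32Array(this.analyser1.fftSize);
  this.analyser0.getFloatTimeDomainData(buf0);   // not available in some older browsers
  this.analyser1.getFloatTimeDomainData(buf1);
  function rms(buf) {                            // crude per-channel level
    var sum = 0;
    for (var i = 0; i < buf.length; i++) sum += buf[i] * buf[i];
    return Math.sqrt(sum / buf.length);
  }
  console.log('ch0 RMS:', rms(buf0).toFixed(4), 'ch1 RMS:', rms(buf1).toFixed(4));
};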




Firefox Web Audio debug tool output (screenshot)
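
In case the screenshot is unavailable, here is a small diagnostic sketch (the logNodeChannels name is mine, not from the post) that prints the channel configuration of each node in the graph, which is roughly the information the Firefox debug panel shows:

Microphone.prototype.logNodeChannels = function() {
  // Sketch only: dump channel info for every node created in onMediaStream.
  var nodes = {
    audioInput: this.audioInput,
    splitter: this.splitter,
    gainCh0: this.gainCh0,
    gainCh1: this.gainCh1,
    merger: this.merger,
    mic: this.mic
  };
  Object.keys(nodes).forEach(function(name) {
    var node = nodes[name];
    console.log(name,
                'channelCount=' + node.channelCount,
                'channelCountMode=' + node.channelCountMode,
                'numberOfOutputs=' + node.numberOfOutputs);
  });
};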

0 Answers:

No answers yet.