Cross-browser microphone access

Date: 2017-07-11 23:45:22

Tags: javascript, client, webrtc, microphone

I'm looking for a way for a website to access and process the audio stream from the user's microphone. This is unfamiliar territory for me. I've been working from the WebRTC examples, but so far I've only gotten it working on: Firefox and Chrome on a 2011 MacBook Air running Sierra, and Firefox on Windows 10.

On other browser/OS combinations my script either throws errors or simply fails to run properly.

Is there a better solution? Here is my current code: an AudioContext compatibility shim, followed by the capture script itself.

// AudioContext monkeypatch: maps prefixed and legacy Web Audio names
// onto the standard API, and unlocks audio on iOS via a touch gesture.
!function (window) {
  "use strict";

  window.AudioContext = window.AudioContext || window.webkitAudioContext;
  window.OfflineAudioContext = window.OfflineAudioContext || window.webkitOfflineAudioContext;

  var proto = AudioContext.prototype;
  var probe = new AudioContext();
  var isMissing = function (modern, legacy) {
    return modern === undefined && legacy !== undefined;
  };

  // Alias legacy noteOn/noteOff to start/stop on buffer sources.
  var bufferProto = probe.createBufferSource().constructor.prototype;
  if (isMissing(bufferProto.start, bufferProto.noteOn) ||
      isMissing(bufferProto.stop, bufferProto.noteOff)) {
    var origCreateBufferSource = proto.createBufferSource;
    proto.createBufferSource = function () {
      var node = origCreateBufferSource.call(this);
      node.start = node.start || node.noteOn;
      node.stop = node.stop || node.noteOff;
      return node;
    };
  }

  // Same aliasing for oscillators, where the factory exists.
  if (typeof probe.createOscillator === "function") {
    var oscProto = probe.createOscillator().constructor.prototype;
    if (isMissing(oscProto.start, oscProto.noteOn) ||
        isMissing(oscProto.stop, oscProto.noteOff)) {
      var origCreateOscillator = proto.createOscillator;
      proto.createOscillator = function () {
        var node = origCreateOscillator.call(this);
        node.start = node.start || node.noteOn;
        node.stop = node.stop || node.noteOff;
        return node;
      };
    }
  }

  // Alias legacy factory names to their modern equivalents.
  // (The minified original assigned createGainNode to createDelay here,
  // which is a bug; createDelayNode is the correct legacy name.)
  if (proto.createGain === undefined && proto.createGainNode !== undefined)
    proto.createGain = proto.createGainNode;
  if (proto.createDelay === undefined && proto.createDelayNode !== undefined)
    proto.createDelay = proto.createDelayNode;
  if (proto.createScriptProcessor === undefined && proto.createJavaScriptNode !== undefined)
    proto.createScriptProcessor = proto.createJavaScriptNode;

  // iOS requires a user gesture before audio will play: wrap the
  // constructor so the first touchstart plays a silent buffer through
  // the graph, then the unlock listener removes itself.
  if (navigator.userAgent.indexOf("like Mac OS X") !== -1) {
    var RealAudioContext = AudioContext;
    window.AudioContext = function () {
      var audioContext = new RealAudioContext();
      var body = document.body;
      var silentSource = audioContext.createBufferSource();
      var scriptNode = audioContext.createScriptProcessor(256, 1, 1);

      function unlock() {
        silentSource.start(0);
        silentSource.connect(scriptNode);
        scriptNode.connect(audioContext.destination);
      }

      body.addEventListener("touchstart", unlock, false);
      scriptNode.onaudioprocess = function () {
        silentSource.disconnect();
        scriptNode.disconnect();
        body.removeEventListener("touchstart", unlock, false);
        scriptNode.onaudioprocess = null;
      };
      return audioContext;
    };
  }
}(window);
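With the shim applied, code can use the standard names even on prefixed or legacy engines. A minimal usage sketch, assuming the monkeypatch above has already run:

// Assumes the monkeypatch above has already executed.
var ctx = new AudioContext();    // resolves to webkitAudioContext where needed
var gain = ctx.createGain();     // aliased from createGainNode on legacy engines
var osc = ctx.createOscillator();
osc.connect(gain);
gain.connect(ctx.destination);
osc.start(0);                    // aliased from noteOn where needed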

var context, analyser, gUM, dataArray, bufferLength, connect_source;
if (AudioContext) {
    context = new AudioContext();
    analyser = context.createAnalyser();

    function success(stream) {
        // Wire the microphone stream into the audio graph.
        var source = context.createMediaStreamSource(stream);
        var compressor = context.createDynamicsCompressor();
        compressor.threshold.value = -50;
        compressor.knee.value = 40;
        compressor.ratio.value = 12;
        compressor.reduction.value = -20; // note: reduction is read-only in the current spec, so this has no effect
        compressor.attack.value = 0;
        compressor.release.value = 0.25;

        var filter = context.createBiquadFilter();
        filter.Q.value = 8.30;
        filter.frequency.value = 355;
        filter.gain.value = 3.0;
        filter.type = 'bandpass';
        filter.connect(compressor); // nothing is connected downstream of the compressor yet

        source.connect(filter);
        source.connect(analyser); // the analyser receives the raw, unfiltered signal

        analyser.fftSize = 512;
        bufferLength = analyser.frequencyBinCount; // half the FFT size
        dataArray = new Uint8Array(bufferLength);  // array to receive the analyser data
    }

    function fail(e) {
        console.log(e);
        aizuchi.error(); // app-specific error handler
    }

    var select = document.getElementById("AudioSourceSelect");

    function generateSelector(devices) {
        while (select.firstChild) select.removeChild(select.firstChild);
        var opt;
        for (var l = devices.length; l--;) {
            console.log(devices[l]);
            if (devices[l].kind == "audioinput") {
                opt = document.createElement("option");
                opt.text = devices[l].label;
                opt.value = devices[l].deviceId;
                if (devices[l].deviceId == "default") opt.setAttribute("selected", "");
                select.appendChild(opt);
            }
        }
        select.onchange = function () {
            connect_source(this.value);
        };
        select.onchange();
    }

    try {
        navigator.mediaDevices.enumerateDevices().then(generateSelector);
    } catch (e) {
        fail(e);
    }

    connect_source = function (audioSource) {
        try {
            if (Modernizr.getusermedia) {
                // Modernizr.prefixed returns the unbound function; it may
                // need to be invoked with navigator as its this-value.
                gUM = Modernizr.prefixed('getUserMedia', navigator);
                gUM({ video: false, audio: { deviceId: audioSource ? { exact: audioSource } : undefined } }, success, fail);
            } else {
                navigator.mediaDevices.getUserMedia({ video: false, audio: { deviceId: audioSource ? { exact: audioSource } : undefined } }).then(success, fail);
            }
        } catch (e) {
            fail(e);
        }
    };
}
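For comparison, here is a minimal sketch of the unprefixed, promise-based capture path (assuming a secure https origin, which Chrome requires for getUserMedia; the analyser wiring mirrors the success() handler above):

function captureMicrophone() {
    if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
        return Promise.reject(new Error("getUserMedia is not supported here"));
    }
    return navigator.mediaDevices.getUserMedia({ audio: true, video: false });
}

captureMicrophone()
    .then(function (stream) {
        var ctx = new (window.AudioContext || window.webkitAudioContext)();
        var source = ctx.createMediaStreamSource(stream);
        var analyser = ctx.createAnalyser();
        analyser.fftSize = 512;
        source.connect(analyser);
    })
    .catch(function (err) {
        console.log("microphone access failed:", err);
    });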

1 Answer:

Answer 0 (score: 0)

Try:

var AudioContext = window.AudioContext || window.webkitAudioContext;
var context = new AudioContext();

It doesn't look like browsers have unified on this syntax yet.

Source: MDN
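A minimal sketch of that pattern with a support guard (the log messages are illustrative):

var AudioContextClass = window.AudioContext || window.webkitAudioContext;
if (!AudioContextClass) {
    console.log("Web Audio API is not supported in this browser");
} else {
    var context = new AudioContextClass();
    console.log("sample rate:", context.sampleRate); // confirms the context was created
}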
