获取用户音频时出现Google Chrome Javascript问题-不允许启动AudioContext

时间:2019-03-06 15:09:39

标签: javascript google-chrome audiocontext

我有这个Javascript代码,当用户单击麦克风按钮时,该代码用于捕获用户的音频输入。该代码在Mozila Firefox中有效,但是当我在Google Chrome中使用它时,它无法正常工作,并且在控制台中显示此警告/错误-The AudioContext was not allowed to start. It must be resumed (or created) after a user gesture on the page.

// Factory for the voice-chat audio API. Returns an object exposing
// toggleRecording / onAudioTTS / initAudio, or `undefined` on IE
// (note: the original also returns undefined on IE — behavior kept).
var r = function() {
    var api = {};
    var socket;                                       // set by initAudio
    var botConfig = getBotConfig();
    var ttsPlayer = new Audio("data:audio/wav;base64,");
    var isRecording = false;

    if (!botConfig.isIE()) {
        window.AudioContext = window.AudioContext || window.webkitAudioContext;
        // Created at page-load time, i.e. before any user gesture, so under
        // Chrome's autoplay policy this context starts in the "suspended"
        // state. It is resumed below, inside the mic-button click handler.
        var audioCtx = new AudioContext;

        /**
         * Click handler for the microphone button: toggles recording on/off,
         * notifies the server and updates the button/icon colors.
         * @param {Element} btn           the microphone button element
         * @param {Object}  sock          socket used to emit start/end events
         * @param {string}  recBgColor    button background while recording
         * @param {string}  recIconColor  mic icon color while recording
         * @param {string}  idleIconColor mic icon color when idle
         * @param {string}  sessionId
         * @param {string}  botId
         */
        api.toggleRecording = function(btn, sock, recBgColor, recIconColor, idleIconColor, sessionId, botId) {
            // FIX: this runs inside a user gesture (the button click), which
            // is where Chrome allows a suspended AudioContext to be resumed.
            // Without this, capture silently produces no audio on Chrome.
            if (audioCtx.state === "suspended") {
                audioCtx.resume();
            }
            var micIcon = document.getElementsByClassName("fa-microphone")[0];
            if (btn.classList.contains("recording")) {
                btn.classList.remove("recording");
                isRecording = false;
                sock.emit("end-recording", {
                    session_id: sessionId,
                    bot_id: botId
                });
                document.getElementById("btnToggle").setAttribute("style", "background-color:transparent");
                if (micIcon) {
                    micIcon.setAttribute("style", "color:" + idleIconColor);
                }
            } else {
                btn.classList.add("recording");
                isRecording = true;
                sock.emit("start-recording", {
                    numChannels: 1,
                    bps: 16,
                    fps: parseInt(audioCtx.sampleRate),
                    session_id: sessionId,
                    bot_id: botId
                });
                document.getElementById("btnToggle").setAttribute("style", "background-color:" + recBgColor);
                if (micIcon) {
                    micIcon.setAttribute("style", "color:" + recIconColor);
                }
            }
        };

        /**
         * Plays a base64-encoded WAV TTS payload, stopping any clip that is
         * already playing first. pause() may throw on some states, in which
         * case we still attempt playback of the new clip.
         */
        api.onAudioTTS = function(base64Wav) {
            try {
                ttsPlayer.pause();
                playBase64(base64Wav);
            } catch (err) {
                playBase64(base64Wav);
            }
        };

        /**
         * Requests microphone access and wires up the capture graph.
         * @param {Object} sock socket used to stream recorded PCM chunks
         * (the two remaining parameters are unused — kept for caller
         * compatibility with the original signature)
         */
        api.initAudio = function(sock, n, r) {
            console.log("audio initiated");
            socket = sock;
            // Legacy vendor-prefixed fallbacks (this code predates
            // navigator.mediaDevices.getUserMedia).
            navigator.getUserMedia || (navigator.getUserMedia = navigator.webkitGetUserMedia || navigator.mozGetUserMedia);
            navigator.cancelAnimationFrame || (navigator.cancelAnimationFrame = navigator.webkitCancelAnimationFrame || navigator.mozCancelAnimationFrame);
            navigator.requestAnimationFrame || (navigator.requestAnimationFrame = navigator.webkitRequestAnimationFrame || navigator.mozRequestAnimationFrame);
            navigator.getUserMedia({
                audio: true
            }, buildCaptureGraph, function(err) {
                alert("Error getting audio");
                console.log(err);
            });
        };

        // Duplicates a mono source onto both stereo channels
        // (splitter output 0 feeds merger inputs 0 and 1).
        var monoToStereo = function(source) {
            var splitter = audioCtx.createChannelSplitter(2);
            var merger = audioCtx.createChannelMerger(2);
            source.connect(splitter);
            splitter.connect(merger, 0, 0);
            splitter.connect(merger, 0, 1);
            return merger;
        };

        // getUserMedia success callback: builds the processing graph and, while
        // recording is on, streams 16-bit little-endian PCM over the socket.
        var buildCaptureGraph = function(stream) {
            var inputGain = audioCtx.createGain();
            var source = audioCtx.createMediaStreamSource(stream);
            monoToStereo(source).connect(inputGain);
            // createJavaScriptNode is the deprecated pre-standard name.
            var processor = (audioCtx.createScriptProcessor || audioCtx.createJavaScriptNode).call(audioCtx, 1024, 1, 1);
            processor.onaudioprocess = function(evt) {
                if (!isRecording) {
                    return;
                }
                var samples = evt.inputBuffer.getChannelData(0);
                var pcm = new ArrayBuffer(2 * samples.length);
                var view = new DataView(pcm);
                for (var idx = 0, off = 0; idx < samples.length; idx++, off += 2) {
                    // Clamp float sample to [-1, 1], scale to signed 16-bit.
                    var sample = Math.max(-1, Math.min(1, samples[idx]));
                    view.setInt16(off, sample < 0 ? 32768 * sample : 32767 * sample, true);
                }
                socket.emit("write-audio", pcm);
            };
            inputGain.connect(processor);
            // A ScriptProcessorNode must reach the destination to fire events.
            processor.connect(audioCtx.destination);
            // Zero-gain path so the microphone is not audibly fed back.
            var muteGain = audioCtx.createGain();
            muteGain.gain.value = 0;
            inputGain.connect(muteGain);
            muteGain.connect(audioCtx.destination);
        };

        // Swaps a new base64 WAV payload into the shared player and plays it.
        var playBase64 = function(base64Wav) {
            ttsPlayer.src = "data:audio/wav;base64," + base64Wav;
            ttsPlayer.play();
        };

        return api;
    }
};

警告/错误出现在 var i = new AudioContext; 这一行。它以前在Google Chrome浏览器上也可以工作，但现在不起作用了。Google开发人员页面上的说明说必须调用resume()，但是我不确定该如何以及在何处调用它。

2 个答案:

答案 0 :(得分:1)

您应该可以在开始使用该AudioContext之前立即调用resume()。重要的是要在用户操作/事件中调用它——例如单击麦克风按钮时。

  

关键点:如果在文档之前创建了AudioContext   收到用户手势后,它将以“已暂停”状态创建,   并且您将需要在收到用户手势后调用resume()。

来自https://developers.google.com/web/updates/2017/09/autoplay-policy-changes

  

它以前也可以在Google Chrome浏览器上运行,但是现在   不起作用。

新政策最近在chrome更新中得到了实施。

答案 1 :(得分:1)

如果您访问麦克风的问题与p5.js有关，请在setup中进行如下操作：
// p5.js entry point, run once at sketch start: create and start the
// microphone input, then resume the (possibly suspended) audio context.
// NOTE(review): resume() here only works if setup itself runs inside a
// user gesture; otherwise use touchStarted below.
function setup() {
  mic = new p5.AudioIn();
  mic.start();
  getAudioContext().resume();
}

或者在文档中添加touchStarted函数。您必须单击网页才能触发此函数。

// p5.js callback fired on the first click/touch on the page. That click
// counts as a user gesture, so the suspended audio context may be
// resumed here under Chrome's autoplay policy.
function touchStarted() {
  getAudioContext().resume();
}