Web Audio API: stream the output of audio tags over http while they play

Date: 2016-04-06 13:20:12

Tags: node.js html5 audio electron web-audio-api

I am developing an Electron application that receives song/voice requests through the Telegram Bot API and plays the audio objects in a jukebox/radio fashion.

What I want to achieve is to stream my app's audio output in real time over http to clients connected to a local (nodejs) server.

So basically I need to tap the PCM of all the audio tags while they are playing, mix it (and maybe encode the result as mp3?) and pipe the result to the clients. At least that is my current idea.

Unfortunately I am stuck at capturing the output of the audio objects. I have read about RecordJs and how to record audio from an AudioNode, but I have not found an example of mixing the outgoing streams of multiple audio tags.

Can you help me figure this out?

1 Answer:

Answer 0 (score: 0)

When the Web Audio API renders, the audio is raw PCM (uncompressed). It lives in a memory buffer that is emptied/refilled according to the buffer size you allocate - you can intercept this buffer and copy it into a downstream process that publishes it to your clients.
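A minimal sketch of that intercept-and-copy step (it assumes an existing AudioContext named audioContext and some source node; capture_node and source_node are only illustrative names): getChannelData() returns a view onto a buffer the engine reuses, so copy it into a fresh Float32Array before handing it to anything downstream.

var capture_node = audioContext.createScriptProcessor(16384, 1, 1);

capture_node.onaudioprocess = function (event) {
    // copy, because the underlying buffer gets overwritten on the next callback
    var pcm_chunk = new Float32Array(event.inputBuffer.getChannelData(0));
    // pcm_chunk is one block of raw PCM samples, ready to publish to clients
};

source_node.connect(capture_node); // source_node stands for whatever node carries your audio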

Save the following code as an html file, then from the same dir serve it with

python -m SimpleHTTPServer

Point your browser at http://localhost:8000/ and pick your new html file ... the browser should prompt you for microphone access ... then look at your javascript console (ctrl-shift-i) ... there you will see the first three elements of the FFT and time-domain audio arrays logged each time the buffer fills ... in the code, search for

array_time_domain

which is your raw PCM audio (copying it and sending it out to subscribed clients is left as an exercise for the reader ;-)) ... if you do not need the FFT, comment out the FFT-related code to reduce CPU/battery consumption
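One possible way to do that copy-and-send step is over a WebSocket back to your local node server. Everything here (the ws://localhost:8080/pcm endpoint and the message shape) is an assumption, not something your server already exposes:

var pcm_socket = new WebSocket("ws://localhost:8080/pcm"); // assumed endpoint on your node server
pcm_socket.binaryType = "arraybuffer";

function publish_chunk(typed_array) {
    var chunk = new Uint8Array(typed_array); // copy first - the analyser reuses its arrays
    if (pcm_socket.readyState === WebSocket.OPEN) {
        pcm_socket.send(chunk.buffer);       // ship the raw bytes to the server
    }
}

Calling publish_chunk(array_time_domain) inside the onaudioprocess handler shown below would push each block to the server as it arrives.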

Note - the onaudioprocess callback is invoked repeatedly as audio is pumped through, so make sure the copy described above is very efficient and finishes faster than the cycle time between audio buffer refreshes (hint: Web Worker).
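A sketch of that Web Worker hint (the pcm-worker.js file name is an assumption): keep the audio callback down to a copy plus a postMessage, and let the worker do the heavier mixing/encoding work off that path.

var pcm_worker = new Worker("pcm-worker.js"); // assumed worker file holding your mixing/encoding logic

function hand_off_chunk(float32_chunk) {
    var copy = new Float32Array(float32_chunk);          // copy out of the reused audio buffer
    pcm_worker.postMessage(copy.buffer, [copy.buffer]);  // transfer the copy, no second clone
}

// inside pcm-worker.js, roughly:
// onmessage = function (event) {
//     var samples = new Float32Array(event.data);
//     // mix / encode / forward to the server here, off the audio callback path
// };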

Here I use the microphone as the input source audio. This internal callback render loop is the same regardless of where the source audio comes from.
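Since the render loop does not care about the source, the audio tags from your question can feed the same kind of graph. A rough sketch of mixing several tags and tapping the mix (the element ids, the shared AudioContext and the buffer size are all assumptions, not part of the example below):

var audioContext = new AudioContext();
var mix_node = audioContext.createGain();                // everything sums into this node

["song_a", "song_b"].forEach(function (id) {             // assumed <audio id="song_a"> ... tags
    var element = document.getElementById(id);
    var source = audioContext.createMediaElementSource(element);
    source.connect(mix_node);                            // multiple connections to one node are summed
});

mix_node.connect(audioContext.destination);              // still audible locally

var tap_node = audioContext.createScriptProcessor(16384, 1, 1);
tap_node.onaudioprocess = function (event) {
    var mixed_pcm = new Float32Array(event.inputBuffer.getChannelData(0));
    // mixed_pcm is the combined PCM of every playing tag - publish it to clients here
};
mix_node.connect(tap_node);
tap_node.connect(audioContext.destination);              // some browsers need this for the callback to fire

The full microphone-based html example described above follows.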

<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
<title>capture microphone then show time & frequency domain output</title>

<script type="text/javascript">

var webaudio_tooling_obj = function () {

    var audioContext = new (window.AudioContext || window.webkitAudioContext)();

    console.log("audio is starting up ...");

    var BUFF_SIZE_RENDERER = 16384;
    var SIZE_SHOW = 3; // number of array elements to show in console output

    var audioInput = null,
        microphone_stream = null,
        gain_node = null,
        script_processor_node = null,
        script_processor_analysis_node = null,
        analyser_node = null;

    if (!navigator.getUserMedia)
        navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia ||
            navigator.mozGetUserMedia || navigator.msGetUserMedia;

    if (navigator.getUserMedia){

        navigator.getUserMedia({audio:true}, 
            function(stream) {
                start_microphone(stream);
            },
            function(e) {
                alert('Error capturing audio.');
            }
            );

    } else { alert('getUserMedia not supported in this browser.'); }

    // ---

    function show_some_data(given_typed_array, num_row_to_display, label) {

        var size_buffer = given_typed_array.length;
        var index = 0;

        console.log("__________ " + label);

        if (label === "time") {

            for (; index < num_row_to_display && index < size_buffer; index += 1) {

                var curr_value_time = (given_typed_array[index] / 128) - 1.0;

                console.log(curr_value_time);
            }

        } else if (label === "frequency") {

            for (; index < num_row_to_display && index < size_buffer; index += 1) {

                console.log(given_typed_array[index]);
            }

        } else {

            throw new Error("ERROR - must pass time or frequency");
        }
    }

    function process_microphone_buffer(event) {

        // just mono - 1 channel for now; this is where you would copy the Float32Array
        // out and publish the raw PCM downstream
        var microphone_output_buffer = event.inputBuffer.getChannelData(0);
    }

    function start_microphone(stream){

        gain_node = audioContext.createGain();
        gain_node.connect( audioContext.destination );

        microphone_stream = audioContext.createMediaStreamSource(stream);
        microphone_stream.connect(gain_node); 

        script_processor_node = audioContext.createScriptProcessor(BUFF_SIZE_RENDERER, 1, 1);
        script_processor_node.onaudioprocess = process_microphone_buffer;

        microphone_stream.connect(script_processor_node);
        // note: some browsers only fire onaudioprocess when the script processor is also
        // connected (directly or indirectly) to audioContext.destination

        // --- enable volume control for output speakers

        document.getElementById('volume').addEventListener('change', function() {

            var curr_volume = this.value;
            gain_node.gain.value = curr_volume;

            console.log("curr_volume ", curr_volume);
        });

        // --- setup FFT

        script_processor_analysis_node = audioContext.createScriptProcessor(2048, 1, 1);
        script_processor_analysis_node.connect(gain_node); // keep it in the output chain so its callback fires

        analyser_node = audioContext.createAnalyser();
        analyser_node.smoothingTimeConstant = 0;
        analyser_node.fftSize = 2048;

        microphone_stream.connect(analyser_node);

        analyser_node.connect(script_processor_analysis_node);

        var buffer_length = analyser_node.frequencyBinCount;

        var array_freq_domain = new Uint8Array(buffer_length);
        var array_time_domain = new Uint8Array(buffer_length);

        console.log("buffer_length " + buffer_length);

        script_processor_analysis_node.onaudioprocess = function() {

            // get the average for the first channel
            analyser_node.getByteFrequencyData(array_freq_domain);
            analyser_node.getByteTimeDomainData(array_time_domain);

            // log a few values from each array (playbackState does not exist on a
            // MediaStreamAudioSourceNode, so the guard that wrapped these calls was a no-op and is dropped)
            show_some_data(array_freq_domain, SIZE_SHOW, "frequency");
            show_some_data(array_time_domain, SIZE_SHOW, "time"); // copy this to aggregate into a buffer/file
        };
    }

}(); //  webaudio_tooling_obj = function()

</script>

</head>
<body>

    <p>Volume</p>
    <!-- starts at 0 so the microphone is not fed straight back out through the speakers -->
    <input id="volume" type="range" min="0" max="1" step="0.1" value="0.0"/>

</body>
</html>
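On the node side, a minimal relay under the same assumptions (the ws npm package and port 8080 are assumptions): it takes whatever binary chunks one client publishes and rebroadcasts them to every other connected client. Encoding to mp3 and serving over plain http would need an encoder on top of this and is not covered here.

var WebSocket = require("ws");                      // npm install ws
var server = new WebSocket.Server({ port: 8080 });  // assumed port

server.on("connection", function (socket) {
    socket.on("message", function (chunk) {
        // rebroadcast whatever the publisher sends to every other connected client
        server.clients.forEach(function (client) {
            if (client !== socket && client.readyState === WebSocket.OPEN) {
                client.send(chunk);
            }
        });
    });
});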