电子桌面捕捉器到Web API频率全为0

时间:2018-09-17 20:58:01

标签: node.js electron

====已解决====

说实话,我不确定自己做了什么,但我有一个完全正常工作的基于桌面音频的音频条可视化器。我回到原始的代码笔(下面的链接),开始工作并根据需要编辑所有内容以接受媒体流,然后它就可以工作了。

完整代码

const {desktopCapturer} = require('electron')

// Enumerate capturable sources, find the whole-screen source, and hand its
// combined desktop audio/video stream to handleStream().
desktopCapturer.getSources({types: ['window', 'screen']}, (error, sources) => {
  if (error) throw error
  for (let i = 0; i < sources.length; ++i) {
    if (sources[i].name === 'Entire screen') {
      navigator.mediaDevices.getUserMedia({
        // chromeMediaSource: 'desktop' is the Electron/Chromium-specific
        // constraint that selects system (desktop) capture.
        audio: { mandatory : { chromeMediaSource: 'desktop' }},
        video: { mandatory : { chromeMediaSource: 'desktop' }}
      })
      .then((stream) => handleStream(stream))
      // The original chain had no rejection handler, so a getUserMedia
      // failure (permission denied, no source) was silently swallowed.
      .catch((err) => console.error('getUserMedia failed:', err))
      return
    }
  }
})

/**
 * Draws a live frequency-bar visualization of the given MediaStream onto
 * the #canvas element using a Web Audio AnalyserNode.
 * @param {MediaStream} stream - desktop capture stream with an audio track
 */
function handleStream (stream)
{
  const context = new AudioContext()
  const src = context.createMediaStreamSource(stream)
  const analyser = context.createAnalyser()

  const canvas = document.getElementById("canvas")
  canvas.width = window.innerWidth
  canvas.height = window.innerHeight
  const ctx = canvas.getContext("2d")

  // Connecting the source to the analyser is essential — without it
  // getByteFrequencyData() only ever returns zeros (the original bug).
  src.connect(analyser)
  analyser.fftSize = 256

  const bufferLength = analyser.frequencyBinCount // fftSize / 2 bins
  const dataArray = new Uint8Array(bufferLength)
  const WIDTH = canvas.width
  const HEIGHT = canvas.height
  const barWidth = (WIDTH / bufferLength) * 2.5

  function renderFrame()
  {
    requestAnimationFrame(renderFrame)

    analyser.getByteFrequencyData(dataArray)
    ctx.fillStyle = "#000"
    ctx.fillRect(0, 0, WIDTH, HEIGHT)

    let x = 0
    for (let i = 0; i < bufferLength; i++)
    {
      const barHeight = dataArray[i] // bin magnitude, 0-255

      // Green ramps up across the spectrum; red also tracks amplitude.
      const r = barHeight + (25 * (i / bufferLength))
      const g = 250 * (i / bufferLength)
      const b = 50

      ctx.fillStyle = `rgb(${r}, ${g}, ${b})`
      ctx.fillRect(x, HEIGHT - barHeight, barWidth, barHeight)

      x += barWidth + 1
    }
  }
  renderFrame()
}

我用作起点的Codepen https://codepen.io/nfj525/pen/rVBaab

原始帖子

我正在设置一个桌面可视化工具,以图形化用户的桌面音频。

desktopCapture似乎正在抓取媒体,因为将其发送到视频标签会显示流以及媒体的回声。因为我只需要音频,所以我将mediaStream设置为AudioContext的MediaStreamSource;如果将分析器连接到audioCtx的destination,能听到音频回声,说明这一步似乎也工作正常。我遇到的问题是,当我尝试获取频率数据时,它返回的数组全为0。下面是我当前的代码

const {desktopCapturer} = require('electron')

// Original question's capture code: enumerate sources, pick the whole
// screen, and request a combined desktop audio+video stream. This part
// works; the bug is in handleStream() below. Note the .then() chain has
// no .catch(), so a getUserMedia rejection is silently swallowed.
desktopCapturer.getSources({types: ['window', 'screen']}, (error, sources) => {
  if (error) throw error
  for (let i = 0; i < sources.length; ++i) {
     if (sources[i].name === 'Entire screen') {
        navigator.mediaDevices.getUserMedia({
          audio: {
              mandatory : {
                  // Electron/Chromium-specific constraint for desktop capture
                  chromeMediaSource: 'desktop'
              }
          },
          video: {
             mandatory: {
                chromeMediaSource: 'desktop',
             }
          }
        })
        .then((stream) => handleStream(stream))
        return
     }
  }
})

// Broken version from the original question. The analyser is created but
// the MediaStreamSource is never connected to it, so the analyser receives
// no input and any frequency data read from it stays all zeros.
function handleStream (stream) {

   let audioCtx = new AudioContext();
   let source = audioCtx.createMediaStreamSource(stream);
   let analyser = audioCtx.createAnalyser()

   // BUG: missing `source.connect(analyser)` — without it the analyser
   // has no input. Routing analyser -> destination (below) only produces
   // an echo; it does not feed audio INTO the analyser.
   //uncommenting results in echo, but still all 0s
   //analyser.connect(audioCtx.destination)

   analyser.fftSize = 256
   let bufferLength = analyser.frequencyBinCount
   let dataArray = new Uint8Array(bufferLength)

   // Logged once, before getByteFrequencyData() is ever called on it —
   // a freshly constructed Uint8Array is zero-filled, so this prints all 0s
   // regardless of the audio graph.
   console.log(dataArray)

}

=====编辑=====

我已经能够完成这项工作;有点,但仍然遇到一个小问题。

1)我已经用 source.connect(analyser) 把信号源连接到了分析器

2)必须用 getByteTimeDomainData 来填充时域的dataArray

3)尚存的问题是,当没有媒体在播放时,dataArray中的值在126、127、128附近波动,从而使条形几乎在整个高度上"跳舞"。

4)FPS看起来也非常快,但是有一些解决方案

当前有效的代码:

const {desktopCapturer} = require('electron')

// Capture code from the asker's "working" edit — identical to the original:
// enumerate sources, pick 'Entire screen', request desktop audio+video.
// Still lacks a .catch() on the getUserMedia promise chain.
desktopCapturer.getSources({types: ['window', 'screen']}, (error, sources) => {
  if (error) throw error
  for (let i = 0; i < sources.length; ++i) {
     if (sources[i].name === 'Entire screen') {
        navigator.mediaDevices.getUserMedia({
          audio: {
              mandatory : {
                  // Electron/Chromium-specific desktop-capture constraint
                  chromeMediaSource: 'desktop'
              }
          },
          video: {
             mandatory: {
                chromeMediaSource: 'desktop',
             }
          }
        })
        .then((stream) => handleStream(stream))
        return
     }
  }
})

// Mostly-working version from the asker's edit: the source is now connected
// to the analyser, but the draw loop reads TIME-DOMAIN data, not frequency
// data — which explains issue (3): byte time-domain samples are centered on
// 128 during silence, so the bars "dance" near full height even with no audio.
function handleStream (stream) {

   const audioCtx = new AudioContext()
   let source = audioCtx.createMediaStreamSource(stream)
   let analyser = audioCtx.createAnalyser()
   source.connect(analyser) //Had To Connect Source To Analyser
   analyser.fftSize = 128

   let bufferLength = analyser.frequencyBinCount
   let dataArray = new Uint8Array(bufferLength)

   let canvas = document.getElementById("canvas")
   canvas.width = window.innerWidth
   canvas.height = window.innerHeight
   let canvasCtx = canvas.getContext("2d")

   let WIDTH = canvas.width;
   let HEIGHT = canvas.height;
   let barWidth = (WIDTH / bufferLength);
   let barHeight;
   // NOTE(review): this outer `x` is shadowed by `var x` inside draw() and
   // is therefore never actually used.
   let x = 0;

   function draw() {

      // NOTE(review): drawVisual (the rAF handle) is stored but never used
      // for cancellation, so the loop can never be stopped.
      var drawVisual = requestAnimationFrame(draw)
      analyser.getByteTimeDomainData(dataArray) //added to the draw to fill the dataArray

      canvasCtx.fillStyle = "#000";
      canvasCtx.fillRect(0, 0, WIDTH, HEIGHT);

      var x = 0;

      for (let i = 0; i < bufferLength; i++) {
        // Time-domain samples: silence reads ~128 (midpoint), not 0 —
        // use getByteFrequencyData for amplitude bars instead.
        barHeight = dataArray[i];

        let r = 50
        let g = 250
        let b = 50

        canvasCtx.fillStyle = `rgb(${r}, ${g}, ${b})`
        canvasCtx.fillRect(x, HEIGHT - barHeight, barWidth, barHeight);

        x += barWidth + 1;
      }
   }
draw()
}

0 个答案:

没有答案