Using HTML5 / Canvas / JavaScript to take browser screenshots

Date: 2011-02-06 06:58:53

Tags: javascript html5 canvas screenshot

Google's "Report a Bug" or "Feedback Tool" lets you select an area of your browser window to create a screenshot that is submitted with your feedback about a bug.

Google Feedback Tool screenshot (screenshot by Jason Small, posted in a duplicate question)

How are they doing this? Google's JavaScript feedback API is loaded from here, and their overview of the feedback module demonstrates the screenshot capability.

6 Answers:

Answer 0 (Score: 1084)

JavaScript can read the DOM and render a fairly accurate representation of it using canvas. I have been working on a script which converts HTML into a canvas image, and today I decided to implement it for sending feedback the way you described.

The script allows you to create feedback forms which include a screenshot, created on the client's browser, along with the form. The screenshot is based on the DOM, so it may not be 100% accurate to the real representation: it does not make an actual screenshot, but builds one from the information available on the page.

It does not require any rendering from the server, as the whole image is created in the client's browser. The HTML2Canvas script itself is still in a very experimental state, as it does not parse nearly as many of the CSS3 properties as I would like it to, nor does it have any support for loading CORS images even if a proxy were available.

Browser compatibility is still quite limited (not because more couldn't be supported, but simply because I haven't had the time to make it more cross-browser compatible).

For more information, have a look at the examples here:

http://hertzen.com/experiments/jsfeedback/

Edit: The html2canvas script is now available separately here, along with some examples here.

编辑2 另一个确认谷歌使用非常类似的方法(实际上,基于文档,唯一的主要区别是他们的异步遍历/绘图方法)可以在Google Feedback团队的Elliott Sprehn的演示文稿中找到: http://www.elliottsprehn.com/preso/fluentconf/

Answer 1 (Score: 63)

Your web app can now take a "native" screenshot of the client's entire desktop using getUserMedia():

Have a look at this example:

https://www.webrtc-experiment.com/Pluginfree-Screen-Sharing/

The client will have to be using Chrome (for now) and will need to enable screen capture support under chrome://flags.
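
For illustration, a rough sketch of what that flag-gated capture looked like at the time (the exact constraint shape is an assumption based on the legacy chromeMediaSource: 'screen' API; in current browsers, navigator.mediaDevices.getDisplayMedia(), used in the answers below, replaces this approach entirely):

// Legacy, flag-gated Chrome screen capture sketch (roughly what the linked demo relied on).
// Requires "Enable screen capture support in getUserMedia()" under chrome://flags.
navigator.webkitGetUserMedia(
    {
        audio: false,
        video: { mandatory: { chromeMediaSource: 'screen' } }
    },
    stream => {
        // attach the desktop stream to a <video> element, or draw a frame onto a canvas
        console.log('got screen stream', stream);
    },
    err => console.error('screen capture failed', err)
);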

Answer 2 (Score: 33)

Get a screenshot as a Canvas or as a JPEG Blob / ArrayBuffer using the getDisplayMedia API:

FIX 1: Use getUserMedia with chromeMediaSource only for Electron.js
FIX 2: Throw an error instead of returning an empty object
FIX 3: Fix the demo to prevent the error: getDisplayMedia must be called from a user gesture handler

// docs: https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getDisplayMedia
// see: https://www.webrtc-experiment.com/Pluginfree-Screen-Sharing/#20893521368186473
// see: https://github.com/muaz-khan/WebRTC-Experiment/blob/master/Pluginfree-Screen-Sharing/conference.js

function getDisplayMedia(options) {
    if (navigator.mediaDevices && navigator.mediaDevices.getDisplayMedia) {
        return navigator.mediaDevices.getDisplayMedia(options)
    }
    if (navigator.getDisplayMedia) {
        return navigator.getDisplayMedia(options)
    }
    if (navigator.webkitGetDisplayMedia) {
        return navigator.webkitGetDisplayMedia(options)
    }
    if (navigator.mozGetDisplayMedia) {
        return navigator.mozGetDisplayMedia(options)
    }
    throw new Error('getDisplayMedia is not defined')
}

function getUserMedia(options) {
    if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
        return navigator.mediaDevices.getUserMedia(options)
    }
    if (navigator.getUserMedia) {
        return navigator.getUserMedia(options)
    }
    if (navigator.webkitGetUserMedia) {
        return navigator.webkitGetUserMedia(options)
    }
    if (navigator.mozGetUserMedia) {
        return navigator.mozGetUserMedia(options)
    }
    throw new Error('getUserMedia is not defined')
}

async function takeScreenshotStream() {
    // see: https://developer.mozilla.org/en-US/docs/Web/API/Window/screen
    const width = screen.width * (window.devicePixelRatio || 1)
    const height = screen.height * (window.devicePixelRatio || 1)

    const errors = []
    let stream
    try {
        stream = await getDisplayMedia({
            audio: false,
            // see: https://developer.mozilla.org/en-US/docs/Web/API/MediaStreamConstraints/video
            video: {
                width,
                height,
                frameRate: 1,
            },
        })
    } catch (ex) {
        errors.push(ex)
    }

    // for electron js
    if (navigator.userAgent.indexOf('Electron') >= 0) {
        try {
            stream = await getUserMedia({
                audio: false,
                video: {
                    mandatory: {
                        chromeMediaSource: 'desktop',
                        // chromeMediaSourceId: source.id,
                        minWidth         : width,
                        maxWidth         : width,
                        minHeight        : height,
                        maxHeight        : height,
                    },
                },
            })
        } catch (ex) {
            errors.push(ex)
        }
    }

    if (errors.length) {
        console.debug(...errors)
        if (!stream) {
            throw errors[errors.length - 1]
        }
    }

    return stream
}

async function takeScreenshotCanvas() {
    const stream = await takeScreenshotStream()

    // from: https://stackoverflow.com/a/57665309/5221762
    const video = document.createElement('video')
    const result = await new Promise((resolve, reject) => {
        video.onloadedmetadata = () => {
            video.play()
            video.pause()

            // from: https://github.com/kasprownik/electron-screencapture/blob/master/index.js
            const canvas = document.createElement('canvas')
            canvas.width = video.videoWidth
            canvas.height = video.videoHeight
            const context = canvas.getContext('2d')
            // see: https://developer.mozilla.org/en-US/docs/Web/API/HTMLVideoElement
            context.drawImage(video, 0, 0, video.videoWidth, video.videoHeight)
            resolve(canvas)
        }
        video.srcObject = stream
    })

    stream.getTracks().forEach(function (track) {
        track.stop()
    })
    
    if (result == null) {
        throw new Error('Cannot take canvas screenshot')
    }

    return result
}

// from: https://stackoverflow.com/a/46182044/5221762
function getJpegBlob(canvas) {
    return new Promise((resolve, reject) => {
        // docs: https://developer.mozilla.org/en-US/docs/Web/API/HTMLCanvasElement/toBlob
        canvas.toBlob(blob => resolve(blob), 'image/jpeg', 0.95)
    })
}

async function getJpegBytes(canvas) {
    const blob = await getJpegBlob(canvas)
    return new Promise((resolve, reject) => {
        const fileReader = new FileReader()

        fileReader.addEventListener('loadend', function () {
            if (this.error) {
                reject(this.error)
                return
            }
            resolve(this.result)
        })

        fileReader.readAsArrayBuffer(blob)
    })
}

async function takeScreenshotJpegBlob() {
    const canvas = await takeScreenshotCanvas()
    return getJpegBlob(canvas)
}

async function takeScreenshotJpegBytes() {
    const canvas = await takeScreenshotCanvas()
    return getJpegBytes(canvas)
}

function blobToCanvas(blob, maxWidth, maxHeight) {
    return new Promise((resolve, reject) => {
        const img = new Image()
        img.onload = function () {
            const canvas = document.createElement('canvas')
            const scale = Math.min(
                1,
                maxWidth ? maxWidth / img.width : 1,
                maxHeight ? maxHeight / img.height : 1,
            )
            canvas.width = img.width * scale
            canvas.height = img.height * scale
            const ctx = canvas.getContext('2d')
            ctx.drawImage(img, 0, 0, img.width, img.height, 0, 0, canvas.width, canvas.height)
            resolve(canvas)
        }
        img.onerror = () => {
            reject(new Error('Error load blob to Image'))
        }
        img.src = URL.createObjectURL(blob)
    })
}

Demo:

document.body.onclick = async () => {
    // take the screenshot
    var screenshotJpegBlob = await takeScreenshotJpegBlob()

    // show preview with max size 300 x 300 px
    var previewCanvas = await blobToCanvas(screenshotJpegBlob, 300, 300)
    previewCanvas.style.position = 'fixed'
    document.body.appendChild(previewCanvas)

    // send it to the server
    var formdata = new FormData()
    formdata.append("screenshot", screenshotJpegBlob)
    // the browser sets the multipart/form-data Content-Type (including the boundary)
    // automatically when the body is a FormData object
    await fetch('https://your-web-site.com/', {
        method: 'POST',
        body: formdata,
    })
}

// and click on the page

Answer 3 (Score: 27)

As Niklas mentioned, you can use the html2canvas library to take a screenshot with JS in the browser. At this point I will extend his answer with an example of taking a screenshot using that library:

function report() {
  let region = document.querySelector("body"); // whole screen
  html2canvas(region, {
    onrendered: function(canvas) {
      let pngUrl = canvas.toDataURL(); // png in dataURL format
      let img = document.querySelector(".screen");
      img.src = pngUrl; 

      // here you can allow user to set bug-region
      // and send it with 'pngUrl' to server
    },
  });
}
.container {
  margin-top: 10px;
  border: solid 1px black;
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/html2canvas/0.4.1/html2canvas.min.js"></script>
<div>Screenshot tester</div>
<button onclick="report()">Take screenshot</button>

<div class="container">
  <img width="75%" class="screen">
</div>

In the report() function, inside the onrendered callback, after getting the image as a data URI you can show it to the user and allow them to draw a "bug region" with the mouse, and then send the screenshot and the region coordinates to the server.

In this example an async/await version was made, with a nice makeScreenshot() function.

Update

A simple example which allows you to take a screenshot, select a region, describe the bug and send a POST request (jsfiddle here) (the main function is report()).

async function report() {
    let screenshot = await makeScreenshot(); // png dataUrl
    let img = q(".screen");
    img.src = screenshot; 
    
    let c = q(".bug-container");
    c.classList.remove('hide')
        
    let box = await getBox();    
    c.classList.add('hide');

    send(screenshot,box); // send post request with bug image, region and description
    alert('To see POST request with image go to: chrome console > network tab');
}

// ----- Helper functions

let q = s => document.querySelector(s); // query selector helper
window.report = report; // bind report be visible in fiddle html

async function  makeScreenshot(selector="body") 
{
  return new Promise((resolve, reject) => {  
    let node = document.querySelector(selector);
    
    html2canvas(node, { onrendered: (canvas) => {
        let pngUrl = canvas.toDataURL();      
        resolve(pngUrl);
    }});  
  });
}

async function getBox(box) {
  return new Promise((resolve, reject) => {
     let b = q(".bug");
     let r = q(".region");
     let scr = q(".screen");
     let send = q(".send");
     let start=0;
     let sx,sy,ex,ey=-1;
     r.style.width=0;
     r.style.height=0;
     
     let drawBox= () => {
         r.style.left   = (ex > 0 ? sx : sx+ex ) +'px'; 
         r.style.top    = (ey > 0 ? sy : sy+ey) +'px';
         r.style.width  = Math.abs(ex) +'px';
         r.style.height = Math.abs(ey) +'px'; 
     }
     
     
     
     //console.log({b,r, scr});
     b.addEventListener("click", e=>{
       if(start==0) {
         sx=e.pageX;
         sy=e.pageY;
         ex=0;
         ey=0;
         drawBox();
       }
       start=(start+1)%3;  		
     });
     
     b.addEventListener("mousemove", e=>{
       //console.log(e)
       if(start==1) {
           ex=e.pageX-sx;
           ey=e.pageY-sy
           drawBox(); 
       }
     });
     
     send.addEventListener("click", e=>{
       start=0;
       let a=100/75 //zoom out img 75%       
       resolve({
          x:Math.floor(((ex > 0 ? sx : sx+ex )-scr.offsetLeft)*a),
          y:Math.floor(((ey > 0 ? sy : sy+ey )-b.offsetTop)*a),
          width:Math.floor(Math.abs(ex)*a),
          height:Math.floor(Math.abs(ey)*a),
          desc: q('.bug-desc').value
          });
          
     });
  });
}

function send(image,box) {

    let formData = new FormData();
    let req = new XMLHttpRequest();
    
    formData.append("box", JSON.stringify(box)); 
    formData.append("screenshot", image);     
    
    req.open("POST", '/upload/screenshot');
    req.send(formData);
}
.bug-container { background: rgb(255,0,0,0.1); margin-top:20px; text-align: center; }
.send { border-radius:5px; padding:10px; background: green; cursor: pointer; }
.region { position: absolute; background: rgba(255,0,0,0.4); }
.example { height: 100px; background: yellow; }
.bug { margin-top: 10px; cursor: crosshair; }
.hide { display: none; }

Answer 4 (Score: 7)

Here is a complete screenshot example that works with Chrome in 2021. The end result is a blob ready to be transmitted. The flow is: request media > grab frame > draw to canvas > transfer to blob. If you want to do it more memory-efficiently, explore OffscreenCanvas or possibly ImageBitmapRenderingContext.

https://jsfiddle.net/v24hyd3q/1/

// Assumes a <canvas> element is already available in scope (e.g. a <canvas id="canvas"> element, as in the linked jsfiddle)
// Request media
navigator.mediaDevices.getDisplayMedia().then(stream => 
{
  // Grab frame from stream
  let track = stream.getVideoTracks()[0];
  let capture = new ImageCapture(track);
  capture.grabFrame().then(bitmap => 
  {
    // Stop sharing
    track.stop();
      
    // Draw the bitmap to canvas
    canvas.width = bitmap.width;
    canvas.height = bitmap.height;
    canvas.getContext('2d').drawImage(bitmap, 0, 0);
      
    // Grab blob from canvas
    canvas.toBlob(blob => {
        // Do things with blob here
        console.log('output blob:', blob);
    });
  });
})
.catch(e => console.log(e));

Answer 5 (Score: 4)

The following example uses getDisplayMedia:

document.body.innerHTML = '<video style="width: 100%; height: 100%; border: 1px black solid;"/>';

navigator.mediaDevices.getDisplayMedia()
.then( mediaStream => {
  const video = document.querySelector('video');
  video.srcObject = mediaStream;
  video.onloadedmetadata = e => {
    video.play();
    video.pause();
  };
})
.catch( err => console.log(`${err.name}: ${err.message}`));

Also worth checking out is the Screen Capture API documentation.
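
To turn the paused frame from the example above into an actual image, one could then draw the video element onto a canvas (a minimal sketch; the PNG format and the console output are arbitrary illustrative choices):

// Sketch: grab the currently displayed video frame as a PNG data URL.
// Run this after the video's metadata has loaded (e.g. from the onloadedmetadata handler above).
const video = document.querySelector('video');
const canvas = document.createElement('canvas');
canvas.width = video.videoWidth;
canvas.height = video.videoHeight;
canvas.getContext('2d').drawImage(video, 0, 0);
const dataUrl = canvas.toDataURL('image/png');
console.log('screenshot data URL length:', dataUrl.length);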