Three.js使用renderTarget的图像数据更新DataTextures

时间:2015-03-09 13:40:23

标签: three.js webgl rendertarget

我正在研究Three.js中的运动检测程序,该程序使用当前帧和上一帧之间的差异。目前,在减法之前,当前帧和前一帧都使用Three.EffectComposer模糊。

主要问题是:我不想再次模糊前一帧,而是希望在减法过程中使用先前模糊的“当前”帧作为纹理。

我设法做到的最接近的方案,是使用下面的函数来更新Three.DataTexture的image.data。它在render()函数中调用:在渲染模糊合成器之后、渲染减法之前。两者都使用Three.CopyShader渲染到屏幕上。

// Copies `image` (any canvas/img/video-like drawable) onto an offscreen
// 2D canvas and returns its RGBA pixel data as a flat Uint8Array
// (4 bytes per pixel, row-major).
function getData(image) {
    var offscreen = document.createElement('canvas');
    var width = image.width;
    var height = image.height;
    offscreen.width = width;
    offscreen.height = height;

    var ctx = offscreen.getContext('2d');
    ctx.drawImage(image, 0, 0);

    var pixels = ctx.getImageData(0, 0, width, height).data;
    return new Uint8Array(pixels);
}

“图像”是renderer.domElement。这种方法效率很低,我需要将Blur-pass渲染到屏幕上,这会导致结果闪烁。

编辑1:当前代码如下所示。它会先分别模糊当前帧和上一帧图像,然后计算二者的差异。重点关注 animate() 函数。

<!DOCTYPE html>
<html lang="en">
<head>
    <title>Three.js Webcam Test</title>
    <meta charset="utf-8">
   <!-- <meta name="viewport" content="width=device-width, user-scalable=no, minimum-scale=1.0, maximum-scale=1.0">
-->
    <script src="//ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js"></script>
    <script src="//ajax.googleapis.com/ajax/libs/jqueryui/1.10.3/jquery-ui.min.js"></script>
    <script src="lib/three.min.js"></script>

    <!-- Effect composer scripts -->
    <script src = "postprocessing/EffectComposer.js"></script>
    <script src = "postprocessing/MaskPass.js"></script>
    <script src = "postprocessing/RenderPass.js"></script>
    <script src = "postprocessing/TexturePass.js"></script>
    <script src = "postprocessing/ShaderPass.js"></script>
    <script src = "postprocessing/SavePass.js"></script>

    <script src = "shaders/CopyShader.js"></script>
    <script src = "shaders/ColorifyShader.js"></script>
    <script src = "shaders/DilationShader.js"></script>
    <script src = "shaders/ErosionShader.js"></script>
    <script src = "shaders/HorizontalBlurShader.js"></script>
    <script src = "shaders/VerticalBlurShader.js"></script>
    <script src = "shaders/BlendShader.js"></script>
    <script src = "shaders/passThroughShader.js"></script>
    <script src = "shaders/framediffShader.js"></script>
    <script src = "shaders/PawaskarPostShader.js"></script>
    <!-- ----------------------- -->

    <script src="lib/Projector.js"></script>
    <script src="lib/CanvasRenderer.js"></script>
    <script src="lib/webcam.js"></script>
    <script src="lib/perspective.js"></script>
    <script src="lib/stats.min.js"></script>
    <script src="lib/rStats.js"></script>
    <script src="lib/rStats.extras.js"></script>
    <script type="text/javascript" src="lib/dat.gui.min.js"></script>

    <link href="css/style.css" rel="stylesheet" type="text/css">
    <link rel="stylesheet" href="http://bootswatch.com/lumen/bootstrap.min.css">
    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap-theme.min.css">
    <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.1/js/bootstrap.min.js"></script>

</head>
<body>

    <h1>Motion Detection (in progress)</h1>
        <p>Press P to print the current frame</p>
<div id="WebGL-output">
</div>

<div id="camera" class="camera">
    <div class="label"></div>
    <video id="theVideo" autoplay width="640" height="480" class="webcam"></video>
    <canvas id="theCanvas"  width="640" height="480" class="hidden"></canvas>
</div>

<script>
    // Fixed processing resolution; matches the <video>/<canvas> elements above.
    var SCREEN_HEIGHT = 480;
    var SCREEN_WIDTH = 640;

    // UI-tunable settings for the detection pipeline (presumably bound to
    // dat.GUI elsewhere — only preprocess/postprocess/threshold are read here).
    var values = {
        detectmotion: true,
        softness: 0.17,
        threshold: 0.11,
        color: "#ffae23",
        usecolor: false,
        postprocess: false,
        postprocessmethod: 0,
        preprocess: true,
        detectedges: false,
        framedifference: false,
        binarydifference: false,
        bufferlength: 1
    };

    // Core scene objects plus the current/previous frame textures.
    // NOTE(review): "bufferLenght" is a typo (vs. values.bufferlength) and is
    // never assigned in this file — confirm before renaming.
    var stats, container, video, renderer, currTexture, uniforms, camera, scene, prevTexture, prevTexture2, prevTexture3, prevTextureBuffer = [], bufferLenght,
        videoContext,
        prevTime;
    // rStats instrumentation handles (unused in the visible code).
    var rS, glS, tS;
    // Optional morphological filters toggled by values.postprocess.
    var postProcessFilters = [];
    // Preprocess (blur) stage: composers for the current and previous frames.
    var prepScene, prepRenderTarget, prepComposer, prepPrevComposer, hBlur, vBlur, temporalShader, prevTemporalShader, prevBlur;
    // Background-model stage handles (declared but not built in this file).
    var modelScene, modelRenderTarget, modelComposer, passShader;
    // Frame-difference (subtraction) stage.
    var subtractScene, subtractRenderTarget,subtractComposer, subtractShader;
    //GUI variables
    var gui, cPostProcessMethod, doPostProcess = false;
    var frameNumber;
    /** TEST **/
    // SavePass holding the blurred current frame; its renderTarget feeds the
    // subtraction shader (and is what the author wants to reuse next frame).
    var BlurSave;

    /**
     * One-time setup: starts the webcam, creates the video-backed textures,
     * the WebGL renderer, and three EffectComposer chains:
     *   1. prepComposer      — mirrors + blurs the CURRENT frame (saved in BlurSave)
     *   2. prepPrevComposer  — mirrors + blurs the PREVIOUS frame (saved in prevBlurSave)
     *   3. subtractComposer  — diffs the two blurred frames, post-processes,
     *                          and copies the result to the screen.
     * Ends by kicking off the animate() render loop.
     */
    function init(){

        frameNumber = 0;
        /* INIT */

        scene = new THREE.Scene();

        // Default THREE.Camera: the full-screen quad shaders ignore projection.
        camera = new THREE.Camera();
        scene.add(camera);

        // Start streaming the first available webcam into #theVideo.
        webcam.updateSources(function(s){
            webcam.start('theVideo',s[0]);
        });

        var size = SCREEN_WIDTH * SCREEN_HEIGHT;

        video = document.getElementById( 'theVideo' );
        videoContext = document.getElementById('theCanvas').getContext('2d');

        //The textures
        // Data is filled per-frame in animate() from the 2D canvas pixels.
        currTexture = new THREE.DataTexture([],SCREEN_WIDTH,SCREEN_HEIGHT);
        prevTexture = new THREE.DataTexture([],SCREEN_WIDTH,SCREEN_HEIGHT);
        prevBlur = new THREE.DataTexture([], SCREEN_WIDTH, SCREEN_HEIGHT);
        // LinearFilter avoids mipmap generation for non-power-of-two 640x480.
        currTexture.minFilter = prevTexture.minFilter = prevBlur.minFilter=  THREE.LinearFilter;

        prevTime = -1;

        renderer = new THREE.WebGLRenderer();
        renderer.setSize(SCREEN_WIDTH, SCREEN_HEIGHT);
        renderer.domElement.width = SCREEN_WIDTH;
        renderer.domElement.height = SCREEN_HEIGHT;
        // autoClear off: multiple composers render with the same renderer per frame.
        renderer.autoClear = false;
        document.body.insertBefore(renderer.domElement, document.body.childNodes[0]);

        // Shared uniforms for the pass-through shader showing the live frame.
        uniforms = {
            currentTexture:   { type: "t", value: currTexture },
            mirrorImage:       { type: "i", value: 0}
        }

        var geometry = new THREE.PlaneBufferGeometry(1, 1);
        var material = new THREE.ShaderMaterial( {
            uniforms: uniforms,
            vertexShader: THREE.passThroughShader.vertexShader,
            fragmentShader: THREE.passThroughShader.fragmentShader

        } );

        // A plane with the current video context as texture
        var mesh = new THREE.Mesh(geometry,material);
        mesh.material.depthTest = false;
        mesh.material.depthWrite = false;
        scene.add(mesh);

        // COPY SHADER, used to render the current context to the screen
        var effectCopy = new THREE.ShaderPass(THREE.CopyShader);
        effectCopy.renderToScreen = true;

        /** Preprocess stage **/

        prepScene = new THREE.Scene();
        prepScene.add( new THREE.AmbientLight( 0xffffff ) );
        // NOTE: adding the mesh here removes it from `scene` (a THREE object
        // can only belong to one parent).
        prepScene.add(mesh) // add the current quad

        var renderTargetParameters = {minFilter: THREE.LinearFilter, magFilter: THREE.LinearFilter, format: THREE.RGBFormat, stencilBuffer: false};

        //blur shaders
        // Separable box blur: one texel step horizontally, then vertically.
        hBlur = new THREE.ShaderPass(THREE.HorizontalBlurShader);
        hBlur.uniforms["h"].value = 1 / SCREEN_WIDTH;
        hBlur.enabled = values['preprocess'];

        vBlur = new THREE.ShaderPass(THREE.VerticalBlurShader);
        vBlur.uniforms["v"].value = 1 / SCREEN_HEIGHT;
        vBlur.enabled = values['preprocess'];

        // Captures the blurred current frame so the subtraction stage can sample it.
        BlurSave = new THREE.SavePass(new THREE.WebGLRenderTarget(SCREEN_WIDTH, SCREEN_HEIGHT, renderTargetParameters));

        //preprocess scene render pass
        var renderModelPrep = new THREE.RenderPass(prepScene, camera);
        var prevPassShader1 = new THREE.ShaderPass(THREE.passThroughShader);
        prevPassShader1.uniforms["mirrorImage"].value = 1;

        //Preprocess of the current image
        //It is this prepComposer's rendertarget value I want to use in the next loop
        prepComposer = new THREE.EffectComposer(renderer, new THREE.WebGLRenderTarget(SCREEN_WIDTH, SCREEN_HEIGHT, renderTargetParameters));
        prepComposer.addPass(renderModelPrep);
        prepComposer.addPass(prevPassShader1);
        prepComposer.addPass(hBlur);
        prepComposer.addPass(vBlur);
        prepComposer.addPass(BlurSave);

        //
       // subtractComposer.addPass(effectCopy);

        //Preprocess of the previous image
        //Want to skip this stage
        // Re-blurs the raw previous frame; the author wants to replace this
        // whole composer by reusing BlurSave's output from the last loop.
        var prevPassShader = new THREE.ShaderPass(THREE.passThroughShader, prevTexture);
        prevPassShader.uniforms["currentTexture"].value = prevTexture;
        prevPassShader.uniforms["mirrorImage"].value = 1;
        var prevBlurSave = new THREE.SavePass(new THREE.WebGLRenderTarget(SCREEN_WIDTH, SCREEN_HEIGHT, renderTargetParameters));

        prepPrevComposer = new THREE.EffectComposer(renderer, new THREE.WebGLRenderTarget(SCREEN_WIDTH, SCREEN_HEIGHT, renderTargetParameters));
        prepPrevComposer.addPass(renderModelPrep);
        prepPrevComposer.addPass(prevPassShader);
        prepPrevComposer.addPass(hBlur);
        prepPrevComposer.addPass(vBlur);
        prepPrevComposer.addPass(prevBlurSave);

        /**------------------**/

        /**---------------------------**/

        /** Background Subtraction stage **/

        subtractScene = new THREE.Scene();
        subtractScene.add( new THREE.AmbientLight( 0xffffff ) );

        var renderTargetParameters3 = {minFilter: THREE.LinearFilter, magFilter: THREE.LinearFilter, format: THREE.RGBAFormat, stencilBuffer: false};

        //Background Subtraction shaders
        // Diff shader samples both saved blur targets directly as textures.
        subtractShader = new THREE.ShaderPass(THREE.framediffShader);
        subtractShader.uniforms['currentTexture'].value = BlurSave.renderTarget; // from the preprocess
        subtractShader.uniforms['previousTexture'].value = prevBlurSave.renderTarget; //modelled background

        //Background subtraction scene render pass
        var renderSubtract = new THREE.RenderPass(subtractScene, camera);

        //Background subtraction Composer
        subtractComposer = new THREE.EffectComposer(renderer, new THREE.WebGLRenderTarget(SCREEN_WIDTH, SCREEN_HEIGHT, renderTargetParameters3));
        subtractComposer.addPass(renderSubtract);
        subtractComposer.addPass(subtractShader);

        //subtractComposer.addPass(effectCopy2);

        /**------------------------------**/

        /** Postprocessing stage **/

        //Dilation
        var dilationFilter = new THREE.ShaderPass(THREE.DilationShader);
        dilationFilter.enabled = values['postprocess'];
        postProcessFilters.push(dilationFilter);
        //Erosion
        var erosionFilter = new THREE.ShaderPass(THREE.ErosionShader);
        erosionFilter.enabled = values['postprocess'];
        postProcessFilters.push(erosionFilter);

        //Pawaskar's postprocess filter
        var pawaskarFilter = new THREE.ShaderPass(THREE.PawaskarPostShader);
        pawaskarFilter.uniforms['threshold'].value = values['threshold'];
        pawaskarFilter.enabled = values['postprocess'];
        postProcessFilters.push(pawaskarFilter);

        subtractComposer.addPass(pawaskarFilter);

        // Morphological opening then closing. NOTE(review): the same pass
        // instances are added twice — confirm EffectComposer runs them twice.
        //Opening
        subtractComposer.addPass(erosionFilter);
        subtractComposer.addPass(dilationFilter);
        //Closing
        subtractComposer.addPass(dilationFilter);
        subtractComposer.addPass(erosionFilter);

        //The final result rendered to the screen
        subtractComposer.addPass(effectCopy);

        /**----------------------**/

        animate();

    }

    /**
     * Per-frame render loop.
     * Grabs the newest webcam frame into the hidden 2D canvas, rolls the
     * current frame's pixels into the previous-frame texture, then runs the
     * three composer chains: blur current, blur previous, subtract & display.
     * Re-schedules itself via requestAnimationFrame.
     */
    function animate()
    {

        if(video.readyState === video.HAVE_ENOUGH_DATA ){
            var time = video.currentTime;
            // Only copy pixels when the video has advanced to a new frame.
            if(time !== prevTime){
                // Firefox can throw NS_ERROR_NOT_AVAILABLE from drawImage
                // while the stream is still warming up, so guard the copy.
                try {

                    videoContext.drawImage(video, 0, 0,SCREEN_WIDTH,SCREEN_HEIGHT); //update the video

                    if(currTexture.image.data.length){

                        // TODO: ideally feed prevTexture/prevBlur from
                        // BlurSave.renderTarget (the already-blurred current
                        // frame) instead of re-blurring the raw previous frame
                        // in prepPrevComposer — see the question above.
                        prevTexture.image.data = currTexture.image.data;
                        prevTexture.needsUpdate = true; //updates the previous texture in the shader
                    }

                    currTexture.image.data = new Uint8Array(videoContext.getImageData(0,0,SCREEN_WIDTH, SCREEN_HEIGHT).data);
                    currTexture.needsUpdate = true; //updates the current texture in the shader
                    prevTime = time;

                }catch (e) {
                    // Strict comparison; only swallow the known Firefox error.
                    if (e.name === "NS_ERROR_NOT_AVAILABLE") {
                        console.error(e);
                    } else {
                        throw e;
                    }
                }
            }
        }

        // Blur current frame, blur previous frame, then diff + postprocess.
        prepComposer.render(0.05);
        prepPrevComposer.render(0.05);
        subtractComposer.render(0.05);

        frameNumber++;
        requestAnimationFrame(animate);
    }

     /**
      * Draws `image` onto a throwaway 2D canvas and returns its raw RGBA
      * bytes as a Uint8Array (4 bytes per pixel).
      */
     function getData(image) {
        var w = image.width;
        var h = image.height;

        var scratch = document.createElement('canvas');
        scratch.width = w;
        scratch.height = h;

        var ctx2d = scratch.getContext('2d');
        ctx2d.drawImage(image, 0, 0);

        return new Uint8Array(ctx2d.getImageData(0, 0, w, h).data);
    }


    /**
     * Keyup handler: when "P" (keyCode 80) is released, snapshots the WebGL
     * canvas as a data URL and appends it to the document as an <img>.
     */
    function copyCanvas(e) {
        // Ignore every key except "P".
        if (e.which !== 80) {
            return;
        }

        var snapshot = renderer.domElement.toDataURL();

        // create a new image and add to the document
        var img = document.createElement("img");
        img.src = snapshot;
        document.body.appendChild(img);
    }


    // Boot the whole pipeline once the page (and the <video> element) exists.
    window.onload = init;

    // "P" key prints the current frame (see copyCanvas above).
    window.addEventListener("keyup", copyCanvas);

</script>

</body>
</html>
  • 如何使用prevBlur.image.data的当前图像数据更新BlurSave.rendertarget

  • 是否有其他方法,可以用上一个时间步的 Sampler2D 图像数据来更新着色器的 WebGLRenderTarget uniform 变量?

1 个答案:

答案 0 :(得分:1)

如果您想使用使用当前帧和上一帧之间差异的减法着色器对场景进行后处理,您可以执行以下操作:

首先创建两个渲染目标 rt1 和 rt2,并设置 currentRT = rt1、prevRT = rt2。

然后在渲染循环中:(1)渲染到 currentRT;(2)将 currentRT 和 prevRT 作为 uniform 传入减法着色器并渲染到屏幕;(3)交换两个渲染目标。

three.js r.70