Volume rendering with Three.js and WebGL is not displayed correctly in VR

Date: 2019-01-17 03:10:55

Tags: three.js webgl webvr

I am learning how to implement volume rendering with WebGL and Three.js. I came across Lebarba's demo (http://www.lebarba.com/) and I would like to build on it and bring the volume-rendered result into VR.

I looked at some of the Three.js examples that implement VR, so I included WebVR.js, enabled VR on the renderer, added the WEBVR button, and drove the render loop with renderer.setAnimationLoop(render).
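Concretely, the VR-related setup is just these calls (the same ones that appear in the full listing below):

    renderer = new THREE.WebGLRenderer( { antialias: true } );
    renderer.vr.enabled = true;                                  //hand rendering over to the WebVR device
    document.body.appendChild( WEBVR.createButton( renderer ) ); //"ENTER VR" button provided by WebVR.js
    renderer.setAnimationLoop( render );                         //VR requires setAnimationLoop instead of requestAnimationFrame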

When I run the program in the browser, the rendered result looks fine on screen, and I can rotate it interactively.

But when I put on the Oculus and enter VR mode, I only see a nearly solid cube with no texture. Looking up in the HMD, I can see part of the texture, but it moves in the opposite direction to my head. The texture and the box seem to be separated, as if sceneFirstPass and sceneSecondPass were not being composited together.

Does VR mode change the relative positions of the two scenes?

The full project is here: https://github.com/zhudongwork/volumeRendering
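One experiment I am considering (a sketch only; I have not confirmed that it helps) is to keep the VR camera pose out of the first pass by toggling renderer.vr.enabled around the render-to-texture call:

    function render() {
        //Render the first pass with the plain perspective camera, so that
        //rtTexture is not affected by the HMD pose.
        renderer.vr.enabled = false;
        renderer.render( sceneFirstPass, camera, rtTexture, true );
        //Re-enable VR for the second pass, which draws to the headset.
        renderer.vr.enabled = true;
        renderer.render( sceneSecondPass, camera );
    }

Even if that works, each eye's projection matrix would still differ from the camera used for the first pass, so the projectedCoords lookup in the second-pass shader could stay misaligned.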

<html lang="en">
<head>
    <title>VR View</title>
    <meta charset="utf-8">
    <style>
        body {
            color: #ffffff;
            font-family: Monospace;
            font-size: 13px;
            text-align: center;
            font-weight: bold;

            background-color: #050505;
            margin: 0px;
            overflow: hidden;
        }

        #info {
            position: absolute;
            top: 0px; width: 100%;
            padding: 5px;
        }

        a {
            color: #ffffff;
        }

        #oldie a { color:#da0 }
    </style>
</head>
<body>
    <div id="container">
        <div>Transfer function</div>
        0.0<img id="transferFunctionImg" style="align:right"/>1.0
    </div>
    <script src="js/three.js"></script>
    <script src="js/OrbitControls.js"></script>
    <script src="js/WebVR.js"></script>

    <script id="fragmentShaderFirstPass" type="x-shader/x-fragment">
        varying vec3 worldSpaceCoords;

        void main()
        {
            //The fragment's world space coordinates as fragment output.
            gl_FragColor = vec4( worldSpaceCoords.x , worldSpaceCoords.y, worldSpaceCoords.z, 1 );
        }
    </script>
    <script id="vertexShaderFirstPass" type="x-shader/x-vertex">
        varying vec3 worldSpaceCoords;

        void main()
        {
            //Set the world-space coordinates of the back-face vertices as output.
            worldSpaceCoords = position + vec3(0.5, 0.5, 0.5); //move it from [-0.5;0.5] to [0,1]
            gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
        }
    </script>
    <script id="fragmentShaderSecondPass" type="x-shader/x-fragment">
        varying vec3 worldSpaceCoords;
        varying vec4 projectedCoords;
        uniform sampler2D tex, cubeTex, transferTex;
        uniform float steps;
        uniform float alphaCorrection;
        // The maximum distance through our rendering volume is sqrt(3).
        // The maximum number of steps we take to travel a distance of 1 is 512.
        // ceil( sqrt(3) * 512 ) = 887
        // This prevents the back of the image from getting cut off when steps=512 & viewing diagonally.
        const int MAX_STEPS = 887;

        //Acts like a texture3D using Z slices and trilinear filtering.
        vec4 sampleAs3DTexture( vec3 texCoord )
        {
            vec4 colorSlice1, colorSlice2;
            vec2 texCoordSlice1, texCoordSlice2;

            //The z coordinate determines which Z slice we have to look for.
            //Z slice number goes from 0 to 255.
            float zSliceNumber1 = floor(texCoord.z  * 255.0);

            //Since we use trilinear filtering, we also need the next Z slice.
            float zSliceNumber2 = min( zSliceNumber1 + 1.0, 255.0); //Clamp to 255

            //The Z slices are stored in a matrix of 16x16 of Z slices.
            //The original UV coordinates have to be rescaled by the tile numbers in each row and column.
            texCoord.xy /= 16.0;

            texCoordSlice1 = texCoordSlice2 = texCoord.xy;

            //Add an offset to the original UV coordinates depending on the row and column number.
            texCoordSlice1.x += (mod(zSliceNumber1, 16.0 ) / 16.0);
            texCoordSlice1.y += floor((255.0 - zSliceNumber1) / 16.0) / 16.0;

            texCoordSlice2.x += (mod(zSliceNumber2, 16.0 ) / 16.0);
            texCoordSlice2.y += floor((255.0 - zSliceNumber2) / 16.0) / 16.0;

            //Get the opacity value from the 2D texture.
            //Bilinear filtering is done at each texture2D by default.
            colorSlice1 = texture2D( cubeTex, texCoordSlice1 );
            colorSlice2 = texture2D( cubeTex, texCoordSlice2 );

            //Based on the opacity obtained earlier, get the RGB color in the transfer function texture.
            colorSlice1.rgb = texture2D( transferTex, vec2( colorSlice1.a, 1.0) ).rgb;
            colorSlice2.rgb = texture2D( transferTex, vec2( colorSlice2.a, 1.0) ).rgb;

            //How far zSlice1 is from zSlice2; used to interpolate between the two Z slices.
            float zDifference = mod(texCoord.z * 255.0, 1.0);

            //Finally interpolate between the two intermediate colors of each Z slice.
            return mix(colorSlice1, colorSlice2, zDifference) ;
        }


        void main( void ) {

            //Transform the coordinates from [-1,1] to [0,1].
            vec2 texc = vec2(((projectedCoords.x / projectedCoords.w) + 1.0 ) / 2.0,
                            ((projectedCoords.y / projectedCoords.w) + 1.0 ) / 2.0 );
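            //Note: this lookup assumes tex was rendered with the same camera and
            //projection as this pass; with per-eye cameras in VR that assumption
            //may no longer hold.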

            //The back position is the world space position stored in the texture.
            vec3 backPos = texture2D(tex, texc).xyz;

            //The front position is the world space position of the second render pass.
            vec3 frontPos = worldSpaceCoords;

            //The direction from the front position to back position.
            vec3 dir = backPos - frontPos;

            float rayLength = length(dir);

            //Calculate how long to increment in each step.
            float delta = 1.0 / steps;

            //The increment in each direction for each step.
            vec3 deltaDirection = normalize(dir) * delta;
            float deltaDirectionLength = length(deltaDirection);

            //Start the ray casting from the front position.
            vec3 currentPosition = frontPos;

            //The color accumulator.
            vec4 accumulatedColor = vec4(0.0);

            //The alpha value accumulated so far.
            float accumulatedAlpha = 0.0;

            //How long has the ray travelled so far.
            float accumulatedLength = 0.0;

            //If we have twice as many samples, we only need ~1/2 the alpha per sample.
            //Scaling by 256/10 just happens to give a good value for the alphaCorrection slider.
            float alphaScaleFactor = 25.6 * delta;

            vec4 colorSample;
            float alphaSample;

            //Perform the ray marching iterations
            for(int i = 0; i < MAX_STEPS; i++)
            {
                //Get the voxel intensity value from the 3D texture.
                colorSample = sampleAs3DTexture( currentPosition );

                //Allow the alpha correction customization.
                alphaSample = colorSample.a * alphaCorrection;

                //Applying this effect to both the color and alpha accumulation results in more realistic transparency.
                alphaSample *= (1.0 - accumulatedAlpha);

                //Scaling alpha by the number of steps makes the final color invariant to the step size.
                alphaSample *= alphaScaleFactor;

                //Perform the composition.
                accumulatedColor += colorSample * alphaSample;

                //Store the alpha accumulated so far.
                accumulatedAlpha += alphaSample;

                //Advance the ray.
                currentPosition += deltaDirection;
                accumulatedLength += deltaDirectionLength;

                //If the length traversed is more than the ray length, or if the alpha accumulated reaches 1.0 then exit.
                if(accumulatedLength >= rayLength || accumulatedAlpha >= 1.0 )
                    break;
            }

            gl_FragColor  = accumulatedColor;

        }
    </script>

    <script id="vertexShaderSecondPass" type="x-shader/x-vertex">
        varying vec3 worldSpaceCoords;
        varying vec4 projectedCoords;

        void main()
        {
            worldSpaceCoords = (modelMatrix * vec4(position + vec3(0.5, 0.5,0.5), 1.0 )).xyz;
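            //Note: the first pass outputs object-space coordinates without the
            //modelMatrix, so the two passes only agree while the cube's
            //transform stays at the identity.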
            gl_Position = projectionMatrix *  modelViewMatrix * vec4( position, 1.0 );
            projectedCoords =  projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
        }
    </script>

    <script>
        var container;
        var sceneFirstPass, sceneSecondPass;
        var camera, controls, renderer;
        var rtTexture, transferTexture;
        var cubeTextures;
        var materialFirstPass,materialSecondPass;
        init();
        animate();

        function init() {

            container = document.getElementById( 'container' );
            camera = new THREE.PerspectiveCamera( 40, window.innerWidth / window.innerHeight, 0.01, 3000.0 );
            camera.position.z = 2.0;

            controls = new THREE.OrbitControls( camera, container );
            controls.target.set( 0.0, 0.0, 0.0 );

            var loader = new THREE.TextureLoader();
            cubeTextures = loader.load('data/teapot.raw.png' );

            //Don't let it generate mipmaps to save memory and apply linear filtering to prevent use of LOD.
            cubeTextures.generateMipmaps = false;
            cubeTextures.minFilter = THREE.LinearFilter;
            cubeTextures.magFilter = THREE.LinearFilter;

            transferTexture = updateTransferFunction();

            var screenSize = new THREE.Vector2( window.innerWidth, window.innerHeight );
            rtTexture = new THREE.WebGLRenderTarget( screenSize.x, screenSize.y,
                                                    {   minFilter: THREE.LinearFilter,
                                                        magFilter: THREE.LinearFilter,
                                                        wrapS:  THREE.ClampToEdgeWrapping,
                                                        wrapT:  THREE.ClampToEdgeWrapping,
                                                        format: THREE.RGBAFormat,
                                                        type: THREE.FloatType,
                                                        generateMipmaps: false} );
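            //Note: the target matches the window size; while presenting in VR
            //the renderer draws into per-eye buffers that can have a different size.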

            materialFirstPass = new THREE.ShaderMaterial( {
                vertexShader: document.getElementById( 'vertexShaderFirstPass' ).textContent,
                fragmentShader: document.getElementById( 'fragmentShaderFirstPass' ).textContent,
                side: THREE.BackSide
            } );
            materialSecondPass = new THREE.ShaderMaterial( {
                vertexShader: document.getElementById( 'vertexShaderSecondPass' ).textContent,
                fragmentShader: document.getElementById( 'fragmentShaderSecondPass' ).textContent,
                side: THREE.FrontSide,
                uniforms: { tex:  { type: "t", value: rtTexture.texture },
                            cubeTex:  { type: "t", value: cubeTextures },
                            transferTex:  { type: "t", value: transferTexture },
                            steps : {type: "1f" , value: 256 },
                            alphaCorrection : {type: "1f" , value: 1 }}
             });

            sceneFirstPass = new THREE.Scene();
            sceneSecondPass = new THREE.Scene();
            sceneSecondPass.background = new THREE.Color( 0x808080 );

            var boxGeometry = new THREE.BoxGeometry(1.0, 1.0, 1.0);
            //Geometry.doubleSided is obsolete; the side is set on each material instead.

            var meshFirstPass = new THREE.Mesh( boxGeometry, materialFirstPass );
            var meshSecondPass = new THREE.Mesh( boxGeometry, materialSecondPass );

            sceneFirstPass.add( meshFirstPass );
            sceneSecondPass.add( meshSecondPass );

            renderer = new THREE.WebGLRenderer({ antialias: true } );
            renderer.setPixelRatio( window.devicePixelRatio );
            renderer.setSize( window.innerWidth, window.innerHeight );
            renderer.vr.enabled = true;
            container.appendChild( renderer.domElement );
            //renderer.autoClear = false;

            onWindowResize();

            window.addEventListener( 'resize', onWindowResize, false );
            document.body.appendChild( WEBVR.createButton( renderer ) );

        }

        function updateTransferFunction()
        {
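            //Draw a 256x16 gradient into a canvas and wrap it in a texture;
            //the shader samples it by opacity to obtain the final color.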
            var canvas = document.createElement('canvas');
            canvas.height = 16;
            canvas.width = 256;

            var ctx = canvas.getContext('2d');

            var grd = ctx.createLinearGradient(0, 0, canvas.width -1 , canvas.height - 1);
            grd.addColorStop(0.1, 'rgba(255,0,0,0)');
            grd.addColorStop(0.7, 'rgba(0,255,0,0.5)');
            grd.addColorStop(1, 'rgba(0,0,255,0.9)');

            ctx.fillStyle = grd;
            ctx.fillRect(0,0,canvas.width -1 ,canvas.height -1 );

            var img = document.getElementById("transferFunctionImg");
            img.src = canvas.toDataURL();
            img.style.width = "256px";
            img.style.height = "128px";

            transferTexture =  new THREE.Texture(canvas);
            transferTexture.wrapS = transferTexture.wrapT =  THREE.ClampToEdgeWrapping;
            transferTexture.needsUpdate = true;

            return transferTexture;
        }

        function onWindowResize( event ) {

            camera.aspect = window.innerWidth / window.innerHeight;
            camera.updateProjectionMatrix();

            renderer.setSize( window.innerWidth, window.innerHeight );
        }

        function animate() {
            renderer.setAnimationLoop( render );
        }

        function render() {
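            //First pass: render the cube's back-face world coordinates into rtTexture.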
            renderer.render( sceneFirstPass, camera, rtTexture, true );
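            //Second pass: ray-march the volume between the front- and back-face positions.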
            renderer.render( sceneSecondPass, camera );
        }


    </script>

</body>

0 Answers:

No answers yet.