使用2个renderTargets

时间:2017-11-16 09:51:20

标签: three.js glsl post-processing

我正在尝试实现this effect。正如视频中所解释的那样,我必须创建2个额外的renderTarget,将当前图像与renderTarget#1混合到renderTarget#2中,但是我在three.js中实现它时遇到了困难。你可以在下面查看我的代码:

// Viewport size shared by the renderer, the cameras and the render targets.
let w = window.innerWidth
let h = window.innerHeight

// Main scene: a red sphere viewed through a perspective camera.
const scene = new THREE.Scene()
const camera = new THREE.PerspectiveCamera(60, w / h, 0.1, 1000)
const renderer = new THREE.WebGLRenderer()
const clock = new THREE.Clock()

// Two float render targets used as a ping-pong pair: each frame one is the
// "read" buffer and the other the "write" buffer; their roles are exchanged
// in swapBuffers().
let frontBuffer = createRenderTarget()
let backBuffer  = frontBuffer.clone()
let readBuffer  = frontBuffer
let writeBuffer = backBuffer

// Post-processing pass: a full-screen quad in its own scene, viewed by an
// orthographic camera. NOTE(review): top/bottom are passed flipped
// (-h/2 as "top", h/2 as "bottom"), inverting Y; the PI rotation applied to
// the quad below appears to compensate for this — confirm intent.
const renderScene = new THREE.Scene()
const renderCamera = new THREE.OrthographicCamera(-w / 2, w / 2, -h / 2, h / 2, -1000, 1000)
// NOTE(review): tDiffuse is bound once here to writeBuffer.texture (i.e. the
// backBuffer's texture object). swapBuffers() only reassigns the JS variables,
// so this uniform keeps pointing at the same texture every frame — likely why
// no blending between the two buffers is visible.
const renderMaterial = new THREE.ShaderMaterial({
  uniforms: {
    tDiffuse: { value: writeBuffer.texture }
  },
  vertexShader: `
    varying vec2 vUv;

    void main () {
      gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);

      vUv = uv;
    }
  `,
  fragmentShader: `
    uniform sampler2D tDiffuse;

    varying vec2 vUv;

    void main () {
      gl_FragColor = texture2D(tDiffuse, vUv);
    }
  `
})
const renderMesh = new THREE.Mesh(
  new THREE.PlaneBufferGeometry(w, h),
  renderMaterial
)
// Rotate the quad 180° around X to undo the inverted ortho frustum above.
renderMesh.rotation.x += Math.PI
renderScene.add(renderMesh)

let timeElapsed = 0
let shape

setMainScene()
renderFrame()

// Creates one screen-sized RGBA float render target for accumulation.
// Falls back to half-float when the GPU cannot linearly sample full floats
// (OES_texture_float_linear extension missing).
function createRenderTarget () {
  let type = THREE.FloatType
  if( renderer.extensions.get( 'OES_texture_float_linear' ) === null ) type = THREE.HalfFloatType

  // Created at 1x1 and resized below. Nearest filtering and no mipmaps,
  // since the target is only ever sampled 1:1 by the full-screen quad.
  let renderTarget = new THREE.WebGLRenderTarget( 1, 1, {
    type,
    wrapS: THREE.ClampToEdgeWrapping,
    wrapT: THREE.ClampToEdgeWrapping,
    format: THREE.RGBAFormat,
    minFilter: THREE.NearestFilter,
    magFilter: THREE.NearestFilter,
    stencilBuffer: false,
    depthBuffer: true
  })
  
  renderTarget.texture.generateMipmaps = false
  renderTarget.setSize(w, h)

  return renderTarget
}

// Exchange the roles of the ping-pong pair: whichever target was read from
// this frame becomes next frame's write target, and vice versa.
function swapBuffers () {
  const readingFront = (readBuffer === frontBuffer)
  readBuffer  = readingFront ? backBuffer  : frontBuffer
  writeBuffer = readingFront ? frontBuffer : backBuffer
}

// Initializes the renderer, attaches its canvas to the page, positions the
// main camera, and adds the animated sphere to the main scene.
function setMainScene () {
  renderer.setSize(w, h)
  renderer.setClearColor(0x111111)
  renderer.setPixelRatio(window.devicePixelRatio || 1)
  document.body.appendChild(renderer.domElement)

  camera.position.set(0, 20, 100)
  camera.lookAt(new THREE.Vector3())

  // Red sphere whose position is animated each frame in renderFrame().
  shape = new THREE.Mesh(
    new THREE.SphereBufferGeometry(10, 20, 20),
    new THREE.MeshBasicMaterial({ color: 0xFF0000 })
  )
  scene.add(shape)
}

// Per-frame loop: render the main scene into the current write target, draw
// the post-processing quad to the canvas, then swap the buffers.
// NOTE: renderer.render(scene, camera, target) is the old three.js API used
// by r88 (loaded by this page); later releases replaced it with
// renderer.setRenderTarget(target) followed by renderer.render().
function renderFrame () {
  requestAnimationFrame(renderFrame)

  renderer.render(scene, camera, writeBuffer)
  renderer.render(renderScene, renderCamera)
  swapBuffers()
  
  timeElapsed += clock.getDelta()
  
  // Move the sphere on a Lissajous-like path so motion is visible.
  shape.position.x = Math.sin(timeElapsed) * 20.0
  shape.position.y = Math.cos(timeElapsed * Math.PI) * 20.0
}
* { margin: 0; padding: 0; }
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/88/three.min.js"></script>

首先,我创建了两个额外的帧缓冲区:

// Excerpt from the full snippet above: the cloned pair of render targets
// that form the ping-pong (read/write) buffers.
let frontBuffer = createRenderTarget()
let backBuffer  = frontBuffer.clone()
let readBuffer  = frontBuffer
let writeBuffer = backBuffer

// Builds one screen-sized RGBA float target (half-float fallback when
// OES_texture_float_linear is unavailable); nearest filtering, no mipmaps.
function createRenderTarget () {
  let type = THREE.FloatType
  if( renderer.extensions.get( 'OES_texture_float_linear' ) === null ) type = THREE.HalfFloatType

  let renderTarget = new THREE.WebGLRenderTarget( 1, 1, {
    type,
    wrapS: THREE.ClampToEdgeWrapping,
    wrapT: THREE.ClampToEdgeWrapping,
    format: THREE.RGBAFormat,
    minFilter: THREE.NearestFilter,
    magFilter: THREE.NearestFilter,
    stencilBuffer: false,
    depthBuffer: true
  })

  renderTarget.texture.generateMipmaps = false
  renderTarget.setSize(w, h)

  return renderTarget
}

然后我创建了一个额外的场景、一个覆盖屏幕的平面(我会把主场景渲染到它上面)和一个正交相机。我把主场景渲染得到的图像作为 uniform(统一变量)传递给我的后处理平面:

// Excerpt: the post-processing scene/camera and the pass-through shader
// material for the full-screen quad.
const renderScene = new THREE.Scene()
const renderCamera = new THREE.OrthographicCamera(-w / 2, w / 2, -h / 2, h / 2, -1000, 1000)
// NOTE(review): tDiffuse is bound once, at construction time, to the texture
// of whichever target writeBuffer pointed at; swapping the buffer variables
// later does not rebind this uniform.
const renderMaterial = new THREE.ShaderMaterial({
  uniforms: {
    tDiffuse: { value: writeBuffer.texture }
  },
  vertexShader: `
    varying vec2 vUv;

    void main () {
      gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);

      vUv = uv;
    }
  `,
  fragmentShader: `
    uniform sampler2D tDiffuse;

    varying vec2 vUv;

    void main () {
      gl_FragColor = texture2D(tDiffuse, vUv);
    }
  `
})

最后,在我的动画循环中,我首先将主场景渲染到当前的fbo,然后渲染我的后处理平面并交换缓冲区:

// Excerpt: exchange the read/write roles of the two targets after each frame.
function swapBuffers () {
  if (readBuffer === frontBuffer) {
    readBuffer  = backBuffer
    writeBuffer = frontBuffer
  } else {
    readBuffer  = frontBuffer
    writeBuffer = backBuffer
  }
}

// Excerpt: render the main scene into the write target, draw the post quad
// to the canvas, then swap buffers and animate the sphere.
function renderFrame () {
  requestAnimationFrame(renderFrame)

  renderer.render(scene, camera, writeBuffer)
  renderer.render(renderScene, renderCamera)
  swapBuffers()

  timeElapsed += clock.getDelta()

  shape.position.x = Math.sin(timeElapsed) * 20.0
  shape.position.y = Math.cos(timeElapsed * Math.PI) * 20.0
}

这一切都很好,我可以看到我的主场景渲染显示在后处理平面上,但我无法理解如何将它与前一个帧缓冲区混合。我想我目前的实现非常错误,但是信息很少,我根本无法理解如何实现这种混合。

我尝试将两个缓冲区作为纹理传递,然后在GLSL中将它们混合在一起,如下所示:

// js — pass both targets' textures to the blend shader.
// NOTE(review): these uniform values are bound once at material creation and
// never rebound after swapBuffers(), so the shader keeps sampling the same
// two texture objects every frame.
uniforms: {
    tDiffuse1: { value: writeBuffer.texture },
    tDiffuse2: { value: readBuffer.texture }
  }

// glsl — 50/50 mix of the two input textures.
gl_FragColor = mix(texture2D(tDiffuse1, vUv), texture2D(tDiffuse2, vUv), 0.5);

但在视觉上我看不到任何混合。

1 个答案:

答案 0 :(得分:4)

您需要3个渲染目标。我们称它们为sceneTarget、previousTarget和resultTarget。

第1步:将场景渲染到sceneTarget

现在您的场景位于sceneTarget.texture中。

第2步:将sceneTarget.texture和previousTarget.texture混合到resultTarget中。

这一步需要2个纹理作为输入,就像你在问题底部提到的那样。您需要每帧更新材质的 uniform,以便使用正确的纹理:

renderMaterial.uniforms.tDiffuse1.value = previousTarget.texture;
renderMaterial.uniforms.tDiffuse2.value = sceneTarget.texture;

现在,您在resultTarget.texture中就有了一个混合结果。

第3步:将resultTarget.texture渲染到画布。

现在你可以看到结果。

第4步:交换resultTarget和previousTarget。

// Viewport size shared by the renderer, the cameras and the render targets.
let w = window.innerWidth
let h = window.innerHeight

const scene = new THREE.Scene()
const camera = new THREE.PerspectiveCamera(60, w / h, 0.1, 1000)
const renderer = new THREE.WebGLRenderer()
const clock = new THREE.Clock()

// Three targets: the fresh scene render (sceneTarget), last frame's blended
// output (previousTarget) and the new blended output (resultTarget).
// previousTarget and resultTarget are swapped every frame.
let sceneTarget = createRenderTarget()
let previousTarget  = sceneTarget.clone();
let resultTarget  = sceneTarget.clone();

// Blend pass: mixes the previous accumulated frame (75%) with the fresh
// scene render (25%) into resultTarget. The uniforms declared here are
// rebound every frame in renderFrame(), since the targets swap identities.
const blendScene = new THREE.Scene();
const blendCamera = new THREE.OrthographicCamera(-w/2, w/2, -h/2, h/2, -1000, 1000);
const blendMaterial = new THREE.ShaderMaterial({
  uniforms: {
    tDiffuse1: { value: previousTarget.texture },
    tDiffuse2: { value: sceneTarget.texture },
  },
  vertexShader: `
    varying vec2 vUv;

    void main () {
      gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);

      vUv = uv;
    }
  `,
  fragmentShader: `
    uniform sampler2D tDiffuse1;
    uniform sampler2D tDiffuse2;

    varying vec2 vUv;

    void main () {
      gl_FragColor = mix(texture2D(tDiffuse1, vUv), texture2D(tDiffuse2, vUv), 0.25);
    }
  `,
});
const blendMesh = new THREE.Mesh(
  new THREE.PlaneBufferGeometry(w, h),
  blendMaterial
);
// The ortho frustum above has top/bottom flipped; the PI rotation compensates.
blendMesh.rotation.x = Math.PI;
blendScene.add(blendMesh);

// Final pass: draws resultTarget.texture to the canvas with a plain
// textured quad (no custom shader needed).
const resultScene = new THREE.Scene();
const resultCamera = new THREE.OrthographicCamera(-w/2, w/2, -h/2, h/2, -1000, 1000);
const resultMaterial = new THREE.MeshBasicMaterial({
  map: resultTarget.texture,
});
const resultMesh = new THREE.Mesh(
  new THREE.PlaneBufferGeometry(w, h),
  resultMaterial
);
resultMesh.rotation.x = Math.PI;
resultScene.add(resultMesh);

let shape

setMainScene()
renderFrame(0)

// Build a w×h RGBA float render target for accumulation. Full floats are
// used when the GPU can sample them with linear filtering; otherwise fall
// back to half floats. Mipmaps are disabled — the target is sampled 1:1.
function createRenderTarget () {
  const floatLinearOk = renderer.extensions.get('OES_texture_float_linear') !== null
  const options = {
    type: floatLinearOk ? THREE.FloatType : THREE.HalfFloatType,
    wrapS: THREE.ClampToEdgeWrapping,
    wrapT: THREE.ClampToEdgeWrapping,
    format: THREE.RGBAFormat,
    minFilter: THREE.NearestFilter,
    magFilter: THREE.NearestFilter,
    stencilBuffer: false,
    depthBuffer: true
  }

  // Created at 1x1, then resized to the viewport.
  const target = new THREE.WebGLRenderTarget(1, 1, options)
  target.texture.generateMipmaps = false
  target.setSize(w, h)
  return target
}

// After each frame the freshly blended resultTarget becomes next frame's
// previousTarget, and the old previousTarget is recycled as the next
// resultTarget — a classic ping-pong swap.
function swapBuffers () {
  [previousTarget, resultTarget] = [resultTarget, previousTarget];
}

// Initializes the renderer, attaches its canvas, positions the main camera
// and adds the animated sphere to the main scene.
function setMainScene () {
  renderer.setSize(w, h)
  renderer.setClearColor(0x111111)
  renderer.setPixelRatio(window.devicePixelRatio || 1)
  document.body.appendChild(renderer.domElement)

  camera.position.set(0, 20, 100);
  camera.lookAt(new THREE.Vector3());

  // Red sphere whose position is animated each frame in renderFrame().
  shape = new THREE.Mesh(
    new THREE.SphereBufferGeometry(10, 20, 20),
    new THREE.MeshBasicMaterial({ color: 0xFF0000 })
  );
  scene.add(shape);
}

// Per-frame loop implementing the answer's four steps.
// timeElapsed is the requestAnimationFrame timestamp in ms, scaled to seconds.
function renderFrame (timeElapsed) {
  timeElapsed *= 0.001;
  
  // Step 1: render the 3D scene into sceneTarget.
  renderer.render(scene, camera, sceneTarget);
  
  // Step 2: rebind the blend uniforms every frame (the targets swap
  // identities) and blend previous + scene into resultTarget.
  blendMaterial.uniforms.tDiffuse1.value = previousTarget.texture;
  blendMaterial.uniforms.tDiffuse2.value = sceneTarget.texture;
  renderer.render(blendScene, blendCamera, resultTarget);
  
  // Step 3: draw the blended result to the canvas.
  resultMaterial.map = resultTarget.texture;
  renderer.render(resultScene, resultCamera);
  // Step 4: the result becomes next frame's "previous".
  swapBuffers();

  shape.position.x = Math.sin(timeElapsed) * 20.0;
  shape.position.y = Math.cos(timeElapsed * Math.PI) * 20.0;
  
  requestAnimationFrame(renderFrame);
  
}
* { margin: 0; padding: 0; }
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/88/three.min.js"></script>

我还要补充一点,这并不是实现这种残留(persistence)效果的最佳方式,我也不确定最好的方式是什么。上面做法的问题在于:持久度设置得越高,当前帧的内容就越不明显。

更好的方法(虽然它需要选定一种淡出颜色)是下面这样的,只需要2个目标:previousTarget和currentTarget。

  1. 使用着色器将previousTarget.texture渲染到currentTarget,让它向某种颜色淡化,例如mix(tex, color, 0.05)之类的写法。

  2. 将场景渲染到currentTarget

  3. 将currentTarget.texture渲染到画布。

  4. 交换currentTargetpreviousTarget

  5. let w = window.innerWidth
    let h = window.innerHeight
    
    const scene = new THREE.Scene()
    const camera = new THREE.PerspectiveCamera(60, w / h, 0.1, 1000)
    const renderer = new THREE.WebGLRenderer()
    const clock = new THREE.Clock()
    
    // Only two targets here: the frame being composed (currentTarget) and
    // last frame's result (previousTarget); swapped every frame.
    let currentTarget = createRenderTarget()
    let previousTarget  = currentTarget.clone();
    
    // Fade pass: copies previousTarget into currentTarget while mixing it
    // 5% toward a fixed fade color, so old frames decay exponentially.
    const fadeScene = new THREE.Scene();
    const fadeCamera = new THREE.OrthographicCamera(-w/2, w/2, -h/2, h/2, -1000, 1000);
    const fadeMaterial = new THREE.ShaderMaterial({
      uniforms: {
        tDiffuse: { value: previousTarget.texture },
      },
      vertexShader: `
        varying vec2 vUv;
    
        void main () {
          gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);
    
          vUv = uv;
        }
      `,
      fragmentShader: `
        uniform sampler2D tDiffuse;
    
        varying vec2 vUv;
    
        void main () {
          vec4 fadeColor = vec4(0,0,0,1);
          gl_FragColor = mix(texture2D(tDiffuse, vUv), fadeColor, 0.05);
        }
      `,
    });
    const fadeMesh = new THREE.Mesh(
      new THREE.PlaneBufferGeometry(w, h),
      fadeMaterial
    );
    // Compensates for the flipped top/bottom of the ortho frustum above.
    fadeMesh.rotation.x = Math.PI;
    fadeScene.add(fadeMesh);
    
    // Final pass: draws currentTarget.texture to the canvas.
    const resultScene = new THREE.Scene();
    const resultCamera = new THREE.OrthographicCamera(-w/2, w/2, -h/2, h/2, -1000, 1000);
    const resultMaterial = new THREE.MeshBasicMaterial({
      map: currentTarget.texture,
    });
    const resultMesh = new THREE.Mesh(
      new THREE.PlaneBufferGeometry(w, h),
      resultMaterial
    );
    resultMesh.rotation.x = Math.PI;
    resultScene.add(resultMesh);
    
    let shape
    
    setMainScene()
    renderFrame(0)
    
    // Same render target factory as the previous snippet: screen-sized RGBA
    // float target, half-float fallback when OES_texture_float_linear is
    // unavailable; nearest filtering and no mipmaps (sampled 1:1 only).
    function createRenderTarget () {
      let type = THREE.FloatType
      if( renderer.extensions.get( 'OES_texture_float_linear' ) === null ) type = THREE.HalfFloatType
    
      let renderTarget = new THREE.WebGLRenderTarget( 1, 1, {
        type,
        wrapS: THREE.ClampToEdgeWrapping,
        wrapT: THREE.ClampToEdgeWrapping,
        format: THREE.RGBAFormat,
        minFilter: THREE.NearestFilter,
        magFilter: THREE.NearestFilter,
        stencilBuffer: false,
        depthBuffer: true
      })
      
      renderTarget.texture.generateMipmaps = false
      renderTarget.setSize(w, h)
    
      return renderTarget
    }
    
    // Ping-pong: the frame just composed becomes next frame's history, and
    // the old history target is reused for the next frame.
    function swapBuffers () {
      [previousTarget, currentTarget] = [currentTarget, previousTarget];
    }
    
    // Renderer/camera/sphere setup. autoClearColor is disabled because
    // renderFrame() renders twice into currentTarget (fade pass, then the
    // scene on top) and the second render must not wipe out the first.
    function setMainScene () {
      renderer.setSize(w, h)
      renderer.setClearColor(0x111111)
      renderer.setPixelRatio(window.devicePixelRatio || 1)
      renderer.autoClearColor = false;
      document.body.appendChild(renderer.domElement)
    
      camera.position.set(0, 20, 100);
      camera.lookAt(new THREE.Vector3());
    
      // Red sphere whose position is animated each frame in renderFrame().
      shape = new THREE.Mesh(
        new THREE.SphereBufferGeometry(10, 20, 20),
        new THREE.MeshBasicMaterial({ color: 0xFF0000 })
      );
      scene.add(shape);
    }
    
    // Per-frame loop for the fade variant.
    // timeElapsed is the requestAnimationFrame timestamp in ms, scaled to s.
    function renderFrame (timeElapsed) {
      timeElapsed *= 0.001;
      
      // Step 1: fade the previous frame into currentTarget (rebinding the
      // uniform every frame, since the targets swap identities).
      fadeMaterial.uniforms.tDiffuse.value = previousTarget.texture;
      renderer.render(fadeScene, fadeCamera, currentTarget);
      
      // Step 2: draw the scene on top of the faded history
      // (autoClearColor is off, so the fade result is preserved).
      renderer.render(scene, camera, currentTarget);
        
      // Step 3: show currentTarget on the canvas.
      resultMaterial.map = currentTarget.texture;
      renderer.render(resultScene, resultCamera);
      // Step 4: the composed frame becomes next frame's history.
      swapBuffers();
    
      shape.position.x = Math.sin(timeElapsed) * 20.0;
      shape.position.y = Math.cos(timeElapsed * Math.PI) * 20.0;
      
      requestAnimationFrame(renderFrame);
      
    }
    * { margin: 0; padding: 0; }
    <script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/88/three.min.js"></script>