Incorrect lighting from a position reconstructed from depth

Asked: 2013-11-03 06:14:57

Tags: c++ opengl glsl glm-math

I am trying to reconstruct the position of my fragments from the depth value stored in GL_DEPTH_ATTACHMENT. To do this, I linearize the depth and then multiply it by the ray from the camera position to the corresponding point on the far plane.

This is the second method described here. To get the ray from the camera to the far plane, I retrieve the rays to the four corners of the far plane, pass them to my vertex shader, and interpolate them across the fragment shader. I am using the following code to get the far-plane corners of the camera in world space.

std::vector<float> Camera::GetFlatFarFrustumCorners() {
    // rotation is the orientation of my camera in a quaternion.
    glm::quat inverseRotation = glm::inverse(rotation);
    glm::vec3 localUp = glm::normalize(inverseRotation * glm::vec3(0.0f, 1.0f, 0.0f));
    glm::vec3 localRight = glm::normalize(inverseRotation * glm::vec3(1.0f, 0.0f, 0.0f));
    float farHeight = 2.0f * tan(90.0f / 2) * 100.0f;
    float farWidth = farHeight * aspect;

    // 100.0f is the distance to the far plane. position is the location of the camera in world space.
    glm::vec3 farCenter = position + glm::vec3(0.0f, 0.0f, -1.0f) * 100.0f;
    glm::vec3 farTopLeft = farCenter + (localUp * (farHeight / 2)) - (localRight * (farWidth / 2));
    glm::vec3 farTopRight = farCenter + (localUp * (farHeight / 2)) + (localRight * (farWidth / 2));
    glm::vec3 farBottomLeft = farCenter - (localUp * (farHeight / 2)) - (localRight * (farWidth / 2));
    glm::vec3 farBottomRight = farCenter - (localUp * (farHeight / 2)) + (localRight * (farWidth / 2));

    return { 
        farTopLeft.x, farTopLeft.y, farTopLeft.z,
        farTopRight.x, farTopRight.y, farTopRight.z,
        farBottomLeft.x, farBottomLeft.y, farBottomLeft.z,
        farBottomRight.x, farBottomRight.y, farBottomRight.z
    };
}

Is this the correct way to retrieve the far-plane corners in world space?

When I use these corners in my shaders, the results are incorrect, and what I get seems to be in view space. These are the shaders I am using:

Vertex Shader:

layout(location = 0) in vec2 vp;
layout(location = 1) in vec3 textureCoordinates;

uniform vec3 farFrustumCorners[4];
uniform vec3 cameraPosition;

out vec2 st;
out vec3 frustumRay;

void main () {
    st = textureCoordinates.xy;
    gl_Position = vec4 (vp, 0.0, 1.0);
    frustumRay = farFrustumCorners[int(textureCoordinates.z)-1] - cameraPosition;
}

Fragment Shader:

in vec2 st;
in vec3 frustumRay;

uniform sampler2D colorTexture;
uniform sampler2D normalTexture;
uniform sampler2D depthTexture;

uniform vec3 cameraPosition;
uniform vec3 lightPosition;

out vec3 color;

void main () {
    // Far and near distances; Used to linearize the depth value.
    float f = 100.0;
    float n = 0.1;
    float depth = (2 * n) / (f + n - (texture(depthTexture, st).x) * (f - n));
    vec3 position = cameraPosition + (normalize(frustumRay) * depth);
    vec3 normal = texture(normalTexture, st).xyz;


    float k = 0.00001;
    vec3 distanceToLight = lightPosition - position;
    float distanceLength = length(distanceToLight);
    float attenuation = (1.0 / (1.0 + (0.1 * distanceLength) + k * (distanceLength * distanceLength)));
    float diffuseTemp = max(dot(normalize(normal), normalize(distanceToLight)), 0.0);
    vec3 diffuse = vec3(1.0, 1.0, 1.0) * attenuation * diffuseTemp;

    vec3 gamma = vec3(1.0/2.2);
    color = pow(texture(colorTexture, st).xyz+diffuse, gamma);

    //color = texture(colorTexture, st);
    //colour.r = (2 * n) / (f + n - texture( tex, st ).x * (f - n));
    //colour.g = (2 * n) / (f + n - texture( tex, st ).y* (f - n));
    //colour.b = (2 * n) / (f + n - texture( tex, st ).z * (f - n));
}

This is what my scene's lighting looks like with these shaders: [screenshot: Horrible lighting]

I am fairly certain this is the result of my reconstructed position being either completely wrong or in the wrong space. What is wrong with my reconstruction, and what can I do to fix it?

1 Answer:

Answer 0 (score: 2)

The first thing you want to do is develop a temporary addition to your G-Buffer setup that stores the initial position of each fragment in world/view space (really, whichever space you are trying to reconstruct here). Then write a shader that reconstructs those positions from nothing but the depth buffer. Set everything up so that half of your screen shows the original G-Buffer positions and the other half shows the reconstructed positions; you should be able to spot the discrepancy immediately.
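As a rough illustration of that debug pass, the fragment shader below is a minimal sketch only; the names positionTexture and screenWidth, and the particular reconstruction shown, are assumptions rather than code from the question or this answer. It samples the stored position on the left half of the screen and the reconstructed one on the right half:

#version 150 core

in vec2 st;
in vec3 frustumRay;

// positionTexture is the temporary G-Buffer attachment holding the original
// world-space position; depthTexture feeds the reconstruction being tested.
uniform sampler2D positionTexture;
uniform sampler2D depthTexture;
uniform vec3      cameraPosition;
uniform float     screenWidth;

out vec3 color;

void main () {
    // Reference value written while filling the G-Buffer.
    vec3 storedPosition = texture (positionTexture, st).xyz;

    // Reconstructed value -- substitute whatever reconstruction you are testing.
    float f = 100.0;
    float n = 0.1;
    float depth = (2.0 * n) / (f + n - texture (depthTexture, st).x * (f - n));
    vec3 reconstructedPosition = cameraPosition + normalize (frustumRay) * depth;

    // Left half of the screen shows the stored positions, right half the
    // reconstructed ones; any error shows up as a visible seam at the split.
    color = (gl_FragCoord.x < 0.5 * screenWidth) ? storedPosition
                                                 : reconstructedPosition;
}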

That said, you may want to take a look at an implementation I have used in the past to reconstruct (object-space) position from the depth buffer. It first takes you into view space, then uses the inverse model-view matrix to get to object space. You can adapt it for world space quite easily. It is probably not the most flexible implementation, since the FOV is hard-coded, but you could easily modify it to use uniforms instead...

Trimmed fragment shader:

flat in mat4 inv_mv_mat;
     in vec2 uv;

...

float linearZ (float z)
{
#ifdef INVERT_NEAR_FAR
  const float f = 2.5;
  const float n = 25000.0;
#else
  const float f = 25000.0;
  const float n = 2.5;
#endif

  return n / (f - z * (f - n)) * f;
}

vec4
reconstruct_pos (float depth)
{
  depth = linearZ (depth);

  vec4 pos = vec4 (uv * depth, -depth, 1.0); 
  vec4 ret = (inv_mv_mat * pos);

  return ret / ret.w;
}
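For context, a call site for reconstruct_pos() might look roughly like the sketch below; depth_tex, tex_st and frag_color are assumed names and are not part of the original shader. Note that if inv_mv_mat held only the inverse of the view matrix, the same call would return a world-space position instead of an object-space one.

in vec2 tex_st;

uniform sampler2D depth_tex; // hardware depth buffer bound as a texture

out vec4 frag_color;

void main (void)
{
  float hw_depth = texture (depth_tex, tex_st).r;

  // Object-space position of the fragment behind this pixel.
  vec4 pos = reconstruct_pos (hw_depth);

  // Visualize the reconstructed position for debugging.
  frag_color = vec4 (pos.xyz, 1.0);
}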

It needs some additional setup in the vertex shader stage of the deferred-shading lighting pass, which looks like this:

#version 150 core

in       vec4 vtx_pos;
in       vec2 vtx_st;

uniform  mat4 modelview_mat; // Matrix used when the G-Buffer was built
uniform  mat4 camera_matrix; // Matrix used to stretch the G-Buffer over the viewport

uniform float buffer_res_x;
uniform float buffer_res_y;

     out vec2 tex_st;
flat out mat4 inv_mv_mat;
     out vec2 uv;


// Hard-Coded 45 degree FOV
//const float fovy = 0.78539818525314331; // NV pukes on the line below!
//const float fovy = radians (45.0);
//const float tan_half_fovy = tan (fovy * 0.5);

const float   tan_half_fovy = 0.41421356797218323;

      float   aspect        = buffer_res_x / buffer_res_y;
      vec2    inv_focal_len = vec2 (tan_half_fovy * aspect,
                                    tan_half_fovy);

const vec2    uv_scale     = vec2 (2.0, 2.0);
const vec2    uv_translate = vec2 (1.0, 1.0);


void main (void)
{
  inv_mv_mat  = inverse (modelview_mat);
  tex_st      = vtx_st;
  gl_Position = camera_matrix * vtx_pos;
  uv          = (vtx_st * uv_scale - uv_translate) * inv_focal_len;
}

Depth-range inversion is something you may find useful for deferred shading. A perspective depth buffer normally gives you more precision than you need up close and not enough far away for a quality reconstruction. If you flip things on their head by inverting the depth range, you can even things out a little while still using a hardware depth buffer. This is discussed in detail here.