OpenGL: computing normals and a TBN matrix from the depth buffer (SSAO implementation)

Time: 2016-10-27 21:50:46

Tags: c++ opengl shader depth-buffer ssao

I'm implementing SSAO in OpenGL, following this tutorial: John Chapman SSAO

Basically, the technique described uses a hemispherical kernel oriented along the fragment's normal. The view-space z position of each sample is then compared against its screen-space depth buffer value. If the value in the depth buffer is higher, it means the sample ended up inside geometry, so this fragment should be occluded.

The goal of this technique is to get rid of the classic implementation artifact where the flat faces of objects show up gray.

I have the same implementation, with 2 small differences:

  • I'm not using a noise texture to rotate my kernel, so I have banding artifacts; that's fine for now (a rough sketch of what that rotation would look like follows this list)
  • I can't access a buffer with per-pixel normals, so I have to compute my normal and TBN matrix from the depth buffer only.
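
For reference, this is roughly what the kernel rotation I skipped would look like. It is only a sketch in the spirit of the Chapman tutorial; noiseTex, noiseScale and buildRotatedTBN are hypothetical names that are not part of my shaders. A small tiled noise texture supplies a random vector per fragment, and Gram-Schmidt builds the TBN around it, which rotates the hemisphere around the normal and turns the banding into high-frequency noise that a blur pass can remove:

    // Sketch only: per-fragment kernel rotation using a small tiled noise texture.
    // noiseScale = screenSize / noiseTextureSize, so the texture tiles over the screen.
    uniform sampler2D noiseTex;
    uniform vec2 noiseScale;

    mat3 buildRotatedTBN(in vec3 normal, in vec2 uv)
    {
        // Random vector in the xy plane, assuming the texture stores values in [0, 1]
        vec2 rnd = texture(noiseTex, uv * noiseScale).xy * 2.0 - 1.0;
        vec3 randomVec = normalize(vec3(rnd, 0.0));

        // Gram-Schmidt: build a tangent orthogonal to the normal
        vec3 tangent   = normalize(randomVec - normal * dot(randomVec, normal));
        vec3 bitangent = cross(normal, tangent);

        // Multiplying the kernel samples by this matrix orients the hemisphere
        // along the normal and gives each fragment a different rotation around it
        return mat3(tangent, bitangent, normal);
    }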

The algorithm seems to work fine, and I can see fragments being occluded, BUT my faces are still grayish... IMO it comes from the way I compute my TBN matrix. The normals look okay, but something must be wrong, because my kernel doesn't seem to be properly aligned, which causes samples to end up in the faces.

The screenshots use a kernel of 8 samples and a radius of 0.1. The first one is the result of the SSAO pass, and the second one is a debug render of the generated normals.

Here is the code of the function that computes the normal and TBN matrix:



    mat3 computeTBNMatrixFromDepth(in sampler2D depthTex, in vec2 uv)
    {
        // Compute the normal and TBN matrix
        float ld = -getLinearDepth(depthTex, uv);
        vec3 x = vec3(uv.x, 0., ld);
        vec3 y = vec3(0., uv.y, ld);
        x = dFdx(x);
        y = dFdy(y);
        x = normalize(x);
        y = normalize(y);
        vec3 normal = normalize(cross(x, y));
        return mat3(x, y, normal);
    }


And the SSAO shader:

#include "helper.glsl"

in vec2 vertTexcoord;
uniform sampler2D depthTex;

const int MAX_KERNEL_SIZE = 8;
uniform vec4 gKernel[MAX_KERNEL_SIZE];

// Kernel Radius in view space (meters)
const float KERNEL_RADIUS = .1; 

uniform mat4 cameraProjectionMatrix;
uniform mat4 cameraProjectionMatrixInverse;

out vec4 FragColor;


void main()
{   
    // Get the current depth of the current pixel from the depth buffer (stored in the red channel)
    float originDepth = texture(depthTex, vertTexcoord).r;

    // Debug linear depth. Depth buffer values are in the range [0, 1]
    float oLinearDepth = getLinearDepth(depthTex, vertTexcoord);

    // Compute the view space position of this point from its depth value
    vec4 viewport = vec4(0,0,1,1);    
    vec3 originPosition = getViewSpaceFromWindow(cameraProjectionMatrix, cameraProjectionMatrixInverse, viewport, vertTexcoord, originDepth);

    mat3 lookAt = computeTBNMatrixFromDepth(depthTex, vertTexcoord);
    vec3 normal = lookAt[2];

    float occlusion = 0.;

    for (int i=0; i<MAX_KERNEL_SIZE; i++) 
    {
        // We align the Kernel Hemisphere on the fragment normal by multiplying all samples by the TBN        
        vec3 samplePosition = lookAt * gKernel[i].xyz;

        // We want the sample position in View Space and we scale it with the kernel radius
        samplePosition = originPosition + samplePosition * KERNEL_RADIUS;

        // Now we need to get sample position in screen space
        vec4 sampleOffset = vec4(samplePosition.xyz, 1.0);
        sampleOffset = cameraProjectionMatrix * sampleOffset;
        sampleOffset.xyz /= sampleOffset.w;

        // Now to get the depth buffer value at the projected sample position
        sampleOffset.xyz = sampleOffset.xyz * 0.5 + 0.5;

        // Now can get the linear depth of the sample
        float sampleOffsetLinearDepth = -getLinearDepth(depthTex, sampleOffset.xy);

        // Now we need to do a range check to make sure that objects
        // outside of the kernel radius are not taken into account
        float rangeCheck = abs(originPosition.z - sampleOffsetLinearDepth) < KERNEL_RADIUS ? 1.0 : 0.0;

        // If the stored geometry is closer to the camera than the sample, the sample is occluded
        occlusion += (sampleOffsetLinearDepth >= samplePosition.z ? 1.0 : 0.0) * rangeCheck;
    }  

    occlusion = 1.0 - (occlusion / MAX_KERNEL_SIZE);
    FragColor = vec4(vec3(occlusion), 1.0);
}

[Screenshots: computed normals from the depth buffer, SSAO pass]

Update 1

This variation of the TBN computation function gives the same result:

mat3 computeTBNMatrixFromDepth(in sampler2D depthTex, in vec2 uv)
{
    // Compute the normal and TBN matrix
    float ld = -getLinearDepth(depthTex, uv);
    vec3 a = vec3(uv, ld);
    vec3 x = vec3(uv.x + dFdx(uv.x), uv.y, ld + dFdx(ld));
    vec3 y = vec3(uv.x, uv.y + dFdy(uv.y), ld + dFdy(ld));
    //x = dFdx(x);
    //y = dFdy(y);
    //x = normalize(x);
    //y = normalize(y);
    vec3 normal = normalize(cross(x - a, y - a));
    vec3 first_axis = cross(normal, vec3(1.0f, 0.0f, 0.0f));
    vec3 second_axis = cross(first_axis, normal);
    return mat3(normalize(first_axis), normalize(second_axis), normal);
}

1 Answer:

Answer 0 (score: 1)

I think the problem is likely that you are mixing coordinate systems. You are using the texture coordinates in combination with the linear depth. Imagine two vertical surfaces facing slightly to the left of the screen. Both have the same angle against the vertical plane and should therefore have the same normal, right?

But now imagine that one of these surfaces is farther away from the camera. Since the dFdx/dFdy functions basically tell you the difference to the neighboring pixel, the surface far from the camera will have a larger difference in linear depth over one pixel than the surface close to the camera, while the uv.x/uv.y derivatives will have the same values. That means you get different normals depending on the distance from the camera.

The solution is to compute the view-space coordinates and use their derivatives to compute the normal:

vec3 viewFromDepth(in sampler2D depthTex, in vec2 uv, in vec3 view)
{
    float ld = -getLinearDepth(depthTex, uv);

    /// I assume ld is negative for fragments in front of the camera
    /// not sure how getLinearDepth is implemented

    vec3 z_scaled_view = (view / view.z) * ld;

    return z_scaled_view;
}

mat3 computeTBNMatrixFromDepth(in sampler2D depthTex, in vec2 uv, in vec3 view)
{
    // Reconstruct the view-space position of this fragment from its linear depth
    vec3 view_pos = viewFromDepth(depthTex, uv, view);

    // Screen-space derivatives of the view-space position give two vectors
    // tangent to the surface, so their cross product is the view-space normal
    vec3 view_normal = normalize(cross(dFdx(view_pos), dFdy(view_pos)));
    vec3 first_axis = cross(view_normal, vec3(1.0f, 0.0f, 0.0f));
    vec3 second_axis = cross(first_axis, view_normal);

    // Normal goes in the third column so the caller can keep using lookAt[2]
    return mat3(normalize(first_axis), normalize(second_axis), view_normal);
}
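
For completeness, here is a minimal sketch of how the question's main() could call the corrected function. The NDC unprojection via cameraProjectionMatrixInverse is my assumption; any view-space vector pointing from the camera through the fragment will do, since viewFromDepth rescales it to the fragment's linear depth:

// Sketch only: build a view-space ray for this fragment by unprojecting its
// NDC position; z = 1.0 just picks an arbitrary point along the ray.
vec4 ndcRay  = vec4(vertTexcoord * 2.0 - 1.0, 1.0, 1.0);
vec4 viewRay = cameraProjectionMatrixInverse * ndcRay;
viewRay.xyz /= viewRay.w;

mat3 lookAt = computeTBNMatrixFromDepth(depthTex, vertTexcoord, viewRay.xyz);
vec3 normal = lookAt[2];   // the normal stays in the third column, as in the original shader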