DirectX 11: SSAO breaks when tilting the camera up/down

Date: 2019-07-04 17:22:41

Tags: 3d directx hlsl

For weeks now I have been trying to get SSAO working in my DirectX 11 / C++ graphics engine, and I cannot think of anything else I could be doing wrong in this code.

I am following this OpenGL tutorial and have essentially the same implementation in DirectX with HLSL (shader model 5) shaders. I use two framebuffers, one for position data and one for normals, both transformed into view space and written out in a first geometry pass:
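For reference, each of the two G-buffer targets is a floating-point texture that is both a render target and a shader resource. A minimal D3D11 sketch of the setup (the names, the 16-bit float format, and the pre-existing device/context/depth-stencil objects are assumptions, not my exact engine code):

#include <d3d11.h>

// Sketch: creating the view-space position target; the normal target is
// created the same way. Assumes an existing ID3D11Device* device and
// ID3D11DeviceContext* context.
D3D11_TEXTURE2D_DESC texDesc = {};
texDesc.Width            = screenWidth;
texDesc.Height           = screenHeight;
texDesc.MipLevels        = 1;
texDesc.ArraySize        = 1;
texDesc.Format           = DXGI_FORMAT_R16G16B16A16_FLOAT;  // any format that can hold signed view-space values works
texDesc.SampleDesc.Count = 1;
texDesc.Usage            = D3D11_USAGE_DEFAULT;
texDesc.BindFlags        = D3D11_BIND_RENDER_TARGET | D3D11_BIND_SHADER_RESOURCE;

ID3D11Texture2D*          gPositionTex = nullptr;
ID3D11RenderTargetView*   gPositionRTV = nullptr;
ID3D11ShaderResourceView* gPositionSRV = nullptr;
device->CreateTexture2D(&texDesc, nullptr, &gPositionTex);
device->CreateRenderTargetView(gPositionTex, nullptr, &gPositionRTV);
device->CreateShaderResourceView(gPositionTex, nullptr, &gPositionSRV);

// Geometry pass: both targets are bound at once (MRT); the SSAO pass later
// samples them through their shader resource views.
ID3D11RenderTargetView* rtvs[2] = { gPositionRTV, gNormalRTV };
context->OMSetRenderTargets(2, rtvs, depthStencilView);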

Geometry pass vertex shader:

struct VStoPS {
    float4 pos_ : SV_Position;
    float4 posView_ : POSITION1;
    float4 normalView_ : NORMAL1;
};

/********************** constant buffers ***********************/
cbuffer cbCamera_ {
    float4x4 matView_;
    float4x4 matProjection_;
};

cbuffer cbTransformations_ {
    float4x4 matModel_;
    float4x4 matNormalView_;
};

/*************************** main ******************************/
VStoPS vs_main(float3 pos : POSITION, float3 normal0 : NORMAL0, float2 texCoord0 : TEXCOORD0) {
    VStoPS output = (VStoPS) 0;

    output.posView_ = mul(matView_, mul(matModel_, float4(pos, 1.0)));
    output.normalView_ = normalize(mul(matNormalView_, float4(normal0, 0.0)));

    float4x4 viewProj = mul(matProjection_, matView_);
    float4x4 mvp = mul(viewProj, matModel_);
    output.pos_ = mul(mvp, float4(pos, 1.0));

    return output;
}

Geometry pass pixel shader:

/************************** structs ****************************/
struct VStoPS {
    float4 pos_ : SV_Position;
    float4 posView_ : POSITION1;
    float4 normalView_ : NORMAL1;
};

struct PS_Output {
    float4 positionView : SV_Target0;   // written to the view-space position target
    float4 normalView   : SV_Target1;   // written to the view-space normal target
};

/*************************** main ******************************/
PS_Output ps_main(VStoPS input)
{
    PS_Output output = (PS_Output)0;
    output.positionView = input.posView_;
    output.normalView = input.normalView_;
    return output;
}

I compute the normal-view matrix like this:

mat4 normalView = (viewMatrix * modelMatrix).getTransposed().getInverse();

I build the sample kernel and the random rotations like this (random() returns a float between 0.0 and 1.0):

// Build the main kernel with random samples
for (int i = 0; i < D3D_SSAO_SAMPLE_COUNT; i++)
{
    // Sample kernel is a hemisphere along the positive z axis
    vec3 sample(
        random() * 2.0f - 1.0f,
        random() * 2.0f - 1.0f,
        random()
    );

    // Put more samples closer to the origin of the hemisphere for better results
    float scale = lerp(0.1f, 1.0f, pow(static_cast<float>(i) / static_cast<float>(D3D_SSAO_SAMPLE_COUNT), 2));
    ssaoKernel_[i] = sample.getNormalized() * scale;
}

// Build random kernel rotations to reduce banding
for (int i = 0; i < D3D_SSAO_ROTATIONS_COUNT; i++)
{
    vec3 rotation(
        random() * 2.0f - 1.0f,
        random() * 2.0f - 1.0f,
        0.0f
    );

    ssaoKernelRotations_[i] = rotation.getNormalized();
}
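One detail about cbSSAO_ (declared in the SSAO shader below): HLSL pads every element of a float3 array in a constant buffer to a full 16-byte register, so the CPU-side upload data needs the same padding. A sketch of a matching layout (the struct names here are illustrative, not my actual code):

// Sketch of a CPU-side mirror of cbSSAO_. In an HLSL constant buffer every
// element of a float3 array occupies a full 16-byte register, so a padding
// float is required after each vector.
struct Float3Padded {
    float x, y, z;
    float pad;                   // unused, keeps the 16-byte stride
};

struct CbSSAO {
    Float3Padded samples[32];    // mirrors float3 samples_[32]
    Float3Padded rotations[9];   // mirrors float3 rotations_[9]
};
static_assert(sizeof(CbSSAO) == (32 + 9) * 16, "layout must match HLSL packing of cbSSAO_");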

Then the SSAO pass is rendered. Its vertex shader only draws a full-screen quad, and the pixel shader does the actual SSAO work; both are shown below.
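The quad vertex shader is just a pass-through (a simplified sketch; I assume the quad vertices already come in clip-space coordinates):

/************************** structs ****************************/
struct VStoPS {
    float4 pos_ : SV_Position;
    float2 texCoord0_ : TEXCOORD0;
};

/*************************** main ******************************/
// The quad vertices are already in clip space (-1..1), so no transformation
// is needed; only the texture coordinates are forwarded to the pixel shader.
VStoPS vs_main(float3 pos : POSITION, float2 texCoord0 : TEXCOORD0) {
    VStoPS output = (VStoPS) 0;
    output.pos_ = float4(pos, 1.0);
    output.texCoord0_ = texCoord0;
    return output;
}

And the SSAO pixel shader: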

/************************** structs ****************************/
struct VStoPS {
    float4 pos_ : SV_Position;
    float2 texCoord0_ : TEXCOORD0;
};

/********************** constant buffers ***********************/
cbuffer cbSSAO_ {
    float3 samples_[32];
    float3 rotations_[9];
};

cbuffer cbGBufferCamera_ {
    float4x4 matCameraView_;
    float4x4 matCameraProjection_;
};

cbuffer cbScreenInfo_ {
    int screenWidth_;
    int screenHeight_;
};

/********************** shader resources ***********************/
SamplerState sampler_;

Texture2D<float4> gPositionViewFramebuffer_;
Texture2D<float4> gNormalViewFramebuffer_;

/*************************** main ******************************/
float4 ps_main(VStoPS input) : SV_Target {
    const int kernelSize = 32;

    // Get the proper rotation vector for the current fragment
    const float w = (float) screenWidth_;
    const float h = (float) screenHeight_;
    const float2 noiseScale = float2(w / 3.0, h / 3.0);
    const float2 scaledCoordinates = input.texCoord0_ * noiseScale;
    const uint rotationIndex = (scaledCoordinates.x % 3) * (scaledCoordinates.y % 3);
    const float3 kernelRotationVector = normalize(rotations_[rotationIndex]);

    // Sample fragment position and normal from textures
    const float3 fragPos = gPositionViewFramebuffer_.Sample(sampler_, input.texCoord0_).xyz;
    const float3 normal = normalize(gNormalViewFramebuffer_.Sample(sampler_, input.texCoord0_).xyz);

    // Build a transformation matrix from tangent space to view space
    const float3 tangent = normalize(kernelRotationVector - normal * dot(kernelRotationVector, normal));
    const float3 bitangent = cross(normal, tangent);
    const float3x3 TBN = transpose(float3x3(tangent, bitangent, normal));

    // Calculate occlusion
    float occlusion = 0.0;
    const float radius = 0.5;

    for (int i = 0; i < kernelSize; i++)
    {
        // Transform the sample
        float3 currentSample = mul(TBN, samples_[i]);
        currentSample = fragPos + (currentSample * radius);

        // Get the respective depth value from the gBuffer at the same projected location
        float4 offset = float4(currentSample, 1.0);
        offset = mul(matCameraProjection_, offset);
        float2 coords = ((offset.xy / offset.w) + float2(1.0, 1.0)) / 2.0;

        float sampleDepth = gPositionViewFramebuffer_.Sample(sampler_, coords.xy).z;

        // Increase occlusion if the sample is actually occluded
        float rangeCheck = smoothstep(0.0, 1.0, radius / abs(fragPos.z - sampleDepth));
        occlusion += (sampleDepth <= currentSample.z ? 1.0 : 0.0) * rangeCheck;
    }

    occlusion = 1.0 - (occlusion / ((float) kernelSize));
    return float4(occlusion, occlusion, occlusion, 1.0);
}

After that, I apply a blur filter to remove the noise introduced by the random rotation vectors (a sketch of the blur follows the screenshot below). Looking straight ahead, the result seems more or less right:

[Screenshot: kinda looks right]
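The blur pass is a small box filter over the SSAO output, roughly along these lines (a simplified sketch; the texture name and the 3x3 neighborhood, chosen to match the 3x3 rotation tile, are assumptions):

/************************** structs ****************************/
struct VStoPS {
    float4 pos_ : SV_Position;
    float2 texCoord0_ : TEXCOORD0;
};

/********************** shader resources ***********************/
SamplerState sampler_;

Texture2D<float4> gSSAOFramebuffer_;

/*************************** main ******************************/
// Average a 3x3 neighborhood to hide the per-pixel noise pattern that the
// random kernel rotations introduce.
float4 ps_main(VStoPS input) : SV_Target {
    float w, h;
    gSSAOFramebuffer_.GetDimensions(w, h);
    const float2 texelSize = float2(1.0 / w, 1.0 / h);

    float result = 0.0;
    for (int x = -1; x <= 1; x++) {
        for (int y = -1; y <= 1; y++) {
            const float2 offset = float2(x, y) * texelSize;
            result += gSSAOFramebuffer_.Sample(sampler_, input.texCoord0_ + offset).r;
        }
    }

    result /= 9.0;
    return float4(result, result, result, 1.0);
}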

Of course there are some artifacts and it is not super clean, but it is workable. There is actually a cube hovering right in front of the camera, but since there is no ambient occlusion around it, it does not even stand out. However, as soon as I tilt the camera upwards, this happens:

[Screenshot: slightly tilted upwards]

Basically, the whole scene gets squashed along the y axis and the cube is mirrored along the x axis. I have been debugging this for hours and cannot figure out where the problem is. Here is a list of possible issues I have ruled out (mostly using NSight):

  • The sample kernel and the rotation vectors are created correctly and uploaded to the constant buffer; they are 100% correct
  • The position and normal data seem to be transformed correctly, although I am not 100% sure what the framebuffers holding that data are supposed to look like
  • There are no warnings from the renderer or from the shaders themselves, and no data truncation or anything of that sort
  • I tried transposing different matrices, since DirectX uses a different (row-major) matrix layout than OpenGL, but that did not seem to change the problem (the sketch after the screenshot below shows the two usual ways to keep the layouts consistent)
  • The matrices are uploaded correctly to the constant buffers; here is an example of the view and projection matrices when I tilt the camera upwards:

[Screenshot: view and projection matrices when tilting the camera upwards]
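On the transposing point: whether a transpose is needed on upload only depends on whether the CPU-side memory layout of the matrices matches the packing the shader expects; HLSL packs constant buffer matrices column-major by default. A sketch of the two usual ways to keep this consistent (the cbCameraData and camera names are illustrative; only getTransposed() comes from the engine's own math code):

// Option 1: transpose on upload and leave the shaders unchanged
// (HLSL packs constant buffer matrices column-major by default).
cbCameraData.matView_       = camera.viewMatrix().getTransposed();
cbCameraData.matProjection_ = camera.projectionMatrix().getTransposed();

// Option 2: upload as-is and declare the layout in HLSL instead:
//   cbuffer cbCamera_ {
//       row_major float4x4 matView_;
//       row_major float4x4 matProjection_;
//   };
// or compile the shaders with D3DCOMPILE_PACK_MATRIX_ROW_MAJOR.

// Whichever option is chosen, it has to be applied consistently to every
// matrix: model, view, projection, and the normal matrix.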

I am really at a loss here. Any help or tips on what else I could try would be greatly appreciated. I am happy to provide more information about the code if necessary.

1 Answer:

Answer 0 (score: 1)

Since I could not find out what the problem was, I implemented this solution instead, which works well and was written with DirectX in mind from the start. I am marking this as resolved.