所以我尝试实施SSAO,但它没有按预期工作。 它似乎在位置z = 0(世界空间)处分裂,在位置z = 0处存在白线。此外,遮挡也看起来不正确。
再加上更远的距离,所以当移动相机时,遮挡变得更加怪异
我的着色器渲染几何体(instanciated):
顶点:
#version 330 core
layout(location = 0) in vec3 vertexPosition;
layout(location = 1) in vec2 vertexUV;
layout(location = 2) in vec3 vertexColor;
layout(location = 3) in vec3 vertexNormal;
layout(location = 4) in mat4 offset;
layout(location = 8) in vec4 instanceColor;
uniform mat4 Projection;
uniform mat4 View;
out vec2 UV;
out vec4 Color;
out vec3 Normal;
void main()
{
mat4 Model = offset;
mat4 MVP = Projection * View * Model;
vec4 Pos = MVP * vec4(endpos,1);
gl_Position = Pos;
UV = vertexUV;
Color = instanceColor;
Normal = normalize((Model * vec4(vertexNormal,0)).xyz);
}
片段:
#version 330 core
in vec2 UV;
in vec4 Color;
in vec3 Normal;
uniform sampler2D Diffuse;
void main()
{
gl_FragData[0] = vec4(Color);
gl_FragData[1] = (vec4(Normal,1)+1)/2;
}
在Geometry通过后,我使用正常和深度信息应用SSAO传递。
这是我的NoiseTexture:
我使用硬件深度缓冲区。 我在世界空间里计算一切。
这是片段着色器:
#version 330 core
#define KERNEL_SIZE 16
uniform sampler2D NormalMap;
uniform sampler2D DepthMap;
uniform sampler2D NoiseTexture;
uniform vec2 NoiseScale;
uniform vec2 Resolution;
uniform mat4 InvertViewProjection;
uniform float g_sample_rad = 0.1;
uniform float g_intensity = 2.0;
uniform float g_scale = 0.1;
uniform float g_bias = 0.0;
vec2 CalcTexCoord()
{
return gl_FragCoord.xy / Resolution;
}
vec3 getPosition(vec2 uv)
{
vec4 worldpos;
float depth = texture2D(DepthMap, uv).r;
worldpos.x = uv.x * 2.0f - 1.0f;
worldpos.y = uv.y * 2.0f - 1.0f;
worldpos.z = depth * 2.0f - 1.0f;
worldpos.w = 1.0;
worldpos = InvertViewProjection * worldpos;
worldpos /= worldpos.w;
return worldpos.rgb;
}
vec3 getNormal(vec2 uv)
{
return normalize(texture2D(NormalMap, uv).xyz * 2.0f - 1.0f);
}
vec2 getRandom(vec2 uv)
{
return normalize(texture2D(NoiseTexture, Resolution*uv / NoiseScale).xy * 2.0f - 1.0f);
}
float doAmbientOcclusion(in vec2 tcoord, in vec2 uv, in vec3 p, in vec3 cnorm)
{
vec3 diff = getPosition(tcoord + uv) - p;
vec3 v = normalize(diff);
float d = length(diff)*g_scale;
return max(0.0, dot(cnorm, v) - g_bias)*(1.0 / (1.0 + d))*g_intensity;
}
void main()
{
vec4 Kernels[KERNEL_SIZE] =
vec4[](
vec4(0.355512, -0.709318, -0.102371, 0.0 ),
vec4(0.534186, 0.71511, -0.115167, 0.0 ),
vec4(-0.87866, 0.157139, -0.115167, 0.0 ),
vec4(0.140679, -0.475516, -0.0639818, 0.0 ),
vec4(-0.0796121, 0.158842, -0.677075, 0.0 ),
vec4(-0.0759516, -0.101676, -0.483625, 0.0 ),
vec4(0.12493, -0.0223423, -0.483625, 0.0 ),
vec4(-0.0720074, 0.243395, -0.967251, 0.0 ),
vec4(-0.207641, 0.414286, 0.187755, 0.0 ),
vec4(-0.277332, -0.371262, 0.187755, 0.0 ),
vec4(0.63864, -0.114214, 0.262857, 0.0 ),
vec4(-0.184051, 0.622119, 0.262857, 0.0 ),
vec4(0.110007, -0.219486, 0.435574, 0.0 ),
vec4(0.235085, 0.314707, 0.696918, 0.0 ),
vec4(-0.290012, 0.0518654, 0.522688, 0.0 ),
vec4(0.0975089, -0.329594, 0.609803, 0.0 )
);
vec2 uv = CalcTexCoord(); //same as UV Coordinate from Vertex
vec3 p = getPosition(uv);
vec3 n = getNormal(uv);
vec2 rand = getRandom(uv);
float ao = 0.0f;
float rad = g_sample_rad / p.z;
for (int j = 0; j < KERNEL_SIZE; ++j)
{
vec2 coord = reflect(Kernels[j].xy, rand)*rad;
ao += doAmbientOcclusion(uv, coord, p, n);
}
ao /= KERNEL_SIZE;
ao = 1 - (ao);
gl_FragColor = vec4(ao,ao,ao, 1);
}
答案 0 :(得分:0)
我通过逐步调试代码来解决这个问题。
我在世界空间中计算一切。处理那里的一切都比较容易。 我查看了一个使用视图空间的教程,并将我需要的所有内容更改为世界空间。
错误在于:
float rad = g_sample_rad / p.z;
根据距离计算采样半径。在视图空间中,这将计算距摄像机的距离的采样半径。 但是在世界空间中,这会计算距离世界坐标的距离,这导致它在z = 0时看起来很奇怪,甚至在更远的地方更奇怪。
所以我采取的措施很简单:
vec4 viewpos = CamView * vec4(p,1);
float ao = 0.0f;
float rad = g_sample_rad/viewpos.z;
我将点更改为查看空间并计算了视图空间中的采样半径,从而考虑了与相机的距离。
这解决了它。
我和g_values玩了一下,以满足我的需求。看看它到处都是一样的。这应该是它的样子。