I want to downsample an input texture from 800x600 to a quarter of its size (200x150 pixels). But when I do, I only see a small portion of the image; it seems the fragment shader does not downsample the whole texture. The example below is part of a depth-of-field effect.
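For context, the pass is set up on the host side roughly like this (a minimal sketch, the framebuffer and texture handle names are placeholders and not my actual variables):

// Sketch of the downsample pass setup (placeholder handle names).
// The 800x600 scene textures are the inputs; the 200x150 texture is the render target.
glBindFramebuffer(GL_FRAMEBUFFER, DownsampleFbo);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, DownsampledColorTex, 0);
glViewport(0, 0, 200, 150);                      // viewport matches the quarter-size target

glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, SceneColorTex);     // 800x600 input, bound to g_ColorTex
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, SceneDepthTex);     // 800x600 input, bound to g_DepthTex
// ...then the full-screen quad shown further below is drawn with the two shaders.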
Vertex shader:
#version 330
uniform UVertFrameBuffer
{
    mat4 m_ScreenMatrix;
};
uniform UDofInvertedScreenSize
{
    vec2 m_InvertedScreenSize;
};
// -----------------------------------------------------------------------------
// in variables
// -----------------------------------------------------------------------------
layout(location = 0) in vec3 VertexPosition;
// -----------------------------------------------------------------------------
// out variables
// -----------------------------------------------------------------------------
struct SPixelCoords
{
    vec2 tcColor0;
    vec2 tcColor1;
};
out SPixelCoords vs_PixelCoords;
// -----------------------------------------------------------------------------
// Program
// -----------------------------------------------------------------------------
void main()
{
    vec4 Position            = vec4(VertexPosition.xy, 0.0f, 1.0f);
    vec4 PositionInClipSpace = m_ScreenMatrix * Position;
    vec2 ScreenCoords        = VertexPosition.xy;
    // -----------------------------------------------------------------------------
    vs_PixelCoords.tcColor0 = ScreenCoords + vec2(-1.0f, -1.0f) * m_InvertedScreenSize;
    vs_PixelCoords.tcColor1 = ScreenCoords + vec2(+1.0f, -1.0f) * m_InvertedScreenSize;
    // -----------------------------------------------------------------------------
    gl_Position = PositionInClipSpace;
}
Fragment shader:
#version 330
uniform sampler2D g_ColorTex;
uniform sampler2D g_DepthTex;
uniform UDofDownBuffer
{
    vec2 m_DofNear;
    vec2 m_DofRowDelta;
};
// -----------------------------------------------------------------------------
// Inputs per vertex
// -----------------------------------------------------------------------------
struct SPixelCoords
{
    vec2 tcColor0;
    vec2 tcColor1;
};
in SPixelCoords vs_PixelCoords;
// -----------------------------------------------------------------------------
// Output to graphics card
// -----------------------------------------------------------------------------
layout (location = 0) out vec4 FragColor;
// -----------------------------------------------------------------------------
// Program
// -----------------------------------------------------------------------------
void main()
{
    // Initialize variables
    vec3 Color;
    float MaxCoc;
    vec4 Depth;
    vec4 CurCoc;
    vec4 Coc;
    vec2 RowOfs[4];
    // Calculate row offsets
    RowOfs[0] = vec2(0.0f);
    RowOfs[1] = m_DofRowDelta.xy;
    RowOfs[2] = m_DofRowDelta.xy * 2.0f;
    RowOfs[3] = m_DofRowDelta.xy * 3.0f;
    // Bilinear filtering to average 4 color samples
    Color = vec3(0.0f);
    Color += texture(g_ColorTex, vs_PixelCoords.tcColor0.xy + RowOfs[0]).rgb;
    Color += texture(g_ColorTex, vs_PixelCoords.tcColor1.xy + RowOfs[0]).rgb;
    Color += texture(g_ColorTex, vs_PixelCoords.tcColor0.xy + RowOfs[2]).rgb;
    Color += texture(g_ColorTex, vs_PixelCoords.tcColor1.xy + RowOfs[2]).rgb;
    Color /= 4.0f;
    // Calculate CoC
    ...
    // Calculate fragment color
    FragColor = vec4(Color, MaxCoc);
}
The input texture is 800x600 and the output texture is 200x150 pixels. For m_InvertedScreenSize I use 1/800 and 1/600. Is that right?
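In other words, the uniform block just gets one texel of the input texture, filled roughly like this (sketch, placeholder UBO handle, std140 layout assumed):

// One texel of the 800x600 input texture (DofInvertedScreenSizeUbo is a placeholder handle).
const float InvertedScreenSize[2] = { 1.0f / 800.0f, 1.0f / 600.0f };
glBindBuffer(GL_UNIFORM_BUFFER, DofInvertedScreenSizeUbo);
glBufferSubData(GL_UNIFORM_BUFFER, 0, sizeof(InvertedScreenSize), InvertedScreenSize);
glBindBuffer(GL_UNIFORM_BUFFER, 0);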
I upload two triangles that cover the screen in OpenGL screen coordinates (see the upload sketch after the arrays):
float QuadVertices[][3] = {
    { 0.0f, 1.0f, 0.0f, },
    { 1.0f, 1.0f, 0.0f, },
    { 1.0f, 0.0f, 0.0f, },
    { 0.0f, 0.0f, 0.0f, },
};

GLuint QuadIndices[][3] = {
    { 0, 1, 2, },
    { 0, 2, 3, },
};
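The quad is uploaded and drawn roughly as follows (sketch, the VAO and buffer names are placeholders; attribute location 0 matches VertexPosition in the vertex shader):

// Upload the full-screen quad (placeholder names).
GLuint Vao, Vbo, Ibo;
glGenVertexArrays(1, &Vao);
glGenBuffers(1, &Vbo);
glGenBuffers(1, &Ibo);

glBindVertexArray(Vao);
glBindBuffer(GL_ARRAY_BUFFER, Vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(QuadVertices), QuadVertices, GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, nullptr);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, Ibo);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(QuadIndices), QuadIndices, GL_STATIC_DRAW);

// Draw the two triangles (6 indices).
glBindVertexArray(Vao);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, nullptr);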
My screen matrix transforms these vertices into clip space through an orthographic projection (the expected matrix is sketched below the setup):
Position(0.0f, 0.0f, 1.0f);
Target(0.0f, 0.0f, 0.0f);
Up(0.0f, 1.0f, 0.0f);
LookAt(Position, Target, Up);
SetOrthographic(0.0f, 1.0f, 0.0f, 1.0f, -1.0f, 1.0f);
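With those parameters, SetOrthographic should build the usual glOrtho-style matrix that maps [0,1] x [0,1] onto [-1,1] x [-1,1]. Shown here column-major as OpenGL expects, projection part only, without the LookAt:

// glOrtho-style matrix for (l=0, r=1, b=0, t=1, n=-1, f=1):
//   x' = 2x - 1,   y' = 2y - 1,   z' = -z
const float Ortho[16] = {
     2.0f,  0.0f,  0.0f, 0.0f,   // column 0
     0.0f,  2.0f,  0.0f, 0.0f,   // column 1
     0.0f,  0.0f, -1.0f, 0.0f,   // column 2
    -1.0f, -1.0f,  0.0f, 1.0f,   // column 3 (translation)
};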
The following images show the input texture and the results. The first is the original image without downsampling. The second is the actual downsampled texture. The third is the downsampled texture when I additionally compute ScreenCoords *= 4.0f;.