How do I get the z coordinate of an object in 3D space when I click on it? (It is not really an object, more a graphic; I need to know what the user selected.) I am using JOGL.
Answer (score: 0):
I just finished porting the picking sample from the g-truck ogl-samples.
I will try to give you a quick explanation of the code.
We start by enabling the depth test:
private boolean initTest(GL4 gl4) {
    gl4.glEnable(GL_DEPTH_TEST);
    return true;
}
In the initBuffer we:
- generate all the buffers we need with glGenBuffers
- bind each buffer and upload its data with glBufferData; the call affects whatever buffer is currently bound to the target given as the first argument, GL_ELEMENT_ARRAY_BUFFER in this case
- read GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT (it is a global parameter) to determine the minimum uniform block size for storing our transform variables; this is needed if we want to bind the block via glBindBufferRange, a function we will not use for the picking buffer, which is why there we pass just the size of a single float, Float.BYTES
- note that the last argument of glBufferData is only a hint (it is up to OpenGL and the driver what to do with it): as you can see it is static for the indices and vertices, because we will not change them anymore, but dynamic for the uniform and picking buffers, since we will update them every frame.
Code:
private boolean initBuffer(GL4 gl4) {

    gl4.glGenBuffers(Buffer.MAX.ordinal(), bufferName, 0);

    gl4.glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, bufferName[Buffer.ELEMENT.ordinal()]);
    ShortBuffer elementBuffer = GLBuffers.newDirectShortBuffer(elementData);
    gl4.glBufferData(GL_ELEMENT_ARRAY_BUFFER, elementSize, elementBuffer, GL_STATIC_DRAW);
    gl4.glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);

    gl4.glBindBuffer(GL_ARRAY_BUFFER, bufferName[Buffer.VERTEX.ordinal()]);
    FloatBuffer vertexBuffer = GLBuffers.newDirectFloatBuffer(vertexData);
    gl4.glBufferData(GL_ARRAY_BUFFER, vertexSize, vertexBuffer, GL_STATIC_DRAW);
    gl4.glBindBuffer(GL_ARRAY_BUFFER, 0);

    int[] uniformBufferOffset = {0};
    gl4.glGetIntegerv(GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT, uniformBufferOffset, 0);
    int uniformBlockSize = Math.max(projection.length * Float.BYTES, uniformBufferOffset[0]);

    gl4.glBindBuffer(GL_UNIFORM_BUFFER, bufferName[Buffer.TRANSFORM.ordinal()]);
    gl4.glBufferData(GL_UNIFORM_BUFFER, uniformBlockSize, null, GL_DYNAMIC_DRAW);
    gl4.glBindBuffer(GL_UNIFORM_BUFFER, 0);

    gl4.glBindBuffer(GL_TEXTURE_BUFFER, bufferName[Buffer.PICKING.ordinal()]);
    gl4.glBufferData(GL_TEXTURE_BUFFER, Float.BYTES, null, GL_DYNAMIC_READ);
    gl4.glBindBuffer(GL_TEXTURE_BUFFER, 0);

    return true;
}
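As an aside: if you did want to expose a sub-range of the transform buffer to the shader with glBindBufferRange (which this sample does not do), the byte offset you pass must be a multiple of the alignment queried above. A minimal sketch under that assumption, reusing the local names from initBuffer:

// Hedged sketch, not part of the sample: glBindBufferRange requires the offset
// to be a multiple of GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT (uniformBufferOffset[0]).
int alignedBlockSize = Math.max(projection.length * Float.BYTES, uniformBufferOffset[0]);
gl4.glBindBufferRange(GL_UNIFORM_BUFFER, Semantic.Uniform.TRANSFORM0,
        bufferName[Buffer.TRANSFORM.ordinal()],
        0,                    // byte offset of the block, must respect the alignment
        alignedBlockSize);    // size of the block exposed to the shader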
In the initTexture, where we initialize the textures, we:
- generate both textures with glGenTextures
- set GL_UNPACK_ALIGNMENT to 1 (the default is usually 4 bytes) to avoid any problem, because the byte size of each texture row must match this alignment (for example, an RGB8 texture whose row width in bytes is not a multiple of 4 would otherwise be read with padding)
- set the active texture unit GL_TEXTURE0; there is a fixed number of texture slots and you need to select one before working on any texture
- bind the diffuse texture, set its swizzle, mipmap levels and filters, allocate its storage with glTexStorage2D and upload the data of each level
- restore GL_UNPACK_ALIGNMENT to its default
- bind our other texture, PICKING, and associate the PICKING texture with the PICKING buffer via glTexBuffer.
Code:
private boolean initTexture(GL4 gl4) {
    try {
        jgli.Texture2D texture = new Texture2D(jgli.Load.load(TEXTURE_ROOT + "/" + TEXTURE_DIFFUSE));
        jgli.Gl.Format format = jgli.Gl.instance.translate(texture.format());

        gl4.glGenTextures(Texture.MAX.ordinal(), textureName, 0);

        // Diffuse
        {
            gl4.glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
            gl4.glActiveTexture(GL_TEXTURE0);
            gl4.glBindTexture(GL_TEXTURE_2D, textureName[Texture.DIFFUSE.ordinal()]);
            gl4.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_R, GL_RED);
            gl4.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_G, GL_GREEN);
            gl4.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_B, GL_BLUE);
            gl4.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_A, GL_ALPHA);
            gl4.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_BASE_LEVEL, 0);
            gl4.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, texture.levels() - 1);
            gl4.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
            gl4.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
            gl4.glTexStorage2D(GL_TEXTURE_2D, texture.levels(), format.internal.value,
                    texture.dimensions(0)[0], texture.dimensions(0)[1]);
            for (int level = 0; level < texture.levels(); ++level) {
                gl4.glTexSubImage2D(GL_TEXTURE_2D, level,
                        0, 0,
                        texture.dimensions(level)[0], texture.dimensions(level)[1],
                        format.external.value, format.type.value,
                        texture.data(0, 0, level));
            }
            gl4.glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
        }
        // Picking
        {
            gl4.glBindTexture(GL_TEXTURE_BUFFER, textureName[Texture.PICKING.ordinal()]);
            gl4.glTexBuffer(GL_TEXTURE_BUFFER, GL_R32F, bufferName[Buffer.PICKING.ordinal()]);
            gl4.glBindTexture(GL_TEXTURE_BUFFER, 0);
        }
    } catch (IOException ex) {
        Logger.getLogger(Gl_420_picking.class.getName()).log(Level.SEVERE, null, ex);
    }
    return true;
}
In the initProgram we initialize our program by:
- generating a program pipeline with glGenProgramPipelines
- creating the vertex shader code vertShaderCode, where GL_VERTEX_SHADER is the shader type, SHADERS_ROOT is the location of the shader sources, SHADERS_SOURCE_UPDATE is the file name and "vert" is the extension
- initializing the shaderProgram, retrieving its programName and marking it separable with glProgramParameteri
- adding both shaders to the shaderProgram and linking and compiling it with link
- telling the pipelineName which program stages it has with glUseProgramStages.
Code:
private boolean initProgram(GL4 gl4) {

    boolean validated = true;

    gl4.glGenProgramPipelines(1, pipelineName, 0);

    // Create program
    if (validated) {

        ShaderProgram shaderProgram = new ShaderProgram();

        ShaderCode vertShaderCode = ShaderCode.create(gl4, GL_VERTEX_SHADER,
                this.getClass(), SHADERS_ROOT, null, SHADERS_SOURCE_UPDATE, "vert", null, true);
        ShaderCode fragShaderCode = ShaderCode.create(gl4, GL_FRAGMENT_SHADER,
                this.getClass(), SHADERS_ROOT, null, SHADERS_SOURCE_UPDATE, "frag", null, true);

        shaderProgram.init(gl4);

        programName = shaderProgram.program();
        gl4.glProgramParameteri(programName, GL_PROGRAM_SEPARABLE, GL_TRUE);

        shaderProgram.add(vertShaderCode);
        shaderProgram.add(fragShaderCode);
        shaderProgram.link(gl4, System.out);
    }

    if (validated) {
        gl4.glUseProgramStages(pipelineName[0], GL_VERTEX_SHADER_BIT | GL_FRAGMENT_SHADER_BIT, programName);
    }

    return validated & checkError(gl4, "initProgram");
}
In the initVertexArray we:
- generate a single vertex array with glGenVertexArrays and bind it with glBindVertexArray
- set up the position attribute with glVertexAttribPointer: the index Semantic.Attr.POSITION (this will match the one in the vertex shader), the component count 2, the type GL_FLOAT, normalized false, the stride (that is, the total size of each vertex) 2 * 2 * Float.BYTES and the offset inside the vertex 0; the texture coordinate attribute is set up the same way
- note that the GL_ARRAY_BUFFER has to be bound at that moment, so OpenGL knows which buffer these parameters refer to
- enable both attributes with glEnableVertexAttribArray and bind the element buffer, which also becomes part of the vertex array state.
Code:
private boolean initVertexArray(GL4 gl4) {

    gl4.glGenVertexArrays(1, vertexArrayName, 0);
    gl4.glBindVertexArray(vertexArrayName[0]);
    {
        gl4.glBindBuffer(GL_ARRAY_BUFFER, bufferName[Buffer.VERTEX.ordinal()]);
        gl4.glVertexAttribPointer(Semantic.Attr.POSITION, 2, GL_FLOAT, false, 2 * 2 * Float.BYTES, 0);
        gl4.glVertexAttribPointer(Semantic.Attr.TEXCOORD, 2, GL_FLOAT, false, 2 * 2 * Float.BYTES, 2 * Float.BYTES);
        gl4.glBindBuffer(GL_ARRAY_BUFFER, 0);

        gl4.glEnableVertexAttribArray(Semantic.Attr.POSITION);
        gl4.glEnableVertexAttribArray(Semantic.Attr.TEXCOORD);

        gl4.glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, bufferName[Buffer.ELEMENT.ordinal()]);
    }
    gl4.glBindVertexArray(0);

    return true;
}
In the render we:
- bind the TRANSFORM buffer and map it into the ByteBuffer pointer, build the new mvp matrix, write it through the pointer, rewind the buffer (its position is set back to 0) and unmap it so the uniform buffer gets uploaded
- set the viewport and clear the depth buffer with depthValue and the color buffer with the color {1.0f, 0.5f, 0.0f, 1.0f}
- bind the program pipeline, the diffuse texture, the picking image, the vertex array and the transform uniform buffer
- issue the draw call: glDrawElementsInstancedBaseVertexBaseInstance is overkill here, what matters is the primitive type GL_TRIANGLES, the number of indices elementCount and their type GL_UNSIGNED_SHORT
- map the picking buffer, read back the depth the fragment shader saved there and print it.
Code:
@Override
protected boolean render(GL gl) {

    GL4 gl4 = (GL4) gl;

    {
        gl4.glBindBuffer(GL_UNIFORM_BUFFER, bufferName[Buffer.TRANSFORM.ordinal()]);
        ByteBuffer pointer = gl4.glMapBufferRange(
                GL_UNIFORM_BUFFER, 0, projection.length * Float.BYTES,
                GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_BUFFER_BIT);

        FloatUtil.makePerspective(projection, 0, true, (float) Math.PI * 0.25f,
                (float) windowSize.x / windowSize.y, 0.1f, 100.0f);
        FloatUtil.makeIdentity(model);
        FloatUtil.multMatrix(projection, view());
        FloatUtil.multMatrix(projection, model);

        for (float f : projection) {
            pointer.putFloat(f);
        }
        pointer.rewind();

        // Make sure the uniform buffer is uploaded
        gl4.glUnmapBuffer(GL_UNIFORM_BUFFER);
    }

    gl4.glViewportIndexedf(0, 0, 0, windowSize.x, windowSize.y);

    float[] depthValue = {1.0f};
    gl4.glClearBufferfv(GL_DEPTH, 0, depthValue, 0);
    gl4.glClearBufferfv(GL_COLOR, 0, new float[]{1.0f, 0.5f, 0.0f, 1.0f}, 0);

    gl4.glBindProgramPipeline(pipelineName[0]);
    gl4.glActiveTexture(GL_TEXTURE0);
    gl4.glBindTexture(GL_TEXTURE_2D, textureName[Texture.DIFFUSE.ordinal()]);
    gl4.glBindImageTexture(Semantic.Image.PICKING, textureName[Texture.PICKING.ordinal()],
            0, false, 0, GL_WRITE_ONLY, GL_R32F);
    gl4.glBindVertexArray(vertexArrayName[0]);
    gl4.glBindBufferBase(GL_UNIFORM_BUFFER, Semantic.Uniform.TRANSFORM0, bufferName[Buffer.TRANSFORM.ordinal()]);

    gl4.glDrawElementsInstancedBaseVertexBaseInstance(GL_TRIANGLES, elementCount, GL_UNSIGNED_SHORT, 0, 5, 0, 0);

    gl4.glBindBuffer(GL_ARRAY_BUFFER, bufferName[Buffer.PICKING.ordinal()]);
    ByteBuffer pointer = gl4.glMapBufferRange(GL_ARRAY_BUFFER, 0, Float.BYTES, GL_MAP_READ_BIT);
    float depth = pointer.getFloat();
    gl4.glUnmapBuffer(GL_ARRAY_BUFFER);
    System.out.printf("Depth: %2.3f\n", depth);

    return true;
}
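The depth printed here is the window-space value in [0, 1] written by the fragment shader. If what you actually need is the eye-space z of the picked point, you can linearize it; a minimal sketch, assuming the same near/far planes (0.1f and 100.0f) used in the FloatUtil.makePerspective call above and the default depth range:

// Hedged sketch: convert the window-space depth read back above into eye-space z.
float near = 0.1f, far = 100.0f;               // must match the projection in render()
float ndcZ = 2.0f * depth - 1.0f;              // [0, 1] window depth -> [-1, 1] NDC
float eyeZ = 2.0f * far * near / ((far - near) * ndcZ - (far + near));
// eyeZ is negative because OpenGL eye space looks down -z;
// -eyeZ is the distance from the camera to the picked fragment.
System.out.printf("Eye-space z: %2.3f%n", eyeZ);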
In our vertex shader, executed once for each vertex, we find the Semantic constants we saw before, the uniform buffer Transform, the output Block carrying texCoord, and gl_Position, our vertex in clip space. The incoming position vertex is in model space: multiplied by the model matrix it becomes a vertex in world space, by the view/camera matrix a vertex in camera/view space, and by the projection matrix a vertex in clip space.
Code:
#version 420 core

#define POSITION    0
#define COLOR       3
#define TEXCOORD    4
#define TRANSFORM0  1

precision highp float;
precision highp int;
layout(std140, column_major) uniform;

layout(binding = TRANSFORM0) uniform Transform
{
    mat4 mvp;
} transform;

layout(location = POSITION) in vec3 position;
layout(location = TEXCOORD) in vec2 texCoord;

out gl_PerVertex
{
    vec4 gl_Position;
};

out Block
{
    vec2 texCoord;
} outBlock;

void main()
{
    outBlock.texCoord = texCoord;
    gl_Position = transform.mvp * vec4(position, 1.0);
}
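The Semantic class referenced from the Java side is just a collection of integer constants shared between the host code and the shaders; a sketch of what it could look like, with values taken from the #defines and binding points of this sample's shaders (the actual class in jogl-samples may contain more entries):

// Hedged sketch of the shared constants; values mirror the shader sources of this sample.
public class Semantic {

    public interface Attr {
        int POSITION = 0;   // matches "#define POSITION 0" in the vertex shader
        int COLOR = 3;      // matches "#define COLOR 3"
        int TEXCOORD = 4;   // matches "#define TEXCOORD 4"
    }

    public interface Uniform {
        int TRANSFORM0 = 1; // matches "#define TRANSFORM0 1"
    }

    public interface Image {
        int PICKING = 1;    // matches "binding = 1" of the imageBuffer in the fragment shader below
    }
}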
After the vertex shader there may be other stages, such as tessellation control/evaluation and geometry, but they are not mandatory. The last stage is the fragment shader, executed once per fragment/pixel. It starts in a similar way; then we:
- declare the texture diffuse on binding 0, which matches the glActiveTexture(GL_TEXTURE0) inside render, and the imageBuffer depth on binding 1, where we will save our picking depth, matching Semantic.Image.PICKING in the glBindImageTexture call inside render
- declare the incoming Block holding the texture coordinates and the outgoing color
- if the current fragment coordinate gl_FragCoord (a built-in variable) corresponds to the picking coordinate pickingCoord, we save the current z value gl_FragCoord.z in the imageBuffer depth and set the output color to vec4(1, 0, 1, 1); otherwise we set it to the diffuse map, texture(diffuse, inBlock.texCoord.st). st is part of the stpq selection, a synonym of xyzw or rgba.
Code:
#version 420 core

#define FRAG_COLOR  0

precision highp float;
precision highp int;
layout(std140, column_major) uniform;
in vec4 gl_FragCoord;

layout(binding = 0) uniform sampler2D diffuse;
layout(binding = 1, r32f) writeonly uniform imageBuffer depth;

uvec2 pickingCoord = uvec2(320, 240);

in Block
{
    vec2 texCoord;
} inBlock;

layout(location = FRAG_COLOR, index = 0) out vec4 color;

void main()
{
    if(all(equal(pickingCoord, uvec2(gl_FragCoord.xy))))
    {
        imageStore(depth, 0, vec4(gl_FragCoord.z, 0, 0, 0));
        color = vec4(1, 0, 1, 1);
    }
    else
        color = texture(diffuse, inBlock.texCoord.st);
}
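Note that pickingCoord is hardcoded to (320, 240), the center of the default window. To pick where the user actually clicked, one option (a sketch, not part of the sample) is to declare it as a uniform instead, e.g. uniform uvec2 pickingCoord;, and upload the mouse position before drawing, remembering that OpenGL window coordinates have their origin at the bottom-left:

// Hedged sketch, assuming the fragment shader declares "uniform uvec2 pickingCoord;".
// mouseX/mouseY are hypothetical values from your AWT/NEWT MouseListener, in window coordinates.
int pickingCoordLocation = gl4.glGetUniformLocation(programName, "pickingCoord");
gl4.glProgramUniform2ui(programName, pickingCoordLocation,
        mouseX,                        // x is the same in both coordinate systems
        windowSize.y - 1 - mouseY);    // flip y: mouse origin is top-left, GL is bottom-left

Since the program is separable and bound through a pipeline, glProgramUniform* is used here rather than glUniform*, which would require the program to be current.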
Finally, in the end method we clean up all our OpenGL resources.