我正在使用 OpenGL 和 C++ 构建一个应用程序,在屏幕上显示一个透明对象,周围有多个灯光,用来模拟透明对象可能产生的各种反射。然后,我会在固定的摄像机位置截取屏幕快照,并随机移动对象以改变反射效果,把截图保存为本地 BMP 文件,同时生成一个同名的文本文件。
接下来,我需要获取对象在窗口坐标[x,y]中的位置并将其保存在我的输出文本文件中。
现在我的问题是如何获取对象在窗口坐标中的位置。创建的窗口尺寸为 1920 × 1080,我需要得到对象中心在窗口坐标 [x, y] 下的位置。
这是我的主要cpp文件
// Returns a uniformly distributed random value in [-3.0, 3.0].
//
// Fixes in this revision:
//  - The original declared `int` as the return type but computed a
//    uniform_real_distribution<double> value, silently truncating the
//    fraction so only the integers -2..2 (and rarely ±3) could ever be
//    produced. The return type is now `double`; callers feed the result
//    into glm::vec3, which accepts either, so this stays source-compatible.
//  - The engine is seeded once (static local) instead of constructing a
//    fresh std::random_device + engine on every call.
//  - The 100 ms sleep was only a workaround for time-based seeding and is
//    no longer needed now that the engine persists across calls.
double RandGenerator()
{
static std::default_random_engine generator{std::random_device{}()};
std::uniform_real_distribution<double> distribution(-3.0, 3.0);
return distribution(generator);
}
// Returns a uniformly distributed random value in [-3.0, 3.0].
// (Second independent stream, used for the Y offset of the cup.)
//
// Same fixes as RandGenerator():
//  - `int` return type truncated the double distribution value; it is now
//    `double`, which remains source-compatible with the glm::vec3 caller.
//  - Engine is seeded once (static local) rather than rebuilding a
//    std::random_device + engine per call.
//  - The 100 ms seeding-workaround sleep is removed.
double RandGenerator_1()
{
static std::default_random_engine generator{std::random_device{}()};
std::uniform_real_distribution<double> distribution(-3.0, 3.0);
return distribution(generator);
}
// Application entry point.
//
// Sets up a 1920x1080 window, a fixed camera, one directional light and two
// point lights, then renders in a loop:
//   1. an opaque ground plane (depth-tested),
//   2. a blended (transparent) cup model translated to a random XY offset
//      from RandGenerator()/RandGenerator_1() each frame.
//
// NOTE(review): glm::perspective expects the field of view in *radians* in
// GLM >= 0.9.6; if that version is in use, 45.0f here should be
// glm::radians(45.0f) — confirm the GLM version.
int main()
{
// Window/GL-context creation plus geometry and shader setup (project helpers).
mainWindow = Window(1920, 1080);
mainWindow.initialise();
CreateObjects();
CreateShaders();
// Camera at z = +7, yaw -90 (looking down -Z), world up = +Y.
camera = Camera(glm::vec3(0.0f, 0.0f, 7.0f), glm::vec3(0.0f, 1.0f, 0.0f),
-90.0f, 0.0f, 5.0f, 0.2f);
// Materials: (specular intensity, shininess exponent).
shinyMaterial = Material(4.0f, 156);
dullMaterial = Material(0.5f, 4);
groundfloor = Model();
groundfloor.LoadModel("res/models/blender/Floor.obj");
blackhawk = Model();
blackhawk.LoadModel("res/models/blender/cup.obj");
// NOTE(review): cup2 is loaded but never rendered below — dead asset?
cup2 = Model();
cup2.LoadModel("res/models/blender/cup.obj");
mainLight = DirectionalLight(1.0f, 1.0f, 1.0f,
0.1f, 0.1f,
0.5f, -1.0f, 20.0f);
// Two point lights; pointLightCount tracks how many slots are populated.
unsigned int pointLightCount = 0;
pointLights[0] = PointLight(0.0f, 0.0f,1.0f,
1.0f, 1.0f,
3.0f, 0.0f, 1.0f,
1.0f, 0.2f, 0.1f);
pointLightCount++;
pointLights[1] = PointLight(1.0f, 1.0f, 1.0f,
1.0f, 1.0f,
-3.0f, 0.0f, 1.0f,
1.0f, 0.2f, 0.1f);
pointLightCount++;
// Cached uniform locations, refreshed every frame after UseShader().
GLuint uniformProjection = 0, uniformModel = 0, uniformView = 0,
uniformEyePosition = 0,
uniformSpecularIntensity =0,uniformShininess = 0;
// Projection is fixed for the life of the window (aspect = width / height).
glm::mat4 projection = glm::perspective(45.0f,
(GLfloat)mainWindow.getBufferWidth() /
(GLfloat)mainWindow.getBufferHeight(),
0.1f, 100.0f);
//Main game Loop
while (!mainWindow.getShouldClose())
{
// Frame timing used to scale camera movement speed.
GLfloat now = glfwGetTime();
deltaTime = now - lastTime;
lastTime = now;
// Handle user inputs and events
glfwPollEvents();
camera.keyControl(mainWindow.getKeys(), deltaTime);
camera.mouseControl(mainWindow.getXchange(), mainWindow.getYchange());
//clear the window
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Bind the (single) shader program and re-query its uniform locations.
shaderList[0].UseShader();
uniformProjection = shaderList[0].GetProjectionLocation();
uniformModel = shaderList[0].GetModelLocation();
uniformView = shaderList[0].GetViewLocation();
uniformEyePosition = shaderList[0].GetEyePosition();
uniformSpecularIntensity = shaderList[0].GetSpecularIntensityLoc();
uniformShininess = shaderList[0].GetShininessLoc();
// NOTE(review): lowerLight, camerayaw and dlightPos are computed but
// never used below — presumably leftovers from a flashlight experiment.
glm::vec3 lowerLight = camera.GetCameraPosition();
lowerLight.y -= 0.3f;
glm::vec3 camerayaw = camera.GetCameraDirection();
glm::vec3 dlightPos = mainLight.GetLightPos();
shaderList[0].SetDirectionalLight(&mainLight);
shaderList[0].SetPointLights(pointLights, pointLightCount);
shaderList[0].SetSpotLights(spotLights, spotLightCount);
glUniformMatrix4fv(uniformProjection, 1, GL_FALSE,
glm::value_ptr(projection));
glUniformMatrix4fv(uniformView, 1, GL_FALSE,
glm::value_ptr(camera.calulateViewMatrix()));
glUniform3f(uniformEyePosition, camera.GetCameraPosition().x,
camera.GetCameraPosition().y, camera.GetCameraPosition().z);
// --- Opaque pass: ground plane, depth-tested ---
glEnable(GL_DEPTH_TEST);
model = glm::mat4(1.0f);
model = glm::translate(model, glm::vec3(0.0f, 0.0f, -0.46f));
model = glm::scale(model, glm::vec3(0.5f, 0.5f, 0.5f));
model = glm::rotate(model, 90.0f*toRad, glm::vec3(1.0f, 0.0f, 0.0f));
glUniformMatrix4fv(uniformModel, 1, GL_FALSE, glm::value_ptr(model));
dullMaterial.UseMaterial(uniformSpecularIntensity, uniformShininess);
groundfloor.RenderModel();
// --- Transparent pass: cup, alpha blending on, depth writes off ---
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glCullFace(GL_BACK);
glEnable(GL_CULL_FACE);
glDepthMask(GL_FALSE);
// Random XY offset (nominally in [-3, 3]) for this frame's cup placement.
glm::vec3 translation(RandGenerator(), RandGenerator_1(), 0.0f);
model = glm::mat4(1.0f);
model = glm::translate(model, translation);
model = glm::scale(model, glm::vec3(1.0f, 1.0f, 1.0f));
viewmat = camera.calulateViewMatrix();
// model_view is the full clip-space transform (projection * view * model).
// It is currently unused, but it is exactly what the window-coordinate
// question needs: clip = model_view * vec4(objectCenter, 1.0);
// ndc = clip.xyz / clip.w; winX = (ndc.x + 1) / 2 * width;
// winY = (1 - ndc.y) / 2 * height (window Y runs top-down).
glm::mat4 model_view = projection * viewmat * model;
glUniformMatrix4fv(uniformModel, 1, GL_FALSE, glm::value_ptr(model));
shinyMaterial.UseMaterial(uniformSpecularIntensity, uniformShininess);
blackhawk.RenderModel();
// Restore default state for the next frame.
glDisable(GL_CULL_FACE);
glDisable(GL_BLEND);
glDepthMask(GL_TRUE);
glUseProgram(0);
mainWindow.swapBuffers();
}
return 0;}
这是我的Vertex Shader文件
#version 330
// Vertex shader: standard model/view/projection transform, plus the
// varyings the lighting fragment shader needs (per-vertex colour, UVs,
// world-space normal and world-space position).
layout (location =0) in vec3 pos;
layout (location =1) in vec2 tex;
layout (location =2) in vec3 normal;
out vec4 vCol;
out vec2 TexCoords;
out vec3 NormalValue;
out vec3 FragPos;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
// NOTE(review): clipSpacePos is declared but never used in this shader;
// most drivers optimise it out, so glGetUniformLocation will return -1.
uniform vec3 clipSpacePos;
void main()
{
gl_Position = projection * view * model* vec4(pos, 1.0f);
// Colour derived from the vertex position, with a fixed alpha of 0.2 —
// this constant alpha is what makes the object transparent when blending
// is enabled on the CPU side.
vCol = vec4(clamp(pos, 0.0f, 1.0f), 0.2f);
TexCoords = tex;
// Normal matrix (inverse-transpose of model) keeps normals correct
// under non-uniform scaling.
NormalValue = mat3(transpose(inverse(model))) * normal;
// World-space fragment position for the lighting calculations.
FragPos = (model* vec4(pos, 1.0f)).xyz;
}
如果问题很基本,请原谅我,我是OpenGL以及编程方面的初学者。预先感谢,输出图像:
答案 0(得分:0)
要从场景坐标中获取屏幕坐标,您需要在其上应用所有变换矩阵并执行透视划分。
您没有提供任何示例代码,因此不清楚您是使用旧API还是使用新API ...以及所获得的矩阵(或其他任何形式)和乘法顺序...
请参阅以下两个相关的问答(Q&A),其中介绍了所需的数学方法。
现在什么样的反射和透明度?
对于真正的交易,您需要光线跟踪渲染
例如这样的东西:
但是这些通常很耗时...
对于廉价的假货,您可以使用Blending +环境立方体贴图技术
透明性可以这样完成:
对于反射,您只需添加环境立方体贴图。这意味着您在 GL_TEXTURE_CUBE_MAP 中有一个(环境的)天空盒,然后根据表面法线和指向相机的方向计算反射光线……再把对应的立方体贴图纹理元素叠加到结果颜色中……这在旧式 API 中也可行,但使用着色器会更容易。