After many painful hours trying to figure out why my lighting is messed up, I am still at a loss.
The OpenGL normals are correct (back-face culling does not cause any of my triangles to disappear).
I calculate the normals so they can be interpolated for lighting, and all the triangles on the same face share the same normal.
If anyone has any ideas, I would appreciate the help.
I am definitely new to OpenGL, so it is probably a bit obvious in my code.
Here are my shaders:
Vertex shader:
#version 330 core
layout(location = 0) in vec3 Position;
layout(location = 1) in vec3 vertexColor;
in vec3 vNormal;
out vec3 fragmentColor; // Output data ; will be interpolated for each fragment.
uniform mat4 MVP;
uniform mat4 transformMatrix;
uniform vec4 LightPosition;
// output values that will be interpolated per-fragment
out vec3 fN;
out vec3 fE;
out vec3 fL;
void main()
{
fN = vNormal;
fE = Position.xyz;
fL = LightPosition.xyz;
if( LightPosition.w != 0.0 ) {
fL = LightPosition.xyz - Position.xyz;
}
// Output position of the vertex, in clip space : MVP * position
vec4 v = vec4(Position,1); // Transform into a homogeneous 4D vector
gl_Position = MVP * v;
//gl_Position = MVP * v;
// The color of each vertex will be interpolated
// to produce the color of each fragment
//fragmentColor = vertexColor; // take out at some point
}
And the fragment shader, using Phong shading:
#version 330
//out vec3 color;
// per-fragment interpolated values from the vertex shader
in vec3 fN;
in vec3 fL;
in vec3 fE;
out vec4 fColor;
uniform vec4 AmbientProduct, DiffuseProduct, SpecularProduct;
uniform mat4 ModelView;
uniform vec4 LightPosition;
uniform float Shininess;
in vec3 fragmentColor; // Interpolated values from the vertex shaders
void main()
{
// Normalize the input lighting vectors
vec3 N = normalize(fN);
vec3 E = normalize(fE);
vec3 L = normalize(fL);
vec3 H = normalize( L + E );
vec4 ambient = AmbientProduct;
float Kd = max(dot(L, N), 0.0);
vec4 diffuse = Kd*DiffuseProduct;
float Ks = pow(max(dot(N, H), 0.0), Shininess);
vec4 specular = Ks*SpecularProduct;
// discard the specular highlight if the light's behind the vertex
if( dot(L, N) < 0.0 ) {
specular = vec4(0.0, 0.0, 0.0, 1.0);
}
fColor = ambient + diffuse + specular;
fColor.a = 1.0;
//color = vec3(1,0,0);
// Output color = color specified in the vertex shader,
// interpolated between all 3 surrounding vertices
//color = fragmentColor;
}
void setMatrices()
{
GLfloat FoV = 45; // the zoom of the camera
glm::vec3 cameraPosition(4,3,3), // the position of your camera, in world space // change to see what happens
cameraTarget(0,0,0), // where you want to look at, in world space
upVector(0,-1,0);
// Projection matrix : 45° Field of View, 1:1 ratio, display range : 0.001 unit <-> 100 units
glm::mat4 Projection = glm::perspective(FoV, 3.0f / 3.0f, 0.001f, 100.0f); // ratio needs to change here when the screen size/ratio changes
// Camera matrix
glm::mat4 View = glm::lookAt(
cameraPosition, // Camera is at (4,3,3), in World Space
cameraTarget, // and looks at the origin
upVector // Head is up (set to 0,-1,0 to look upside-down)
);
// Model matrix : an identity matrix (model will be at the origin)
glm::mat4 Model = glm::mat4(1.0f); // Changes for each model !
// Our ModelViewProjection : multiplication of our 3 matrices
glm::mat4 MVP = Projection * View * Model * transformMatrix; //matrix multiplication is the other way around
// Get a handle for our "MVP" uniform.
// Only at initialisation time.
GLuint MatrixID = glGetUniformLocation(programID, "MVP");
// Send our transformation to the currently bound shader,
// in the "MVP" uniform
// For each model you render, since the MVP will be different (at least the M part)
glUniformMatrix4fv(MatrixID, 1, GL_FALSE, &MVP[0][0]);
RotationID = glGetUniformLocation(programID,"transformMatrix");
//lighting
cubeNormal = glGetAttribLocation( programID, "vNormal" );
}
void setBuffers()
{
// Get a vertex array object
GLuint VAO;
glGenVertexArrays(1, &VAO);
glBindVertexArray(VAO);
glUseProgram(programID);
// cube buffer objects
glGenBuffers(1, &CubeVertexbuffer); // Generate 1 buffer, put the resulting identifier in vertexbuffer
glBindBuffer(GL_ARRAY_BUFFER, CubeVertexbuffer); // The following commands will talk about our 'vertexbuffer' buffer
glBufferData(GL_ARRAY_BUFFER, sizeof(CubeBufferData), CubeBufferData, GL_STATIC_DRAW); // Give our vertices to OpenGL.
// cube normal objects
glGenBuffers(1, &CubeNormalbuffer); // Generate 1 buffer, put the resulting identifier in vertexbuffer
glBindBuffer(GL_ARRAY_BUFFER, CubeNormalbuffer); // The following commands will talk about our 'vertexbuffer' buffer
glBufferData(GL_ARRAY_BUFFER, sizeof(CubeNormalBufferData), CubeNormalBufferData, GL_STATIC_DRAW); // Give our vertices to OpenGL.
//octahedron buffer objects
glGenBuffers(1, &OctaVertexbuffer); // Generate 1 buffer, put the resulting identifier in vertexbuffer
glBindBuffer(GL_ARRAY_BUFFER, OctaVertexbuffer); // The following commands will talk about our 'vertexbuffer' buffer
glBufferData(GL_ARRAY_BUFFER, sizeof(octahedronBufData), octahedronBufData, GL_STATIC_DRAW); // Give our vertices to OpenGL.
//tetrahedron buffer objects
glGenBuffers(1, &TetraVertexbuffer); // Generate 1 buffer, put the resulting identifier in vertexbuffer
glBindBuffer(GL_ARRAY_BUFFER, TetraVertexbuffer); // The following commands will talk about our 'vertexbuffer' buffer
glBufferData(GL_ARRAY_BUFFER, sizeof(tetrahedronBufData), tetrahedronBufData, GL_STATIC_DRAW); // Give our vertices to OpenGL.
//dodecahedron buffer objects
glGenBuffers(1, &DodecaVertexbuffer); // Generate 1 buffer, put the resulting identifier in vertexbuffer
glBindBuffer(GL_ARRAY_BUFFER, DodecaVertexbuffer); // The following commands will talk about our 'vertexbuffer' buffer
glBufferData(GL_ARRAY_BUFFER, sizeof(dodecahedronBufData), dodecahedronBufData, GL_STATIC_DRAW); // Give our vertices to OpenGL.
//icosahedron buffer objects
glGenBuffers(1, &icosaVertexbuffer); // Generate 1 buffer, put the resulting identifier in vertexbuffer
glBindBuffer(GL_ARRAY_BUFFER, icosaVertexbuffer); // The following commands will talk about our 'vertexbuffer' buffer
glBufferData(GL_ARRAY_BUFFER, sizeof(icosahedronBufData), icosahedronBufData, GL_STATIC_DRAW); // Give our vertices to OpenGL.
//sphere buffer objects
glGenBuffers(1, &sphereVertexbuffer); // Generate 1 buffer, put the resulting identifier in vertexbuffer
glBindBuffer(GL_ARRAY_BUFFER, sphereVertexbuffer); // The following commands will talk about our 'vertexbuffer' buffer
glBufferData(GL_ARRAY_BUFFER, sizeof(sphereBufData), sphereBufData, GL_STATIC_DRAW); // Give our vertices to OpenGL.
glGenBuffers(1, &colorbuffer);
glBindBuffer(GL_ARRAY_BUFFER, colorbuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(g_color_buffer_data), g_color_buffer_data, GL_STATIC_DRAW);
// lighting stuff
// Initialize shader lighting parameters
point4 light_position= { 0.0, 20.0, -10.0, 0.0 };
color4 light_ambient ={ 0.2, 0.2, 0.2, 1.0 };
color4 light_diffuse ={ 1.0, 1.0, 1.0, 1.0 };
color4 light_specular ={ 1.0, 1.0, 1.0, 1.0 };
color4 material_ambient ={ 1.0, 0.0, 1.0, 1.0 };
color4 material_diffuse ={ 1.0, 0.8, 0.0, 1.0 };
color4 material_specular ={ 1.0, 0.8, 0.0, 1.0 };
float material_shininess = 20.0;
color4 ambient_product;
color4 diffuse_product;
color4 specular_product;
int i;
for (i = 0; i < 3; i++) {
ambient_product[i] = light_ambient[i] * material_ambient[i];
diffuse_product[i] = light_diffuse[i] * material_diffuse[i];
specular_product[i] = light_specular[i] * material_specular[i];
}
//printColor("diffuse", diffuse_product);
//printColor("specular", specular_product);
glUniform4fv( glGetUniformLocation(programID, "AmbientProduct"),
1, ambient_product );
glUniform4fv( glGetUniformLocation(programID, "DiffuseProduct"),
1, diffuse_product );
glUniform4fv( glGetUniformLocation(programID, "SpecularProduct"),
1, specular_product );
glUniform4fv( glGetUniformLocation(programID, "LightPosition"),
1, light_position );
glUniform1f( glGetUniformLocation(programID, "Shininess"),
material_shininess );
}
And some more...
void display()
{
setMatrices(); // initialize matrices
// Use our shader
//glUseProgram(programID);
glClearColor(0.0f, 0.0f, 0.3f, 0.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// 2nd attribute buffer : colors
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, colorbuffer);
glVertexAttribPointer(
1, // attribute. No particular reason for 1, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
glEnableVertexAttribArray(0); // 1st attribute buffer : vertices
// enum platosShapes{tet, cube, octah, dodec, icos};
switch(shapeInUse)
{
case tet:
{
glBindBuffer(GL_ARRAY_BUFFER, TetraVertexbuffer);
glVertexAttribPointer(
0, // attribute 0. No particular reason for 0, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
glDrawArrays(GL_TRIANGLES, 0, 4*3); // Starting from vertex 0; 3 vertices total -> 1 triangle // need to know amount of vertices here // and change to triangle strips accordingly
}
break;
case cube:
{
//GLuint cubeNormal = glGetAttribLocation( programID, "vNormal" );
glEnableVertexAttribArray( cubeNormal );
glVertexAttribPointer( cubeNormal, 3, GL_FLOAT, GL_FALSE, 0,
(const GLvoid *) (sizeof(CubeNormalBufferData)) );
//glDisableVertexAttribArray( cubeNormal );
glBindBuffer(GL_ARRAY_BUFFER, CubeVertexbuffer);
glVertexAttribPointer(
0, // attribute 0. No particular reason for 0, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
glDrawArrays(GL_TRIANGLES, 0, 12*3); // Starting from vertex 0; 3 vertices total -> 1 triangle // need to know amount of vertices here // and change to triangle strips accordingly
}
break;
case octah:
{
glBindBuffer(GL_ARRAY_BUFFER, OctaVertexbuffer);
glVertexAttribPointer(
0, // attribute 0. No particular reason for 0, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
glDrawArrays(GL_TRIANGLES, 0, 8*3); // Starting from vertex 0; 3 vertices total -> 1 triangle // need to know amount of vertices here // and change to triangle strips accordingly
}
break;
case dodec:
{
glBindBuffer(GL_ARRAY_BUFFER, DodecaVertexbuffer);
glVertexAttribPointer(
0, // attribute 0. No particular reason for 0, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
glDrawArrays(GL_TRIANGLE_FAN, 0, 5 * 6); // Starting from vertex 0; 3 vertices total -> 1 triangle // need to know amount of vertices here // and change to triangle strips accordingly
glDrawArrays(GL_TRIANGLE_FAN, (5 * 6) + 1, 30);
//glutSolidDodecahedron();
//glDrawArrays(GL_TRIANGLE_STRIP,0,5*12);
}
break;
case icos:
{
glBindBuffer(GL_ARRAY_BUFFER, icosaVertexbuffer);
glVertexAttribPointer(
0, // attribute 0. No particular reason for 0, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
glDrawArrays(GL_TRIANGLES, 0, 3*20); // Starting from vertex 0; 3 vertices total -> 1 triangle // need to know amount of vertices here // and change to triangle strips accordingly
}
break;
case sphere:
{
glBindBuffer(GL_ARRAY_BUFFER, sphereVertexbuffer);
glVertexAttribPointer(
0, // attribute 0. No particular reason for 0, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
//glDrawElements(GL_TRIANGLES, cnt2, GL_UNSIGNED_INT, 0)
glDrawArrays(GL_TRIANGLE_FAN, 0, 100);
}
}
glDisableVertexAttribArray(0);
glFlush();
}
And some more...
void calculateNormals(GLfloat bufData[], GLfloat normBufData[], int size) // probably works
{
int count = 0;
GLfloat temp[9];
for(int i = 0; i < size; i++)
{
temp[count] = bufData[i];
count++;
if((i+1) % 9 == 0)
{
count = 0;
//for(int i = 0; i < 9; i++)
//{
// cout << temp[i] << "!,";
// if((i + 1) % 3 == 0)
// cout << "\n";
//}
calculateCross(temp, normBufData);
}
}
printNormals(normBufData, size);
}
void calculateCross(GLfloat bufData[], GLfloat normBufData[]) // probably works
{
static int counter = 0; // need to reset in between new buffers
glm::vec3 C1;
glm::vec3 C2;
glm::vec3 normal;
//cout << bufData[0] << "," << bufData[1] << "," << bufData[2] << " buf 1 \n";
//cout << bufData[3] << "," << bufData[4] << "," << bufData[5] << " buf 2 \n";
//cout << bufData[6] << "," << bufData[7] << "," << bufData[8] << " buf 3 \n\n";
//C1.x = bufData[3] - bufData[0];
//C1.y = bufData[4] - bufData[1];
//C1.z = bufData[5] - bufData[2];
//C2.x = bufData[6] - bufData[0];
//C2.y = bufData[7] - bufData[1];
//C2.z = bufData[8] - bufData[2];
C1.x = bufData[0] - bufData[3];
C1.y = bufData[1] - bufData[4];
C1.z = bufData[2] - bufData[5];
C2.x = bufData[0] - bufData[6];
C2.y = bufData[1] - bufData[7];
C2.z = bufData[2] - bufData[8];
//C2.x = bufData[6] - bufData[0];
//C2.y = bufData[7] - bufData[1];
//C2.z = bufData[8] - bufData[2];
//cout << C1.x << " 1x \n";
//cout << C1.y << " 1y \n";
//cout << C1.z << " 1z \n";
//cout << C2.x << " 2x \n";
//cout << C2.y << " 2y \n";
//cout << C2.z << " 2z \n";
normal = glm::cross(C1, C2);
//cout << "\nNORMAL : " << normal.x << "," << normal.y << "," << normal.z << " counter = " << counter << "\n";
for(int j = 0; j < 3; j++)
{
for(int i = 0; i < 3; i++)
{
normBufData[counter] = normal.x;
normBufData[counter + 1] = normal.y;
normBufData[counter + 2] = normal.z;
}
counter+=3;
}
}
And main...
int main(int argc, char **argv)
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB);
glutInitWindowSize(700, 700); // Window Size
glutCreateWindow("Michael - Lab 3");
glutDisplayFunc(display);
glutTimerFunc(10, timeFucn, 10);
glutIdleFunc(Idle);
glutKeyboardFunc(keyboard);
glewExperimental = GL_TRUE;
glewInit();
glEnable(GL_CULL_FACE);
glEnable(GL_DEPTH_TEST); // Enable depth test
glDepthFunc(GL_LESS); // Accept fragment if it closer to the camera than the former one
GenerateSphere(); // this function generates points for the sphere
programID = LoadShader( "VertexShader.glsl", "FragmentShader.glsl" ); // Create and compile our GLSL program from the shaders
setBuffers(); // initialize buffers
calculateNormals(CubeBufferData, CubeNormalBufferData, 108); // calculate normals
//printNormals(CubeNormalBufferData);
glutMainLoop();
}
Answer 0 (score: 3)
You forgot to bind the buffer object that contains the normals before calling glVertexAttribPointer( cubeNormal, 3, ... );. As a result, the actual data for the normals comes from the color buffer, which leads to the strangest Phong evaluation results.
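For illustration, a minimal sketch (not from the original post) of how the cube branch of display() could bind the normal buffer before describing the vNormal attribute; the pointer offset is also set back to 0 here, on the assumption that the normals start at the beginning of CubeNormalbuffer:

// Bind the buffer that actually contains the normals, then describe the attribute;
// glVertexAttribPointer reads from whatever buffer is currently bound to GL_ARRAY_BUFFER.
glBindBuffer(GL_ARRAY_BUFFER, CubeNormalbuffer);
glEnableVertexAttribArray(cubeNormal);
glVertexAttribPointer(cubeNormal, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);

// Then bind the position buffer for attribute 0, as before
glBindBuffer(GL_ARRAY_BUFFER, CubeVertexbuffer);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);
glDrawArrays(GL_TRIANGLES, 0, 12*3);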
BTW, nice coding style :)
Answer 1 (score: 0)
Phong and Gouraud shading are not well suited to objects whose surfaces are all flat, e.g. a cube.
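If a faceted look is what you want for such objects, one option (a sketch of my own, not part of this answer) is to mark the normal with the flat interpolation qualifier so it is not interpolated across each triangle:

// Vertex shader: pass the normal without interpolation
flat out vec3 fN;

// Fragment shader: every fragment of a triangle receives the provoking vertex's normal
flat in vec3 fN;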