在OpenGL 3+中纹理球体

时间:2012-01-05 16:38:08

标签: c++ opengl

我想围绕球体包裹1024 * 512图像。

这是我的代码:(并非全部显示)

在main.cpp中:

// Interleaved per-vertex data uploaded to the VBO in setBuffer().
// Field order/types must stay in sync with the glVertexAttribPointer
// calls (attributes 0, 1, 2 respectively).
struct Vertex {

    GLdouble position[3];   // model-space position — attribute 0
    GLfloat color[3];       // RGB color — attribute 1
    GLfloat textureUV[2];   // texture coordinates (U,V) — attribute 2
};

// Allocates the 4 VBO names used by the program into the global `vbo` array.
// NOTE(review): the name is presumably a typo for "SetupGeometry"; renaming
// would require updating every caller, so it is left unchanged here.
void SetupGemetry() {

    //Allocate 4 VBOs
    glGenBuffers(4, vbo);

    }

// Uploads `no_vertices` interleaved vertices into vbo[buffer_no] and
// configures vertex attributes 0 (position), 1 (color) and 2 (UV) to
// read from the currently bound buffer.
void setBuffer(int buffer_no, int no_vertices, Vertex *vertices_array)
{
    glBindBuffer(GL_ARRAY_BUFFER, vbo[buffer_no]);
    glBufferData(GL_ARRAY_BUFFER, no_vertices * sizeof(struct Vertex),
                 vertices_array, GL_STATIC_DRAW);

    // Attribute 0: position — 3 doubles.
    glEnableVertexAttribArray(0);
    glVertexAttribPointer(0, 3, GL_DOUBLE, GL_FALSE, sizeof(struct Vertex),
                          (const GLvoid*)offsetof(struct Vertex, position));

    // Attribute 1: color — 3 floats.
    glEnableVertexAttribArray(1);
    glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(struct Vertex),
                          (const GLvoid*)offsetof(struct Vertex, color));

    // Attribute 2: texture coordinates — 2 floats.
    // BUG FIX: the struct member is named `textureUV`, not `texture`
    // (the original offsetof did not compile), and a UV pair has 2
    // components, not 3.
    glEnableVertexAttribArray(2);
    glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, sizeof(struct Vertex),
                          (const GLvoid*)offsetof(struct Vertex, textureUV));
}

现在在我的Sphere.cpp中(这里我加载* .bmp文件)

// Loads a 24-bit uncompressed BMP from `imagepath` and uploads it as a
// GL_TEXTURE_2D with nearest-neighbour filtering.
// Returns the OpenGL texture name, or 0 on any failure.
GLuint Sphere::loadBMP_custom(const char * imagepath)
{
    unsigned char header[54]; // every BMP file begins with a 54-byte header
    unsigned int dataPos;     // file offset where the pixel data begins
    unsigned int width, height;
    unsigned int imageSize;   // = width*height*3 for 24 bpp
    unsigned char * data;     // raw BGR pixel data

    FILE * file = fopen(imagepath, "rb");
    if (!file)
    {
        printf("Image could not be opened\n");
        return 0;
    }
    // Validate the header.
    if (fread(header, 1, 54, file) != 54)
    { // if fewer than 54 bytes could be read, this is not a BMP
        printf("Not a correct BMP file\n");
        fclose(file);   // BUG FIX: the FILE* leaked on this error path
        return 0;       // BUG FIX: was `return false` in a GLuint function
    }
    if (header[0] != 'B' || header[1] != 'M')
    {
        printf("Not a correct BMP file\n");
        fclose(file);   // BUG FIX: the FILE* leaked on this error path
        return 0;
    }

    dataPos    = *(int*)&(header[0x0A]);
    imageSize  = *(int*)&(header[0x22]);
    width      = *(int*)&(header[0x12]);
    height     = *(int*)&(header[0x16]);
    // Some BMP files are misformatted; guess the missing information.
    if (imageSize == 0)  imageSize = width * height * 3; // 3 bytes: R, G, B
    if (dataPos == 0)    dataPos = 54;                   // data follows the header

    // Create a buffer for the pixel data.
    data = new unsigned char [imageSize];

    // Seek to the pixel data (dataPos may exceed 54 when extra header
    // blocks are present) and read it, checking for truncated files.
    fseek(file, dataPos, SEEK_SET);
    if (fread(data, 1, imageSize, file) != imageSize)
    {
        printf("BMP pixel data is truncated\n");
        delete[] data;
        fclose(file);
        return 0;
    }

    // Everything is in memory now; the file can be closed.
    fclose(file);

    // Create one OpenGL texture and bind it so the calls below affect it.
    GLuint textureID;
    glGenTextures(1, &textureID);
    glBindTexture(GL_TEXTURE_2D, textureID);

    // Give the image to OpenGL. BMP stores pixels as BGR, hence GL_BGR.
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_BGR, GL_UNSIGNED_BYTE, data);

    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);

    delete[] data;      // BUG FIX: the pixel buffer was never freed
    return textureID;   // BUG FIX: the function had no return statement (UB)
}

void Sphere::SetupShaders_sphere(void){

    // Read our shaders into the appropriate buffers
    sphere_vertexsource = filetobuf_sphere("vertex_shader_2.vert");
    sphere_fragmentsource = filetobuf_sphere("fragment_shader_2.frag");

    //Assign our handles a "name" to new shader objects 
    sphere_vertexshader = glCreateShader(GL_VERTEX_SHADER);
    sphere_fragmentshader = glCreateShader(GL_FRAGMENT_SHADER);

    // Associate the source code buffers with each handle
    glShaderSource(sphere_vertexshader, 1, (const GLchar**)&sphere_vertexsource, 0);
    glShaderSource(sphere_fragmentshader, 1, (const GLchar**)&sphere_fragmentsource, 0);

    //Setting them up by compiling, attaching and linking them!
    glCompileShader(sphere_vertexshader);
    glCompileShader(sphere_fragmentshader);

    sphere_shaderprogram = glCreateProgram();
    glAttachShader(sphere_shaderprogram, sphere_vertexshader);
    glAttachShader(sphere_shaderprogram, sphere_fragmentshader);

    glBindAttribLocation(sphere_shaderprogram, 0, "in_Position"); 
    glBindAttribLocation(sphere_shaderprogram, 1, "in_Color");

    glLinkProgram(sphere_shaderprogram);
    glUseProgram(sphere_shaderprogram);

}

现在我的片段和顶点着色器:

顶点:

#version 330 core

// Input vertex data, different for all executions of this shader.
// The location qualifiers match the attribute indices configured by
// glVertexAttribPointer in setBuffer(), so glBindAttribLocation calls
// on the C++ side are not strictly required.
layout(location = 0) in vec3 in_Position;
// NOTE(review): the C++ code binds the name "in_Color" but this shader
// declares "inColor" — the layout qualifier wins, but the names should agree.
layout(location = 1) in vec3 inColor;
layout(location = 2) in vec2 vertexUV;

// Output data ; will be interpolated for each fragment.
out vec2 UV;

// Values that stay constant for the whole mesh.
uniform mat4 MVP_matrix;

void main(){

    // Output position of the vertex, in clip space : MVP * position
    gl_Position =  MVP_matrix * vec4(in_Position,1);

    // UV of the vertex. No special space for this one.
    UV = vertexUV;
}

片段:

#version 330 core

// Interpolated values from the vertex shaders
in vec2 UV;

// Output data
out vec3 color;

// Values that stay constant for the whole mesh.
uniform sampler2D myTextureSampler;

void main(){

    // Output color = color of the texture at the specified UV.
    // BUG FIX: texture2D() was deprecated in GLSL 1.30 and removed from
    // the core profile; with #version 330 core the overloaded texture()
    // function must be used instead.
    color = texture( myTextureSampler, UV ).rgb;
}

如何将图像文件坐标(UV)转换为3D坐标?如何将它传递给我的顶点着色器? (我知道如何将数据传递给顶点,我基本上是指如何将2D图像文件转换为3D?)例如我发现这个很好的教程

www.opengl-tutorial.org/beginners-tutorials/tutorial-5-a-textured-cube/

那里说:“好吧，我们必须在这里做同样的事情，但不是给出 (R,G,B) 三元组的缓冲区，而是给出 (U,V) 对的缓冲区。”

// Two UV coordinates for each vertex (one pair per vertex of the cube's
// 12 triangles, 36 vertices total). They were created with Blender.
// The `1.0f - v` flips the V axis because the tutorial's image has its
// origin at the top-left while OpenGL samples from the bottom-left.
static const GLfloat g_uv_buffer_data[] = {
    0.000059f, 1.0f-0.000004f,
    0.000103f, 1.0f-0.336048f,
    0.335973f, 1.0f-0.335903f,
    1.000023f, 1.0f-0.000013f,
    0.667979f, 1.0f-0.335851f,
    0.999958f, 1.0f-0.336064f,
    0.667979f, 1.0f-0.335851f,
    0.336024f, 1.0f-0.671877f,
    0.667969f, 1.0f-0.671889f,
    1.000023f, 1.0f-0.000013f,
    0.668104f, 1.0f-0.000013f,
    0.667979f, 1.0f-0.335851f,
    0.000059f, 1.0f-0.000004f,
    0.335973f, 1.0f-0.335903f,
    0.336098f, 1.0f-0.000071f,
    0.667979f, 1.0f-0.335851f,
    0.335973f, 1.0f-0.335903f,
    0.336024f, 1.0f-0.671877f,
    1.000004f, 1.0f-0.671847f,
    0.999958f, 1.0f-0.336064f,
    0.667979f, 1.0f-0.335851f,
    0.668104f, 1.0f-0.000013f,
    0.335973f, 1.0f-0.335903f,
    0.667979f, 1.0f-0.335851f,
    0.335973f, 1.0f-0.335903f,
    0.668104f, 1.0f-0.000013f,
    0.336098f, 1.0f-0.000071f,
    0.000103f, 1.0f-0.336048f,
    0.000004f, 1.0f-0.671870f,
    0.336024f, 1.0f-0.671877f,
    0.000103f, 1.0f-0.336048f,
    0.336024f, 1.0f-0.671877f,
    0.335973f, 1.0f-0.335903f,
    0.667969f, 1.0f-0.671889f,
    1.000004f, 1.0f-0.671847f,
    0.667979f, 1.0f-0.335851f
};

我必须使用搅拌机吗? 我找到了http://www.opengl.org/wiki/Texturing_a_Sphere#2D_Texture_Mapping_a_Sphere 但我仍然无法理解

2 个答案:

答案 0 :(得分:1)

  

如何将图像文件坐标(UV)转换为3D坐标?

你不需要这样做。纹理并不构成你的几何体，你仍然需要自己提供球体的顶点坐标。另外，你不必使用Blender——它只是用来导出一些顶点数据（位置、法线和纹理坐标）供以后使用。因此，除了UV缓冲区外，还应该有位置缓冲区和法线缓冲区。

答案 1 :(得分:1)

问题"如何将图像文件坐标(UV)转换为3D坐标?"问反了。您应该问"如何将球体坐标(3D)转换为图像坐标(UV)?"。教程示例使用Blender为静态数据创建UV坐标。如果要计算球体的顶点，则需要同时计算UV坐标并将它们存储在Vertex结构中。或者，如果您有一组预先计算的静态球体坐标，则需要计算一组对应的UV坐标。我们的想法是获取3D数据并将其传递给一个映射函数，该函数返回U,V坐标，即纹理中将用于该3D点的像素位置。

最简单的计算是使用极坐标(方位角,高度)并将其映射到U,V范围。如果正在计算球体3D顶点,则算法可能已经使用极坐标并调用trig函数来获得X,Y,Z。

opengl.org的链接提到"立方体贴图(cubemap)"是给球体贴纹理的首选方法。"为获得最佳效果，请使用立方体贴图。应用2D纹理的问题在于，当您将2D纹理包裹到球体上时，球体顶部和底部区域的纹理看起来会被挤压。"

您提出的2D地图在缠绕球体或其他3D曲线时始终会出现扭曲。可以调整U,V映射算法以最小化效果或隐藏主视线外部的接缝,但是球体旋转它们将出现在某处。