我想将纹理映射到对象(可能是立方体)。 但我不想使用图像。 我知道通常使用图像进行纹理映射,如下所示:
// Create a texture object and upload the RGB image loaded by SOIL.
glGenTextures(1, &texName);
glBindTexture(GL_TEXTURE_2D, texName);
image = SOIL_load_image("cube.jpg", &width, &height, 0, SOIL_LOAD_RGB);
// Tightly packed 3-byte RGB rows are not 4-byte aligned for arbitrary
// widths; lower the unpack alignment so glTexImage2D reads rows correctly.
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, image);
// The pixel data has been copied into the texture object; release the SOIL buffer.
SOIL_free_image_data(image);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
但有没有不使用图像的解决方案?
如果有一个色谱(颜色渐变),我想将颜色信息存储在缓冲区中,并将该色谱映射为纹理。
如果有任何解决方案,请告诉我。
答案 0 :(得分:3)
如果要创建纹理,则必须先为颜色平面分配内存。 最简单的方法是创建一个RGBA纹理,因为对于一个纹素,需要正好4个字节(32位),您不必担心对齐。
在 C++ 中,我建议使用 std::vector:
// Allocate a zero-initialized RGBA color plane; 4 bytes per texel.
int width = ...;
int height = ...;
// FIX: original spelled "unsignd", which does not compile.
std::vector<unsigned char> colorPlane( width * height * 4 ); // * 4 because of RGBA
但您也可以使用旧式动态内存分配:
unsignd char *colorPlane = new unsignd char[ width * height * 4 ];
平面内纹素的字节索引计算如下:
// Texel coordinates within the texture (0 <= posX < width, 0 <= posY < height).
int posX = ...;
int posY = ...;
// Byte offset of the texel in the row-major RGBA plane (4 bytes per texel).
int index = (posY * width + posX) * 4;
如果要设置某个像素,则必须指定 [0, 255] 范围内正确的红、绿、蓝通道值。对于不透明的纹素,还必须将 alpha 通道设置为 255:
例如:设置红色:
// Write an opaque red texel: full red, no green/blue, alpha = 255 (opaque).
colorPlane[index]     = 255; // red component
colorPlane[index + 1] = 0;   // green component
colorPlane[index + 2] = 0;   // blue component
colorPlane[index + 3] = 255; // alpha channel (255 == opaque)
最后,您必须将颜色平面设置为纹理。
// std::vector<unsigned char>: .data() yields the raw byte pointer
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, colorPlane.data() );
// unsigned char*: the raw pointer can be passed directly
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, colorPlane );
您可以通过对颜色分量进行插值来创建渐变纹理。请参阅以下示例:
for ( int iy = 0; iy < height; ++ iy )
{
for ( int ix = 0; ix < width; ++ ix )
{
int index = (iy * width + ix) * 4;
float gradX = (float)ix / width;
float gradY = (float)iy / height;
colorPlane[index + 0] = (unsigned char)(255.0 * (1.0-gradX));
colorPlane[index + 1] = (unsigned char)(255.0 * (1.0-gradY));
colorPlane[index + 2] = (unsigned char)(255.0 * gradX * gradY);
colorPlane[index + 3] = 255;
}
}
可以按如下方式创建彩虹纹理:
// Fill the color plane with a vertical rainbow (hue ramp).
// FIX: the piecewise-linear hue formula produces channel values outside
// [0, 1] (e.g. B == 2.0 at H == 1.0, and negative values elsewhere);
// without clamping, the cast to unsigned char overflows.  Clamp each
// channel to [0, 1] first, exactly as the GLSL HueToRGB helper does.
for ( int iy = 0; iy < height; ++ iy )
{
    for ( int ix = 0; ix < width; ++ ix )
    {
        int index = (iy * width + ix) * 4;
        float H = 1.0f - (float)iy / height;
        float R = fabs(H * 4.0f - 3.0f) - 1.0f;
        float G = 2.0f - fabs(H * 4.0f - 2.0f);
        float B = 2.0f - fabs(H * 4.0f - 4.0f);
        // clamp to [0, 1] without extra headers
        if (R < 0.0f) R = 0.0f; else if (R > 1.0f) R = 1.0f;
        if (G < 0.0f) G = 0.0f; else if (G > 1.0f) G = 1.0f;
        if (B < 0.0f) B = 0.0f; else if (B > 1.0f) B = 1.0f;
        colorPlane[index + 0] = (unsigned char)(255.0 * R);
        colorPlane[index + 1] = (unsigned char)(255.0 * G);
        colorPlane[index + 2] = (unsigned char)(255.0 * B);
        colorPlane[index + 3] = 255;
    }
}
请参阅以下WebGL示例:
// Helper object that compiles, links and manages WebGL shader programs.
// Relies on the global WebGL context `gl` being initialized first.
var ShaderProgram = {};

// Create a program from a list of { source, stage } entries.
// `source` may be raw GLSL or the id of a <script> tag holding the GLSL.
// Returns the linked program object (with uniform locations cached on the
// `uniformLocation` map) or 0 on compile/link failure.
ShaderProgram.Create = function( shaderList, uniformNames ) {
    var shaderObjs = [];
    for ( var i_sh = 0; i_sh < shaderList.length; ++ i_sh ) {
        // FIX: local was misspelled "shderObj" in the original.
        var shaderObj = this.CompileShader( shaderList[i_sh].source, shaderList[i_sh].stage );
        if ( shaderObj == 0 )
            return 0;
        shaderObjs.push( shaderObj );
    }
    var progObj = this.LinkProgram( shaderObjs );
    if ( progObj != 0 ) {
        // Cache uniform locations so the SetUniform* helpers can look
        // them up by name.  (FIX: property was misspelled "unifomLocation";
        // it is only accessed inside this object, so renaming is safe.)
        progObj.uniformLocation = {};
        for ( var i_n = 0; i_n < uniformNames.length; ++ i_n ) {
            var name = uniformNames[i_n];
            progObj.uniformLocation[name] = gl.getUniformLocation( progObj, name );
        }
    }
    return progObj;
};

ShaderProgram.Use = function( progObj ) { gl.useProgram( progObj ); };
ShaderProgram.SetUniformInt = function( progObj, name, val ) { gl.uniform1i( progObj.uniformLocation[name], val ); };
ShaderProgram.SetUniformFloat = function( progObj, name, val ) { gl.uniform1f( progObj.uniformLocation[name], val ); };
ShaderProgram.SetUniform2f = function( progObj, name, arr ) { gl.uniform2fv( progObj.uniformLocation[name], arr ); };
ShaderProgram.SetUniform3f = function( progObj, name, arr ) { gl.uniform3fv( progObj.uniformLocation[name], arr ); };
ShaderProgram.SetUniformMat44 = function( progObj, name, mat ) { gl.uniformMatrix4fv( progObj.uniformLocation[name], false, mat ); };

// Compile one shader stage.  If `source` names a <script> element, its
// text content is used as the GLSL source.  Returns the shader object,
// or 0 (after alerting the info log) on compile failure.
ShaderProgram.CompileShader = function( source, shaderStage ) {
    var shaderScript = document.getElementById( source );
    if ( shaderScript ) {
        source = "";
        var node = shaderScript.firstChild;
        while ( node ) {
            if ( node.nodeType == 3 ) source += node.textContent; // 3 == TEXT_NODE
            node = node.nextSibling;
        }
    }
    var shaderObj = gl.createShader( shaderStage );
    gl.shaderSource( shaderObj, source );
    gl.compileShader( shaderObj );
    var status = gl.getShaderParameter( shaderObj, gl.COMPILE_STATUS );
    if ( !status ) alert( gl.getShaderInfoLog( shaderObj ) );
    return status ? shaderObj : 0;
};

// Attach and link the compiled shader objects into a program.
// Returns the program object or 0 (after alerting) on link failure.
ShaderProgram.LinkProgram = function( shaderObjs ) {
    var prog = gl.createProgram();
    for ( var i_sh = 0; i_sh < shaderObjs.length; ++ i_sh )
        gl.attachShader( prog, shaderObjs[i_sh] );
    gl.linkProgram( prog );
    // FIX: `status` was an implicit global in the original (missing `var`).
    var status = gl.getProgramParameter( prog, gl.LINK_STATUS );
    if ( !status ) alert( "Could not initialise shaders" );
    gl.useProgram( null );
    return status ? prog : 0;
};
// Render the fullscreen quad using the rainbow shader program.
// Called periodically via setInterval from sceneStart().
function drawScene() {
    var canvas = document.getElementById( "ogl-canvas" );
    gl.viewport( 0, 0, canvas.width, canvas.height );
    gl.enable( gl.DEPTH_TEST );
    gl.clearColor( 0.0, 0.0, 0.0, 1.0 );
    gl.clear( gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT );
    ShaderProgram.Use( progDraw );
    // Bind the quad's vertex positions to the "inPos" attribute.
    gl.enableVertexAttribArray( progDraw.inPos );
    gl.bindBuffer( gl.ARRAY_BUFFER, bufObj.pos );
    gl.vertexAttribPointer( progDraw.inPos, 2, gl.FLOAT, false, 0, 0 );
    gl.bindBuffer( gl.ELEMENT_ARRAY_BUFFER, bufObj.inx );
    gl.drawElements( gl.TRIANGLES, bufObj.inx.len, gl.UNSIGNED_SHORT, 0 );
    // FIX: the original disabled `progDraw.pos`, which is undefined;
    // the attribute location is stored on `progDraw.inPos` (see sceneStart).
    // Also removed the unused local `vp`.
    gl.disableVertexAttribArray( progDraw.inPos );
}
var gl;          // WebGL rendering context, set in sceneStart()
var prog;        // unused; kept so existing references keep working
var progDraw;    // FIX: was an implicit global — linked shader program
var bufObj = {}; // vertex/index buffer objects for the fullscreen quad
// Set up WebGL: context, shader program, quad geometry, and the render loop.
function sceneStart() {
    var canvas = document.getElementById( "ogl-canvas" );
    gl = canvas.getContext( "experimental-webgl" );
    if ( !gl )
        return;
    progDraw = ShaderProgram.Create(
        [ { source : "draw-shader-vs", stage : gl.VERTEX_SHADER },
          { source : "draw-shader-fs", stage : gl.FRAGMENT_SHADER }
        ],
        [] );
    // FIX: the original tested the never-assigned `prog` — and only after
    // already calling getAttribLocation on progDraw.  Bail out first if
    // program creation failed (Create returns 0 on error).
    if ( progDraw == 0 )
        return;
    progDraw.inPos = gl.getAttribLocation( progDraw, "inPos" );
    // Fullscreen quad in clip space, drawn as two triangles.
    var pos = [ -1, -1, 1, -1, 1, 1, -1, 1 ];
    var inx = [ 0, 1, 2, 0, 2, 3 ];
    bufObj.pos = gl.createBuffer();
    gl.bindBuffer( gl.ARRAY_BUFFER, bufObj.pos );
    gl.bufferData( gl.ARRAY_BUFFER, new Float32Array( pos ), gl.STATIC_DRAW );
    bufObj.inx = gl.createBuffer();
    bufObj.inx.len = inx.length; // cached for drawElements in drawScene
    gl.bindBuffer( gl.ELEMENT_ARRAY_BUFFER, bufObj.inx );
    gl.bufferData( gl.ELEMENT_ARRAY_BUFFER, new Uint16Array( inx ), gl.STATIC_DRAW );
    setInterval(drawScene, 50);
}
<script id="draw-shader-vs" type="x-shader/x-vertex">
// Pass-through vertex shader: forwards the 2D clip-space quad position
// to the fragment shader as `vertPos`.
precision mediump float;
attribute vec2 inPos;
varying vec2 vertPos;
void main()
{
vertPos = inPos;
gl_Position = vec4( inPos.xy, 0.0, 1.0 );
}
</script>
<script id="draw-shader-fs" type="x-shader/x-fragment">
// Fragment shader: converts the vertical position to a hue and paints
// a rainbow across the quad.
precision mediump float;
varying vec2 vertPos;
// Piecewise-linear hue (H in [0,1]) to RGB; clamp keeps channels in
// [0,1] where the linear ramps would overshoot.
vec3 HueToRGB(in float H)
{
float R = abs(H * 4.0 - 3.0) - 1.0;
float G = 2.0 - abs(H * 4.0 - 2.0);
float B = 2.0 - abs(H * 4.0 - 4.0);
return clamp( vec3(R,G,B), 0.0, 1.0 );
}
void main()
{
// Map y from [-1,1] to a hue in [0,1]; output is fully opaque.
vec3 color = HueToRGB( vertPos.y * 0.5 + 0.5 );
gl_FragColor = vec4( color, 1.0 );
}
</script>
<body onload="sceneStart();">
<!-- Canvas that sceneStart()/drawScene() render the rainbow quad into. -->
<canvas id="ogl-canvas" style="border: none;" width="200" height="200"></canvas>
</body>
答案 1 :(得分:0)
这在片段(像素)着色器中是可能的。无论如何 - 如果原始渐变数据将作为纹理呈现给具有附加参数的着色器(如线性渐变点或径向渐变参数),则它将更好地工作。
但是,您仍然可以将其作为统一缓冲区(uniform buffer)或纹理缓冲区对象(texture buffer object)传递。不过这样一来,您将失去内置的纹理采样和过滤功能,而这些功能几乎可以为您自动完成渐变插值。
看一下着色器玩具(https://www.shadertoy.com) - 那里有大量没有纹理的样本,并在着色器程序中生成模式。