OpenGL 中的两个函数 glFramebufferTexture 和 glBindTexture 之间的确切用法差异是什么？
为了进入上下文,我使用OpenTK为嵌入式机器使用OpenGL编写自定义轻量级UI,该机器使用大量的帧缓冲区和UI中不同组件的相应纹理。在内部,使用堆栈跟踪帧缓冲区,因为缓冲区内容可以被绘制到“父”缓冲区中(或者如果屏幕空间没有父级)。
我很难按正确的顺序进行这些函数调用，我想知道：在需要把纹理绑定（bind）到帧缓冲区、和需要把纹理附加（attach）到帧缓冲区这两件事上，我是否弄混了。
我已经阅读过关于这些函数的文档,但它们并没有真正解释这些函数在其上下文中与帧缓冲区之间的关系。
我基本上没有被渲染回屏幕。
例如,如果我使用SharpFont(FreeType绑定库)渲染文本,我有一个帧缓冲区用于整个渲染字符串,一个帧缓冲区用于渲染每个字符。
这是我相信在这个例子中进行调用的一般顺序
从FB0(屏幕空间)开始,
创建FB 1,
使用TX 1设置FB 1(绑定到FB 1)(创建然后绑定然后附加),
回到FB0,
绑定到FB1(开始渲染),
创建FB2,
使用TX 2设置FB 2(绑定到FB 2)(创建然后绑定然后附加),(包括渲染的字符像素数据),
绑定回FB1,
绑定到TX2
使用quad
在7处重复下一个字符,直到结束
绑定回FB0
使用quad。渲染TX1(假设)。
FrameBuffer(UI对象)
// Wraps one OpenGL FBO plus its colour-attachment texture, representing an
// off-screen UI surface. The FBO id and texture id are created lazily and
// recreated on resize; -1 means "not created yet".
public class OGLFrameBuffer : IFrameBuffer {
    private int frameBufferId = -1;       // GL framebuffer object name, -1 until generated
    private int frameBufferTexture = -1;  // GL texture name of the colour attachment, -1 until generated
    private IDrawer ctx;                  // owning draw context; manages the framebuffer binding stack

    public OGLFrameBuffer(IDrawer dCtx) { ctx = dCtx; }

    // Creates (or recreates) the FBO with an empty RGBA colour texture of the
    // given size. Width/height are rounded up to whole pixels.
    public void SetupFrameBuffer(float width, float height) {
        int w = (int)Math.Ceiling(width);
        int h = (int)Math.Ceiling(height);
        BeginTextureSetup(w, h);
        // No initial pixel data: allocate storage only.
        GL.TexImage2D(TextureTarget.Texture2D, 0, PixelInternalFormat.Rgba, w, h, 0, PixelFormat.Bgra, PixelType.UnsignedByte, IntPtr.Zero);
        EndTextureSetup();
    }

    // Creates (or recreates) the FBO, seeding the colour texture with existing
    // pixel data (e.g. a FreeType glyph bitmap for the greyscale case).
    public void SetupFrameBuffer(float width, float height, byte[] data, VIPixelFormat pixelFormat) {
        PixelFormat oglPixFmt;
        switch (pixelFormat) {
            case VIPixelFormat.VIPF_RGBA:
                oglPixFmt = PixelFormat.Bgra;
                break;
            case VIPixelFormat.VIPF_GREYSCALE:
                oglPixFmt = PixelFormat.Red;
                break;
            case VIPixelFormat.VIPF_RGB:
            default:
                oglPixFmt = PixelFormat.Bgr;
                break;
        }
        int w = (int)Math.Ceiling(width);
        int h = (int)Math.Ceiling(height);
        BeginTextureSetup(w, h);
        // BUG FIX: GL's default UnpackAlignment is 4, but 1-byte (Red) and
        // 3-byte (Bgr) pixel rows are not 4-byte aligned in general, so every
        // row of the upload was skewed whenever (w * bytesPerPixel) % 4 != 0.
        // Tightly-packed source data needs alignment 1.
        bool tightlyPacked = pixelFormat != VIPixelFormat.VIPF_RGBA;
        if (tightlyPacked)
            GL.PixelStore(PixelStoreParameter.UnpackAlignment, 1);
        GL.TexImage2D(TextureTarget.Texture2D, 0, PixelInternalFormat.Rgba, w, h, 0, oglPixFmt, PixelType.UnsignedByte, data);
        if (tightlyPacked)
            GL.PixelStore(PixelStoreParameter.UnpackAlignment, 4); // restore the GL default
        EndTextureSetup();
    }

    // Shared first half of both overloads: lazily create the FBO, make it the
    // current render target via the context stack, then (re)create the colour
    // texture and set its sampling parameters.
    private void BeginTextureSetup(int w, int h) {
        if (frameBufferId == -1) {
            frameBufferId = GL.GenFramebuffer();
        }
        ctx.PushUseFrameBuffer(this, new Size(w, h));
        if (frameBufferTexture != -1)
            GL.DeleteTexture(frameBufferTexture); // release the previous texture on resize
        frameBufferTexture = GL.GenTexture();
        GL.BindTexture(TextureTarget.Texture2D, frameBufferTexture);
        GL.TexParameter(TextureTarget.Texture2D, TextureParameterName.TextureMinFilter, (int)TextureMinFilter.Linear);
        GL.TexParameter(TextureTarget.Texture2D, TextureParameterName.TextureMagFilter, (int)TextureMagFilter.Linear);
        GL.TexParameter(TextureTarget.Texture2D, TextureParameterName.TextureWrapS, (int)TextureWrapMode.Repeat);
        GL.TexParameter(TextureTarget.Texture2D, TextureParameterName.TextureWrapT, (int)TextureWrapMode.Repeat);
    }

    // Shared second half: attach the texture as the FBO's colour target, verify
    // the FBO is complete (previously failed silently), and restore the
    // previously bound framebuffer.
    private void EndTextureSetup() {
        // NOTE(review): glFramebufferTexture requires GL 3.2+; on older/embedded
        // GL profiles use GL.FramebufferTexture2D(..., TextureTarget.Texture2D, ...)
        // instead — confirm the target GL version of the embedded machine.
        GL.FramebufferTexture(FramebufferTarget.Framebuffer, FramebufferAttachment.ColorAttachment0, frameBufferTexture, 0);
        FramebufferErrorCode status = GL.CheckFramebufferStatus(FramebufferTarget.Framebuffer);
        if (status != FramebufferErrorCode.FramebufferComplete)
            throw new InvalidOperationException("Framebuffer incomplete: " + status);
        ctx.PopFrameBuffer();
    }

    public int GetFrameBufferId() {
        return frameBufferId;
    }

    public int GetFrameBufferTexture() {
        return frameBufferTexture;
    }
}
相关上下文调用:
// Makes 'buf' the active GL render target, records it on the framebuffer
// stack, and resets the drawing bounds to the buffer's own pixel size.
public void PushUseFrameBuffer(IFrameBuffer buf, Size bufferDims) {
    var oglBuf = (OGLFrameBuffer)buf;
    frameBufferStack.Push(buf);
    GL.BindFramebuffer(FramebufferTarget.Framebuffer, oglBuf.GetFrameBufferId());
    ctxbounds = new Rectangle(new Point(0, 0), bufferDims);
}
// Restores the previously active framebuffer after off-screen rendering:
// rebinds the new stack top, or the default framebuffer (0, i.e. the screen)
// when the stack is empty. Drawing bounds revert to the window bounds.
public void PopFrameBuffer() {
    // BUG FIX: Stack<T>.Pop() throws InvalidOperationException on an empty
    // stack; an unbalanced push/pop now degrades to rebinding the default
    // framebuffer instead of crashing.
    if (frameBufferStack.Count > 0)
        frameBufferStack.Pop();
    if (frameBufferStack.Count != 0) {
        OGLFrameBuffer frameBuf = (OGLFrameBuffer)frameBufferStack.Peek();
        GL.BindFramebuffer(FramebufferTarget.Framebuffer, frameBuf.GetFrameBufferId());
    }
    else {
        // No parent buffer left: render directly to the window (screen space).
        GL.BindFramebuffer(FramebufferTarget.Framebuffer, 0);
    }
    ctxbounds = windowbounds;
}
// Draws the colour texture of 'buff' as a textured quad into the currently
// bound target, then rebinds the current framebuffer's own texture (or no
// texture) and outlines the drawn area with a white rectangle.
public void DrawBuffer(IFrameBuffer buff, RectangleF textureBounds, Size parentBounds) {
    var source = (OGLFrameBuffer)buff;

    // Quad corner positions, scaled into the parent's coordinate space.
    var left   = (textureBounds.X * scale) / parentBounds.Width;
    var right  = ((textureBounds.X + textureBounds.Width) * scale) / parentBounds.Width;
    var top    = (textureBounds.Y * scale) / parentBounds.Height;
    var bottom = ((textureBounds.Y + textureBounds.Height) * scale) / parentBounds.Height;

    GL.BindTexture(TextureTarget.Texture2D, source.GetFrameBufferTexture());
    GL.Begin(PrimitiveType.Quads);
    GL.TexCoord2(0.0f, 1.0f); GL.Vertex2(left, top);
    GL.TexCoord2(1.0f, 1.0f); GL.Vertex2(right, top);
    GL.TexCoord2(1.0f, 0.0f); GL.Vertex2(right, bottom);
    GL.TexCoord2(0.0f, 0.0f); GL.Vertex2(left, bottom);
    GL.End();

    // Re-bind the active framebuffer's texture so later draws read from it,
    // or unbind entirely when rendering to screen space.
    var current = (OGLFrameBuffer)GetCurrentFrameBuffer();
    GL.BindTexture(TextureTarget.Texture2D, current != null ? current.GetFrameBufferTexture() : 0);

    SetColour(1, 1, 1, 1f);
    DrawRectangle(textureBounds);
}
答案 0（得分：2）：
glFramebufferTexture 将一个纹理作为渲染目标附加到帧缓冲区。
这意味着当你发出一个drawcall时,这个纹理将由你的片段着色器的输出写入(如果启用了混合)。
glBindTexture 告诉 OpenGL 后续操作要使用这个纹理，例如在着色器中通过 sampler2D 读取它。
简单来说：glFramebufferTexture 让你把渲染输出写入这个纹理而不是屏幕；glBindTexture 让你读取纹理内的数据（既可以是借助帧缓冲写入的纹理，也可以是加载图片时上传的纹理）。
编辑:更一般地说,在OpenGL中绑定一个对象告诉OpenGL它将使用该对象进行所有后续操作。
例如在调用 glTexParameter 之前，你必须先绑定正在处理的纹理；或者使用 DSA（直接状态访问）的 glTextureParameter，按名称直接操作纹理而无需绑定。
对于缓冲区和其他对象,它是相同的