OSX +屏幕外渲染+ CoreGL + framebuffers + shaders =头痛?

时间:2016-05-06 17:08:52

标签: c++ macos opengl shader framebuffer

上下文与此相同:Apple OSX OpenGL offline rendering with CoreGL

我写了一个非常小的C++/OpenGL程序(简单得不能再简单了),它应该在着色器的帮助下在屏幕外绘制一个白色三角形……但事实并非如此。我基本上在CoreGL上下文中使用一个framebuffer/renderbuffer链接到分配的缓冲区,灵感来自这里:http://renderingpipeline.com/2012/05/windowless-opengl-on-macos-x/。然后,把缓冲区的内容写入位图图片。

结果很奇怪。我使用glClearColor命令在图片上显示正确的背景颜色。这让我觉得CoreGL上下文已经成功初始化了......但遗憾的是我没有得到更多(没有三角形)。

没有着色器编译/链接问题。我的framebuffers / VBO / VAO /顶点属性绑定没有问题......但是就像我的着色器与CoreGL上下文不相符。

以下是定义和编译着色器的函数:

int loadShader()
{
  const GLchar* vertexSource = 
    "#version 150\n"
    "in vec3 position;"
    "void main(void)"
    "{ gl_Position = vec4(position, 1.); }";

  const GLchar* fragmentSource =
    "#version 150\n"
    "out vec4 fragColor;"
    "void main(void)"
    "{ fragColor = vec4(1.); }";

  vertexID = glCreateShader(GL_VERTEX_SHADER);
  fragmentID = glCreateShader(GL_FRAGMENT_SHADER);

  glShaderSource(vertexID, 1, &vertexSource, NULL);
  glShaderSource(fragmentID, 1, &fragmentSource, NULL);

  glCompileShader(vertexID);
  glCompileShader(fragmentID);

  if (!checkShaderCompilation(vertexID) || !checkShaderCompilation(fragmentID))
    throw std::runtime_error("bad shader compilation");

  programID = glCreateProgram();
  if (programID == 0)
    throw std::runtime_error("unable to create shader program");

  glAttachShader(programID, vertexID);
  glAttachShader(programID, fragmentID);
  glLinkProgram(programID);

  GLint programState = 0;
  glGetProgramiv(programID, GL_LINK_STATUS, &programState);
  if (programState != GL_TRUE)
    throw std::runtime_error("bad shader link");

  glUseProgram(programID);

  return 0;
}

这里,渲染功能:

GLfloat triangleVertices[] =
  {
      0.0f , 0.5f, 0.0f,    
      0.5f, -0.5f, 0.0f,
     -0.5f, -0.5f, 0.0f
  };

void displayTriangle()
{
  glClearColor(0.3, 0.1, 0.1, 1.);
  glPointSize(40.f);

  glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

  loadShader();

  GLint fragmentGPUProcessing, vertexGPUProcessing;
  CGLGetParameter(CGLGetCurrentContext(), kCGLCPGPUFragmentProcessing, &fragmentGPUProcessing);
  CGLGetParameter(CGLGetCurrentContext(), kCGLCPGPUVertexProcessing, &vertexGPUProcessing);

  if (!fragmentGPUProcessing || !vertexGPUProcessing)
    throw std::runtime_error("vertex and fragment shaders seem not correctly bound!");

  GLuint vaoID, vboID;
  glGenVertexArrays(1, &vaoID);
  glBindVertexArray(vaoID);

  glGenBuffers(1, &vboID);
  glBindBuffer(GL_ARRAY_BUFFER, vboID);
  glBufferData(GL_ARRAY_BUFFER, sizeof(triangleVertices), triangleVertices, GL_STATIC_DRAW);

  positionID = glGetAttribLocation(programID, "position");
  if (positionID < 0)
    throw std::runtime_error("unable to find 'position' name in vertex shader!");

  glVertexAttribPointer(positionID, 3, GL_FLOAT, GL_TRUE, 0, NULL);
  glEnableVertexAttribArray(positionID);

  glDrawArrays(GL_TRIANGLES, 0, 3);

  glFinish();
}

和main(),定义CoreGL上下文和帧缓冲对象: (请注意 OpenGL 3.2核心配置文件已设置)

int main(int argc, char** argv)
{
  CGLContextObj context;

  CGLPixelFormatAttribute attributes[4] = { kCGLPFAOpenGLProfile, (CGLPixelFormatAttribute)kCGLOGLPVersion_3_2_Core,
                        kCGLPFAAccelerated,  // no software rendering
                        (CGLPixelFormatAttribute)0
  };

  CGLPixelFormatObj pix;
  GLint num;
  CGLError errorCode = CGLChoosePixelFormat(attributes, &pix, &num);
  if (errorCode != kCGLNoError)
    throw std::runtime_error("CGLChoosePixelFormat failure");

  errorCode = CGLCreateContext(pix, NULL, &context);
  if (errorCode != kCGLNoError)
    throw std::runtime_error("CGLCreateContext failure");

  CGLDestroyPixelFormat(pix);

  errorCode = CGLSetCurrentContext(context);
  if (errorCode != kCGLNoError)
    throw std::runtime_error("CGLSetCurrentContext failure");

  GLuint framebufferID, renderbufferID;
  glGenFramebuffers(1, &framebufferID);
  glBindFramebuffer(GL_FRAMEBUFFER, framebufferID);

  glGenRenderbuffers(1, &renderbufferID);
  glBindRenderbuffer(GL_RENDERBUFFER, renderbufferID);
  glRenderbufferStorage(GL_RENDERBUFFER, GL_RGB8, WIDTH, HEIGHT);

  glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, renderbufferID);

  if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
    throw std::runtime_error("framebuffer is not complete!");

  displayTriangle();

  GLubyte* buffer = new GLubyte[WIDTH * HEIGHT * 3 * sizeof(GLubyte)];
  glReadPixels(0, 0, WIDTH, HEIGHT, GL_RGB, GL_UNSIGNED_BYTE, buffer);

  writeBitmap("path/to/the/bitmap/file", buffer, WIDTH, HEIGHT);

  glDeleteRenderbuffers(1, &renderbufferID);
  glDeleteFramebuffers(1, &framebufferID);

  CGLSetCurrentContext(NULL);
  CGLDestroyContext(context);

  delete[] buffer;

  return 0;
}

怎么了?

1 个答案:

答案 0 :(得分:0)

我终于设法解决了这个问题。我实际上忘了用这个命令设置视口:

glViewport(0, 0, WIDTH, HEIGHT);

现在一切看起来都很好,我的着色器也被正确使用了。

这个代码示例已经简化到最基本的形式,实际上是一个跨平台屏幕外渲染项目的一部分。问题在于,我之前已经借助OSMesa库在Linux上运行过同样的代码。这说明与CoreGL不同,OSMesa不需要初始化视口就能正确渲染。很高兴知道这一点!