我想使用Open GL ES进行通用计算。
到目前为止,我了解的事情是我需要创建一个SSBO并在那里传输数据,将缓冲区绑定到着色器中的特殊点,运行着色器并返回数据。
到目前为止,我有3个问题:
我也正在编写要从Android应用程序启动的代码,因此从该侧可能会出现一些问题。我的最终任务是在该着色器上计算出一些东西,然后返回到Android应用。现在,它仅返回我用于调试的固定字符串。
这是我的代码:
#include <jni.h>
#include <string>
#include <GLES3/gl31.h>
//#include <GLES/egl.h>
static const char COMPUTE_SHADER[] =
"#version 310 es\n"
"layout(local_size_x = 128) in;\n"
"layout(std430) buffer;\n"
"layout(binding = 0) writeonly buffer Output {\n"
"vec4 elements[];\n"
"} output_data;\n"
"layout(binding = 1) readonly buffer Input0 {\n"
"vec4 elements[];\n"
"} input_data0;\n"
"void main()\n"
"{\n"
" uint ident = gl_GlobalInvocationID.x;\n"
"output_data.elements[ident] = input_data0.elements[ident] * input_data0.elements[ident];\n"
"}";
GLuint LoadShader(const char *shaderSrc)
{
GLuint shader;
GLint compiled;
// Create the shader object
shader = glCreateShader(GL_COMPUTE_SHADER);
if(shader == 0)
return shader;
// Load the shader source
glShaderSource(shader, 1, &shaderSrc, NULL);
// Compile the shader
glCompileShader(shader);
// Check the compile status
glGetShaderiv(shader, GL_COMPILE_STATUS, &compiled);
if(!compiled)
{
GLint infoLen = 0;
glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &infoLen);
if(infoLen > 1)
{
char* infoLog = (char*)malloc(sizeof(char) * infoLen);
glGetShaderInfoLog(shader, infoLen, NULL, infoLog);
//esLogMessage("Error compiling shader:\n%s\n", infoLog);
free(infoLog);
}
glDeleteShader(shader);
return 0;
}
return shader;
}
extern "C" JNIEXPORT jstring
JNICALL
Java_appname_MainActivity_stringFromJNI(
JNIEnv *env,
jobject /* this */) {
// Maybe create a shader straight here
//prepare_data();
//GLuint tex[2];
char hello[100] = "hello";
GLuint data_buffer;
GLuint output_buffer;
uint32_t data[10] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
glGenBuffers(1, &data_buffer);
glBindBuffer(GL_SHADER_STORAGE_BUFFER, data_buffer);
glBufferData(GL_SHADER_STORAGE_BUFFER, sizeof(uint32_t) * 10, (void*)data, GL_STREAM_COPY);
glBindBufferBase( GL_SHADER_STORAGE_BUFFER, 1, data_buffer);
glGenBuffers(0, &output_buffer);
glBindBuffer(GL_SHADER_STORAGE_BUFFER, output_buffer);
//glBufferData(GL_SHADER_STORAGE_BUFFER, sizeof(uint32_t) * 10, (void*)calc_data, GL_STREAM_COPY);
glBindBufferBase( GL_SHADER_STORAGE_BUFFER, 0, output_buffer);
GLuint program = glCreateProgram();
GLuint shader = LoadShader(COMPUTE_SHADER);
glAttachShader(program, shader);
glLinkProgram(program);
glUseProgram(program);
glDispatchCompute(10,1,1);
GLuint *ptr = (GLuint *) glMapBufferRange(GL_SHADER_STORAGE_BUFFER, 0, 10, GL_READ_ONLY );
GLuint info = ptr[ 0 ];
glUnmapBuffer( GL_SHADER_STORAGE_BUFFER );
sprintf(hello, "%d ", info);
glMemoryBarrier( GL_SHADER_STORAGE_BARRIER_BIT );
return env->NewStringUTF(hello);
}
答案 0 :(得分:0)
第一个问题是,您必须在缓冲区对象的规范中使用正确的数据类型(uint
):
layout(binding = 0) writeonly buffer Output
{
uint elements[];
} output_data;
layout(binding = 1) readonly buffer Input0
{
uint elements[];
} input_data0;
此外,您还可以通过glBufferData
创建并初始化缓冲区对象的数据存储区:
glGenBuffers(0, &output_buffer);
glBindBuffer(GL_SHADER_STORAGE_BUFFER, output_buffer);
glBindBufferBase( GL_SHADER_STORAGE_BUFFER, 0, output_buffer);
glBufferData(GL_SHADER_STORAGE_BUFFER, sizeof(GLuint) * 10, nullptr, GL_DYNAMIC_READ);
这样做的话,如果您使用GL_MAP_READ_BIT
(而不是枚举常量GL_READ_ONLY
,则glMapBufferRange
对缓冲区的映射将起作用,这在此没有意义。完全是这样):
GLuint *ptr = (GLuint*)glMapBufferRange(
GL_SHADER_STORAGE_BUFFER, 0, sizeof(GLuint) * 10, GL_MAP_READ_BIT );