我正在尝试在 Android Oreo 中用相机捕获帧并将其录制为 mp4 文件。
我使用 NDK 实现了捕获过程。
之后,我创建了预览表面(GLSurfaceView),并从编码器(MediaCodec)获取了输入表面(input surface)。
但是,我不知道如何将捕获的帧渲染到编码器的输入表面上。
/* This is onDrawFrame of Renderer in GLSurfaceView instance
* The native function onDrawFrame(texMatrix) is called */
@Override
public void onDrawFrame(GL10 gl) {
    // Consume the "new camera frame" flag under the lock: the SurfaceTexture
    // callback (on another thread) sets frameAvailable; we must call
    // updateTexImage() on this GL thread, then latch the transform matrix
    // that maps texture coordinates for the external texture.
    synchronized (lock) {
        if (frameAvailable) {
            // Fixed typo in log message ("availablee" -> "available").
            Log.d("yoo", "Frame available...updating");
            surfaceTexture.updateTexImage();
            surfaceTexture.getTransformMatrix(texMatrix);
            frameAvailable = false;
        }
    }
    // Delegate the actual GL drawing to the native renderer with the
    // up-to-date SurfaceTexture transform.
    onDrawFrame(texMatrix);
}
/* This is the drawing function in the NDK part; it is executed when the native onDrawFrame(texMatrix) is called */
/* Renders the latest camera frame (bound as an external OES texture) as a
 * textured quad, using the SurfaceTexture transform matrix passed from Java.
 * Relies on file-scope GL state set up elsewhere: prog, mvpMatrix, texMatrix,
 * texSampler, color, size, textureId, buf[], vtxPosAttrib, uvsAttrib,
 * width, height. */
static void drawFrame ( JNIEnv* env, jfloatArray texMatArray )
{
    LOGD("DrawFrame called");
    // Set the clear color BEFORE clearing. The original cleared first, so the
    // very first frame was cleared with the GL default color, not opaque black.
    glClearColor ( 0, 0, 0, 1 );
    glClear ( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT );
    glUseProgram ( prog );
    // Identity model-view-projection: the quad is drawn in clip space as-is.
    float mvp[] = {
        1.0f, 0, 0, 0,
        0, 1.0f, 0, 0,
        0, 0, 1.0f, 0,
        0, 0, 0, 1.0f
    };
    glUniformMatrix4fv ( mvpMatrix, 1, false, mvp );
    // Bind the camera frame. SurfaceTexture frames live behind the
    // GL_TEXTURE_EXTERNAL_OES target, so the sampler parameters must be set on
    // that SAME target — the original used GL_TEXTURE_2D here, which configures
    // a different binding point and leaves the external texture untouched.
    glActiveTexture ( GL_TEXTURE0 );
    glBindTexture ( GL_TEXTURE_EXTERNAL_OES, textureId );
    glTexParameteri ( GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE );
    glTexParameteri ( GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE );
    // Pass the SurfaceTexture transform matrix to the shader.
    float* tm = env->GetFloatArrayElements ( texMatArray, 0 );
    glUniformMatrix4fv ( texMatrix, 1, false, tm );
    // JNI_ABORT-free release not needed; 0 copies any changes back and frees.
    env->ReleaseFloatArrayElements ( texMatArray, tm, 0 );
    // Sampler uses texture unit 0 (bound above).
    glUniform1i ( texSampler, 0 );
    // White tint: camera colors pass through unmodified.
    float c[] = { 1, 1, 1, 1 };
    glUniform4fv ( color, 1, (GLfloat*)c );
    // Half the window size is used in the fragment shader to split the window.
    float sz[2] = {0};
    sz[0] = width/2;
    sz[1] = height/2;
    glUniform2fv ( size, 1, (GLfloat*)sz );
    // Quad geometry: interleaved [x,y,z,u,v] vertices plus an index buffer.
    glBindBuffer ( GL_ARRAY_BUFFER, buf[0] );
    glBindBuffer ( GL_ELEMENT_ARRAY_BUFFER, buf[1] );
    glEnableVertexAttribArray ( vtxPosAttrib );
    glVertexAttribPointer ( vtxPosAttrib, 3, GL_FLOAT, GL_FALSE, sizeof(float) * 5, (void*)0 );
    glEnableVertexAttribArray ( uvsAttrib );
    glVertexAttribPointer ( uvsAttrib, 2, GL_FLOAT, GL_FALSE, sizeof(float) * 5, (void*)(3 * sizeof(float) ) );
    glViewport ( 0, 0, width, height );
    // (Removed the duplicated GL_TEXTURE_2D glTexParameteri calls that were here.)
    // NOTE(review): GL_UNSIGNED_INT indices require GLES 3.0 or the
    // OES_element_index_uint extension — confirm the context version, or switch
    // the index buffer to GL_UNSIGNED_SHORT for GLES 2.0 compatibility.
    glDrawElements ( GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0 );
}
实际上,我尝试创建 MediaCodec 的编码器实例,接收其输入表面并把捕获的帧渲染上去。(我参考的是 Google 的 Grafika 示例项目。)
但是,我不知道在把捕获的帧渲染到 GLSurfaceView 之后,如何再把同一帧渲染到编码器的输入表面。
-EDIT-
我尝试使用编码器的输入表面创建EGLSurface,但显示以下错误消息。
android.opengl.EGL14.eglCreateWindowSurface(mEGLCore.getDisplay(), mEGLCore.getConfig(), mEncoderSurface, surfaceAttribs, 0);
/* error message */
E/BufferQueueProducer: [GraphicBufferSource] connect: BufferQueue has been abandoned
E/libEGL: eglCreateWindowSurface: native_window_api_connect (win=0x92b7f808) failed (0xffffffed) (already connected to another API?)
E/libEGL: eglCreateWindowSurface:693 error 3003 (EGL_BAD_ALLOC)
任何答复将不胜感激。