I am trying to take screenshots on the iPad using OpenGL ES. This does work, but the screenshots have blank spots in them, and those blank spots appear to be exactly where the rendered objects should be. I have also tried reading from other buffers, but none of them seem to contain the actual 3D objects either.
I am using the example code from the String SDK.
Image of the problem: http://i48.tinypic.com/2q1f5lk.jpg
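To narrow down which buffer is actually being read, I can drop a small diagnostic helper into EAGLView.m and call it right before the glReadPixels in the snapshot code shown further below. The helper name and its call site are mine, not part of the String SDK sample:

// Hypothetical diagnostic helper (my addition, not from the String SDK sample):
// logs the framebuffer/renderbuffer bound at snapshot time, plus any pending
// GL error, to verify the read targets the buffer the 3D content was drawn into.
static void LogSnapshotBindings(void)
{
    GLint boundFramebuffer = 0, boundRenderbuffer = 0;
    glGetIntegerv(GL_FRAMEBUFFER_BINDING, &boundFramebuffer);
    glGetIntegerv(GL_RENDERBUFFER_BINDING, &boundRenderbuffer);
    NSLog(@"snapshot: FBO %d, RBO %d, glGetError 0x%x",
          boundFramebuffer, boundRenderbuffer, glGetError());
}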
EAGLView.m
- (void)createFramebuffer
{
    if (context && !defaultFramebuffer)
    {
        [EAGLContext setCurrentContext:context];

        // Handle scale
        if ([self respondsToSelector:@selector(setContentScaleFactor:)])
        {
            float screenScale = [UIScreen mainScreen].scale;
            self.contentScaleFactor = screenScale;
        }

        // Create default framebuffer object.
        glGenFramebuffers(1, &defaultFramebuffer);
        glBindFramebuffer(GL_FRAMEBUFFER, defaultFramebuffer);

        // Create color render buffer and allocate backing store.
        glGenRenderbuffers(1, &colorRenderbuffer);
        glBindRenderbuffer(GL_RENDERBUFFER, colorRenderbuffer);
        [context renderbufferStorage:GL_RENDERBUFFER fromDrawable:(CAEAGLLayer *)self.layer];
        glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_WIDTH, &framebufferWidth);
        glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_HEIGHT, &framebufferHeight);
        glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, colorRenderbuffer);

        // Create and attach depth buffer
        glGenRenderbuffers(1, &depthRenderbuffer);
        glBindRenderbuffer(GL_RENDERBUFFER, depthRenderbuffer);
        glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT16, framebufferWidth, framebufferHeight);
        glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, depthRenderbuffer);

        // Bind color buffer
        glBindRenderbuffer(GL_RENDERBUFFER, colorRenderbuffer);

        if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
            NSLog(@"Failed to make complete framebuffer object %x", glCheckFramebufferStatus(GL_FRAMEBUFFER));
    }
}
Screenshot code:
EAGLView.m
- (UIImage*)snapshot:(UIView*)eaglview
{
    GLint backingWidth, backingHeight;

    // Bind the color renderbuffer used to render the OpenGL ES view
    // If your application only creates a single color renderbuffer which is already bound at this point,
    // this call is redundant, but it is needed if you're dealing with multiple renderbuffers.
    // Note, replace "_colorRenderbuffer" with the actual name of the renderbuffer object defined in your class.
    //glBindRenderbufferOES(GL_RENDERBUFFER_OES, _colorRenderbuffer);

    // Get the size of the backing CAEAGLLayer
    glGetRenderbufferParameterivOES(GL_RENDERBUFFER_OES, GL_RENDERBUFFER_WIDTH_OES, &backingWidth);
    glGetRenderbufferParameterivOES(GL_RENDERBUFFER_OES, GL_RENDERBUFFER_HEIGHT_OES, &backingHeight);

    NSInteger x = 0, y = 0, width = backingWidth, height = backingHeight;
    NSInteger dataLength = width * height * 4;
    GLubyte *data = (GLubyte*)malloc(dataLength * sizeof(GLubyte));

    // Read pixel data from the framebuffer
    glPixelStorei(GL_PACK_ALIGNMENT, 4);
    glReadPixels(x, y, width, height, GL_RGBA, GL_UNSIGNED_BYTE, data);

    // Create a CGImage with the pixel data
    // If your OpenGL ES content is opaque, use kCGImageAlphaNoneSkipLast to ignore the alpha channel
    // otherwise, use kCGImageAlphaPremultipliedLast
    CGDataProviderRef ref = CGDataProviderCreateWithData(NULL, data, dataLength, NULL);
    CGColorSpaceRef colorspace = CGColorSpaceCreateDeviceRGB();
    CGImageRef iref = CGImageCreate(width, height, 8, 32, width * 4, colorspace,
                                    kCGBitmapByteOrder32Big | kCGImageAlphaPremultipliedLast,
                                    ref, NULL, true, kCGRenderingIntentDefault);

    // OpenGL ES measures data in PIXELS
    // Create a graphics context with the target size measured in POINTS
    NSInteger widthInPoints, heightInPoints;
    if (NULL != UIGraphicsBeginImageContextWithOptions) {
        // On iOS 4 and later, use UIGraphicsBeginImageContextWithOptions to take the scale into consideration
        // Set the scale parameter to your OpenGL ES view's contentScaleFactor
        // so that you get a high-resolution snapshot when its value is greater than 1.0
        CGFloat scale = eaglview.contentScaleFactor;
        widthInPoints = width / scale;
        heightInPoints = height / scale;
        UIGraphicsBeginImageContextWithOptions(CGSizeMake(widthInPoints, heightInPoints), NO, scale);
    }
    else {
        // On iOS prior to 4, fall back to use UIGraphicsBeginImageContext
        widthInPoints = width;
        heightInPoints = height;
        UIGraphicsBeginImageContext(CGSizeMake(widthInPoints, heightInPoints));
    }

    CGContextRef cgcontext = UIGraphicsGetCurrentContext();

    // UIKit coordinate system is upside down to GL/Quartz coordinate system
    // Flip the CGImage by rendering it to the flipped bitmap context
    // The size of the destination area is measured in POINTS
    CGContextSetBlendMode(cgcontext, kCGBlendModeCopy);
    CGContextDrawImage(cgcontext, CGRectMake(0.0, 0.0, widthInPoints, heightInPoints), iref);

    // Retrieve the UIImage from the current context
    UIImage *image = UIGraphicsGetImageFromCurrentImageContext();

    UIGraphicsEndImageContext();

    // Clean up
    free(data);
    CFRelease(ref);
    CFRelease(colorspace);
    CGImageRelease(iref);

    return image;
}
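The comments at the top of that method suggest re-binding the color renderbuffer before reading when more than one renderbuffer exists, and createFramebuffer above does also create a depth renderbuffer. A minimal sketch of what I understand that would look like in this project (assuming the ivar is the colorRenderbuffer from createFramebuffer):

    // Re-bind the on-screen color renderbuffer before querying its size and
    // reading its pixels, since createFramebuffer also creates a depth
    // renderbuffer and other code may have changed the GL_RENDERBUFFER binding.
    glBindRenderbuffer(GL_RENDERBUFFER, colorRenderbuffer);
    glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_WIDTH, &backingWidth);
    glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_HEIGHT, &backingHeight);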
String_OGL_TutorialViewController.m
- (void)render
{
    [(EAGLView *)self.view setFramebuffer];

    glDisable(GL_CULL_FACE);
    glEnable(GL_DEPTH_TEST);

    const int maxMarkerCount = 10;
    struct MarkerInfoMatrixBased markerInfo[10];

    int markerCount = [stringOGL getMarkerInfoMatrixBased: markerInfo maxMarkerCount: maxMarkerCount];

    for (int i = 0; i < markerCount; i++)
    {
        float diffuse[4] = {0, 0, 0, 0};
        diffuse[markerInfo[i].imageID % 3] = 1;

        if ([context API] == kEAGLRenderingAPIOpenGLES2)
        {
            glUseProgram(program);
            glUniform4fv(uniforms[UNIFORM_COLOR], 1, diffuse);

            const float translationMatrix[16] = {1, 0, 0, 0,  0, 1, 0, 0,  0, 0, 1, 0,  0, 0, -cubeScale, 1};

            float modelViewMatrix[16];
            float modelViewProjectionMatrix[16];

            [String_OGL_TutorialViewController multiplyMatrix: translationMatrix withMatrix: markerInfo[i].transform into: modelViewMatrix];
            [String_OGL_TutorialViewController multiplyMatrix: modelViewMatrix withMatrix: projectionMatrix into: modelViewProjectionMatrix];

            glUniformMatrix4fv(uniforms[UNIFORM_MVP], 1, GL_FALSE, modelViewProjectionMatrix);

            glEnableVertexAttribArray(0);
            glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, NULL);

            glEnableVertexAttribArray(1);
            glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, ((float *)NULL) + 6 * 4 * 3);

            // Validate program before drawing. This is a good check, but only really necessary in a debug build.
            // DEBUG macro must be defined in your debug configurations if that's not already the case.
#if defined(DEBUG)
            if (![self validateProgram:program])
            {
                NSLog(@"Failed to validate program: %d", program);
                return;
            }
#endif
            glDrawElements(GL_TRIANGLES, 36, GL_UNSIGNED_BYTE, NULL);
        }
        else
        {
            glEnable(GL_LIGHTING);
            glEnable(GL_LIGHT0);
            glLightfv(GL_LIGHT0, GL_DIFFUSE, diffuse);

            glEnableClientState(GL_VERTEX_ARRAY);
            glVertexPointer(3, GL_FLOAT, 12, NULL);

            glEnableClientState(GL_NORMAL_ARRAY);
            glNormalPointer(GL_FLOAT, 12, ((float *)NULL) + 6 * 4 * 3);

            glMatrixMode(GL_PROJECTION);
            glLoadMatrixf(projectionMatrix);

            glMatrixMode(GL_MODELVIEW);
            glLoadIdentity();
            glMultMatrixf(markerInfo[i].transform);
            glTranslatef(0, 0, -cubeScale);

            glDrawElements(GL_TRIANGLES, 36, GL_UNSIGNED_BYTE, NULL);
        }

        UIImage *img = [(EAGLView *)self.view snapshot: self.view];
        UIImageWriteToSavedPhotosAlbum(img, self, @selector(image:didFinishSavingWithError:contextInfo:), nil);
        [stringOGL pause];
    }
}