iPhone: capturing an augmented reality screenshot while using AVCaptureVideoPreviewLayer

Asked: 2012-01-24 01:01:50

Tags: iphone objective-c ios camera avfoundation

I have a small augmented reality app in development, and I would like to know how to save a screenshot of exactly what the user sees, either when a button is tapped or on a timer.

The app works by overlaying a live camera feed on top of another UIView. I can save a screenshot with the power button + home button combination, and those go to the camera roll. However, Apple will not render the AVCaptureVideoPreviewLayer even when I ask the entire window to render itself; where the preview layer should be, it leaves a transparent canvas.

What is the proper way for an augmented reality app to save screenshots, including transparency and subviews?

//displaying a live preview on one of the views
-(void)startCapture
{
    captureSession = [[AVCaptureSession alloc] init];

    // Pick a front- or rear-facing camera.
    // Alternatively: AVCaptureDevice *videoCaptureDevice = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
    AVCaptureDevice *videoCaptureDevice = nil;
    NSArray *videoDevices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo];

    for (AVCaptureDevice *device in videoDevices) {
        if (useFrontCamera) {
            if (device.position == AVCaptureDevicePositionFront) {
                // front-facing camera exists
                videoCaptureDevice = device;
                break;
            }
        } else {
            if (device.position == AVCaptureDevicePositionBack) {
                // rear-facing camera exists
                videoCaptureDevice = device;
                break;
            }
        }
    }

    NSError *error = nil;
    AVCaptureDeviceInput *videoInput = [AVCaptureDeviceInput deviceInputWithDevice:videoCaptureDevice error:&error];
    if (videoInput) {
        [captureSession addInput:videoInput];
    }
    else {
        // Handle the failure.
    }

    // Create the video data output before asking the session whether it can be added.
    // videoSettings is an ivar that should request kCVPixelFormatType_32BGRA frames
    // so that -performImageCaptureFrom: below accepts them.
    dispatch_queue_t queue = dispatch_queue_create("com.AugmentedRealityGlamour.ImageCaptureQueue", NULL);

    captureOutput = [[AVCaptureVideoDataOutput alloc] init];
    [captureOutput setAlwaysDiscardsLateVideoFrames:YES];
    [captureOutput setSampleBufferDelegate:self queue:queue];
    [captureOutput setVideoSettings:videoSettings];

    dispatch_release(queue);

    if ([captureSession canAddOutput:captureOutput]) {
        [captureSession addOutput:captureOutput];
    } else {
        // Handle the failure.
    }

    previewLayer = [AVCaptureVideoPreviewLayer layerWithSession:captureSession];
    UIView *aView = arOverlayView;
    previewLayer.frame = CGRectMake(0, 0, arOverlayView.frame.size.width, arOverlayView.frame.size.height); // Assume you want the preview layer to fill the view.

    [aView.layer addSublayer:previewLayer];
    [captureSession startRunning];
}

//ask the entire window to draw itself in a graphics context. This call will not render
//the AVCaptureVideoPreviewLayer. It has to be replaced with a UIImageView- or GL-based view.
//See the code below for creating a dynamically updating UIImageView.
- (void)saveScreenshot
{

   UIGraphicsBeginImageContext(appDelegate.window.bounds.size);

    [appDelegate.window.layer renderInContext:UIGraphicsGetCurrentContext()];

    UIImage *screenshot = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();

    UIImageWriteToSavedPhotosAlbum(screenshot, self, 
                                   @selector(image:didFinishSavingWithError:contextInfo:), nil);


}
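
For the button-or-timer trigger mentioned in the question, a minimal sketch of the wiring; screenshotButton, screenshotTimerFired: and the 10-second interval are assumptions, not part of the original code:

    //hypothetical wiring, e.g. in viewDidLoad
    [screenshotButton addTarget:self
                         action:@selector(saveScreenshot)
               forControlEvents:UIControlEventTouchUpInside];

    [NSTimer scheduledTimerWithTimeInterval:10.0
                                     target:self
                                   selector:@selector(screenshotTimerFired:)
                                   userInfo:nil
                                    repeats:YES];

    //timer callback that simply forwards to -saveScreenshot
    - (void)screenshotTimerFired:(NSTimer *)timer
    {
        [self saveScreenshot];
    }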


//image saved to camera roll callback
- (void)image:(UIImage *)image didFinishSavingWithError:(NSError *)error 
  contextInfo:(void *)contextInfo
{
    // Was there an error?
    if (error != NULL)
    {
        // Show error message...
         NSLog(@"save failed");

    }
    else  // No errors
    {
         NSLog(@"save successful");
        // Show message image successfully saved
    }
}

Here is the code that creates the image:

//you need to add the view controller as a delegate of the camera output in order to be notified when data arrives

-(void)activateCameraFeed
{
//this is the code responsible for capturing feed for still image processing
 dispatch_queue_t queue = dispatch_queue_create("com.AugmentedRealityGlamour.ImageCaptureQueue", NULL);

    captureOutput = [[AVCaptureVideoDataOutput alloc] init];
    [captureOutput setAlwaysDiscardsLateVideoFrames:YES];
    [captureOutput setSampleBufferDelegate:self queue:queue];
    [captureOutput setVideoSettings:videoSettings];

    dispatch_release(queue);

//......configure audio feed, add inputs and outputs

}
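
The videoSettings dictionary used here (and in -startCapture above) is not shown in the question; for the pixel-format check in -performImageCaptureFrom: below to pass, it presumably looks something like this:

    //assumed contents of the videoSettings ivar: request BGRA frames so the
    //kCVPixelFormatType_32BGRA check in -performImageCaptureFrom: succeeds
    videoSettings = [NSDictionary dictionaryWithObject:
                        [NSNumber numberWithUnsignedInt:kCVPixelFormatType_32BGRA]
                                                forKey:(id)kCVPixelBufferPixelFormatTypeKey];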

//buffer delegate callback
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
    if ( ignoreImageStream )
        return;

    [self performImageCaptureFrom:sampleBuffer];
}

Creating the UIImage:

- (void) performImageCaptureFrom:(CMSampleBufferRef)sampleBuffer
{
    CVImageBufferRef imageBuffer;

    if ( CMSampleBufferGetNumSamples(sampleBuffer) != 1 )
        return;
    if ( !CMSampleBufferIsValid(sampleBuffer) )
        return;
    if ( !CMSampleBufferDataIsReady(sampleBuffer) )
        return;

    imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer); 

    if ( CVPixelBufferGetPixelFormatType(imageBuffer) != kCVPixelFormatType_32BGRA )
        return;

    CVPixelBufferLockBaseAddress(imageBuffer,0); 

    uint8_t *baseAddress = (uint8_t *)CVPixelBufferGetBaseAddress(imageBuffer);
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
    size_t width = CVPixelBufferGetWidth(imageBuffer);
    size_t height = CVPixelBufferGetHeight(imageBuffer); 

    CGImageRef newImage = nil;

    if ( cameraDeviceSetting == CameraDeviceSetting640x480 )
    {
        CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
        CGContextRef newContext = CGBitmapContextCreate(baseAddress, width, height, 8, bytesPerRow, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);
        newImage = CGBitmapContextCreateImage(newContext);
        CGColorSpaceRelease( colorSpace );
        CGContextRelease(newContext);
    }
    else
    {
        // the buffer is reused both for a copy of the full-size frame and as the backing
        // store of the scaled 640x480 context below, so size it for the larger of the two
        uint8_t *tempAddress = malloc( MAX( bytesPerRow * height, (size_t)640 * 4 * 480 ) );
        memcpy( tempAddress, baseAddress, bytesPerRow * height );
        baseAddress = tempAddress;
        CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
        CGContextRef newContext = CGBitmapContextCreate(baseAddress, width, height, 8, bytesPerRow, colorSpace,  kCGBitmapByteOrder32Little | kCGImageAlphaNoneSkipFirst);
        newImage = CGBitmapContextCreateImage(newContext);
        CGContextRelease(newContext);
        newContext = CGBitmapContextCreate(baseAddress, 640, 480, 8, 640*4, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);
        CGContextScaleCTM( newContext, (CGFloat)640/(CGFloat)width, (CGFloat)480/(CGFloat)height );
        CGContextDrawImage(newContext, CGRectMake(0,0,640,480), newImage);
        CGImageRelease(newImage);
        newImage = CGBitmapContextCreateImage(newContext);
        CGColorSpaceRelease( colorSpace );
        CGContextRelease(newContext);
        free( tempAddress );
    }

    if ( newImage != nil )
    {

        // modified for iOS 5.0 with ARC
        tempImage = [[UIImage alloc] initWithCGImage:newImage scale:(CGFloat)1.0 orientation:cameraImageOrientation];
        CGImageRelease(newImage);

        // this call creates the illusion of a preview layer, while we are actively switching images created with this method
        [self performSelectorOnMainThread:@selector(newCameraImageNotification:) withObject:tempImage waitUntilDone:YES];
    }

    CVPixelBufferUnlockBaseAddress(imageBuffer,0);
}
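
The cameraImageOrientation ivar is not defined in the snippet either; presumably it is set when the camera is chosen, along these lines (an assumption for a portrait UI, not from the original code):

    //assumed value of the cameraImageOrientation ivar: BGRA frames arrive in landscape,
    //so rotate them for a portrait UI, and mirror them for the front camera
    cameraImageOrientation = useFrontCamera ? UIImageOrientationLeftMirrored
                                            : UIImageOrientationRight;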

Update the interface with a UIView that can actually be rendered in a graphics context; once arOverlayView displays these frames as a UIImage, the renderInContext: call in -saveScreenshot above picks up the camera image along with the other subviews:

- (void) newCameraImageNotification:(UIImage*)newImage
{
    if ( newImage == nil )
        return;

        [arOverlayView setImage:newImage];
//or do more advanced processing of the image
}

1 Answer:

Answer 0 (score: 5):

If you want a snapshot of what is actually visible on screen, this is what I do in one of my camera apps. I haven't touched this code in a long time, so there may well be a better 5.0 way to do it by now, but it is solid with over 1 million downloads. There is a function for grabbing a UIView-based screen and one for grabbing an OpenGL ES 1 screen:

//
//  ScreenCapture.m
//  LiveEffectsCam
//
//  Created by John Carter on 10/8/10.
//

#import "ScreenCapture.h"

#import <QuartzCore/CABase.h>
#import <QuartzCore/CATransform3D.h>
#import <QuartzCore/CALayer.h>
#import <QuartzCore/CAScrollLayer.h>

#import <OpenGLES/EAGL.h>
#import <OpenGLES/ES1/gl.h>
#import <OpenGLES/ES1/glext.h>
#import <QuartzCore/QuartzCore.h>
#import <OpenGLES/EAGLDrawable.h>


@implementation ScreenCapture

+ (UIImage *) GLViewToImage:(GLView *)glView
{
    UIImage *glImage = [GLView snapshot:glView]; // returns an autoreleased image
    return glImage;
}

+ (UIImage *) GLViewToImage:(GLView *)glView withOverlayImage:(UIImage *)overlayImage
{
    UIImage *glImage = [GLView snapshot:glView]; // returns an autoreleased image

    // Merge Image and Overlay
    //
    CGRect imageRect = CGRectMake((CGFloat)0.0, (CGFloat)0.0, glImage.size.width*glImage.scale, glImage.size.height*glImage.scale);
    CGImageRef overlayCopy = CGImageCreateCopy( overlayImage.CGImage );
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef context = CGBitmapContextCreate(NULL, (int)glImage.size.width*glImage.scale, (int)glImage.size.height*glImage.scale, 8, (int)glImage.size.width*4*glImage.scale, colorSpace, kCGImageAlphaPremultipliedLast);
    CGContextDrawImage(context, imageRect, glImage.CGImage);
    CGContextDrawImage(context, imageRect, overlayCopy);
    CGImageRef newImage = CGBitmapContextCreateImage(context);
    UIImage *combinedViewImage = [[[UIImage alloc] initWithCGImage:newImage] autorelease];
    CGImageRelease(newImage);
    CGImageRelease(overlayCopy);
    CGContextRelease(context);
    CGColorSpaceRelease(colorSpace);

    return combinedViewImage;
}

+ (UIImage *) UIViewToImage:(UIView *)view withOverlayImage:(UIImage *)overlayImage
    {
    UIImage *viewImage = [ScreenCapture UIViewToImage:view]; // returns an autoreleased image

    // Merge Image and Overlay
    //
    CGRect imageRect = CGRectMake((CGFloat)0.0, (CGFloat)0.0, viewImage.size.width*viewImage.scale, viewImage.size.height*viewImage.scale);
    CGImageRef overlayCopy = CGImageCreateCopy( overlayImage.CGImage );
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef context = CGBitmapContextCreate(NULL, (int)viewImage.size.width*viewImage.scale, (int)viewImage.size.height*viewImage.scale, 8, (int)viewImage.size.width*4*viewImage.scale, colorSpace, kCGImageAlphaPremultipliedLast);
    CGContextDrawImage(context, imageRect, viewImage.CGImage);
    CGContextDrawImage(context, imageRect, overlayCopy);
    CGImageRef newImage = CGBitmapContextCreateImage(context);
    UIImage *combinedViewImage = [[[UIImage alloc] initWithCGImage:newImage] autorelease];
    CGImageRelease(newImage);
    CGImageRelease(overlayCopy);
    CGContextRelease(context);
    CGColorSpaceRelease(colorSpace);

    return combinedViewImage;
}



+ (UIImage *) UIViewToImage:(UIView *)view
{
    // Create a graphics context with the target size
    // On iOS 4 and later, use UIGraphicsBeginImageContextWithOptions to take the scale into consideration
    // On iOS prior to 4, fall back to use UIGraphicsBeginImageContext
    //
    // CGSize imageSize = [[UIScreen mainScreen] bounds].size;
    CGSize imageSize = CGSizeMake( (CGFloat)480.0, (CGFloat)640.0 );        // camera image size

    if (NULL != UIGraphicsBeginImageContextWithOptions)
        UIGraphicsBeginImageContextWithOptions(imageSize, NO, 0);
    else
        UIGraphicsBeginImageContext(imageSize);

    CGContextRef context = UIGraphicsGetCurrentContext();

    // Start with the view...
    //
    CGContextSaveGState(context);
    CGContextTranslateCTM(context, [view center].x, [view center].y);
    CGContextConcatCTM(context, [view transform]);
    CGContextTranslateCTM(context,-[view bounds].size.width * [[view layer] anchorPoint].x,-[view bounds].size.height * [[view layer] anchorPoint].y);
    [[view layer] renderInContext:context];
    CGContextRestoreGState(context);

    // ...then repeat for every subview from back to front
    //
    for (UIView *subView in [view subviews]) 
    {
        if ( [subView respondsToSelector:@selector(screen)] )
            if ( [(UIWindow *)subView screen] == [UIScreen mainScreen] )
                continue;

        CGContextSaveGState(context);
        CGContextTranslateCTM(context, [subView center].x, [subView center].y);
        CGContextConcatCTM(context, [subView transform]);
        CGContextTranslateCTM(context,-[subView bounds].size.width * [[subView layer] anchorPoint].x,-[subView bounds].size.height * [[subView layer] anchorPoint].y);
        [[subView layer] renderInContext:context];
        CGContextRestoreGState(context);
    }

    UIImage *image = UIGraphicsGetImageFromCurrentImageContext();   // autoreleased image

    UIGraphicsEndImageContext();

    return image;
}

+ (UIImage *) snapshot:(GLView *)eaglview
{
    NSInteger x = 0;
    NSInteger y = 0;
    NSInteger width = [eaglview backingWidth];
    NSInteger height = [eaglview backingHeight];
    NSInteger dataLength = width * height * 4;

    NSUInteger i;
    for ( i=0; i<100; i++ )
    {
        glFlush();
        CFRunLoopRunInMode(kCFRunLoopDefaultMode, (float)1.0/(float)60.0, FALSE);
    }

    GLubyte *data = (GLubyte*)malloc(dataLength * sizeof(GLubyte));

    // Read pixel data from the framebuffer
    //
    glPixelStorei(GL_PACK_ALIGNMENT, 4);
    glReadPixels(x, y, width, height, GL_RGBA, GL_UNSIGNED_BYTE, data);

    // Create a CGImage with the pixel data
    // If your OpenGL ES content is opaque, use kCGImageAlphaNoneSkipLast to ignore the alpha channel
    // otherwise, use kCGImageAlphaPremultipliedLast
    //
    CGDataProviderRef ref = CGDataProviderCreateWithData(NULL, data, dataLength, NULL);
    CGColorSpaceRef colorspace = CGColorSpaceCreateDeviceRGB();
    CGImageRef iref = CGImageCreate(width, height, 8, 32, width * 4, colorspace, kCGBitmapByteOrder32Big | kCGImageAlphaPremultipliedLast, ref, NULL, true, kCGRenderingIntentDefault);

    // OpenGL ES measures data in PIXELS
    // Create a graphics context with the target size measured in POINTS
    //
    NSInteger widthInPoints;
    NSInteger heightInPoints;

    if (NULL != UIGraphicsBeginImageContextWithOptions)
    {
        // On iOS 4 and later, use UIGraphicsBeginImageContextWithOptions to take the scale into consideration
        // Set the scale parameter to your OpenGL ES view's contentScaleFactor
        // so that you get a high-resolution snapshot when its value is greater than 1.0
        //
        CGFloat scale = eaglview.contentScaleFactor;
        widthInPoints = width / scale;
        heightInPoints = height / scale;
        UIGraphicsBeginImageContextWithOptions(CGSizeMake(widthInPoints, heightInPoints), NO, scale);
    }
    else
    {
        // On iOS prior to 4, fall back to use UIGraphicsBeginImageContext
        //
        widthInPoints = width;
        heightInPoints = height;
        UIGraphicsBeginImageContext(CGSizeMake(widthInPoints, heightInPoints));
    }

    CGContextRef cgcontext = UIGraphicsGetCurrentContext();

    // UIKit coordinate system is upside down to GL/Quartz coordinate system
    // Flip the CGImage by rendering it to the flipped bitmap context
    // The size of the destination area is measured in POINTS
    //
    CGContextSetBlendMode(cgcontext, kCGBlendModeCopy);
    CGContextDrawImage(cgcontext, CGRectMake(0.0, 0.0, widthInPoints, heightInPoints), iref);

    // Retrieve the UIImage from the current context
    UIImage *image = UIGraphicsGetImageFromCurrentImageContext();   // autoreleased image

    UIGraphicsEndImageContext();

    // Clean up
    free(data);
    CFRelease(ref);
    CFRelease(colorspace);
    CGImageRelease(iref);

    return image;
}

@end
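
A hypothetical usage example tying this back to the question, assuming arOverlayView is the UIImageView-based view that now shows the camera frames plus the AR subviews:

    //hypothetical usage: grab the AR view (camera image plus subviews) and save it,
    //reusing the camera-roll callback from the question's code
    UIImage *screenshot = [ScreenCapture UIViewToImage:arOverlayView];
    UIImageWriteToSavedPhotosAlbum(screenshot, self,
                                   @selector(image:didFinishSavingWithError:contextInfo:), nil);

    //or, if the camera is drawn with OpenGL and the AR content is a flattened UIImage:
    //UIImage *screenshot = [ScreenCapture GLViewToImage:glView withOverlayImage:overlayImage];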