In my app I open the video preview layer with the following code:
AVCaptureDeviceInput *captureInput = [AVCaptureDeviceInput
deviceInputWithDevice:[AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo]
error:nil];
/*We set up the output*/
AVCaptureVideoDataOutput *captureOutput = [[AVCaptureVideoDataOutput alloc] init];
captureOutput.alwaysDiscardsLateVideoFrames = YES;
dispatch_queue_t queue;
queue = dispatch_queue_create("cameraQueue", NULL);
[captureOutput setSampleBufferDelegate:self queue:queue];
dispatch_release(queue);
NSString* key = (NSString*)kCVPixelBufferPixelFormatTypeKey;
NSNumber* value = [NSNumber numberWithUnsignedInt:kCVPixelFormatType_32BGRA];
NSDictionary* videoSettings = [NSDictionary dictionaryWithObject:value forKey:key];
[captureOutput setVideoSettings:videoSettings];
self.captureSession = [[AVCaptureSession alloc] init];
[self.captureSession addInput:captureInput];
[self.captureSession addOutput:captureOutput];
/*We use the medium-quality preset; on the iPhone 4 this demo would lag too much at 720p, since the conversion to UIImage and CGImage demands too many resources.*/
[self.captureSession setSessionPreset:AVCaptureSessionPresetMedium];
CGRect Vframe = self.viewNo2.frame;
/*We add the Custom Layer (We need to change the orientation of the layer so that the video is displayed correctly)*/
self.customLayer = [CALayer layer];
self.customLayer.frame = Vframe;
self.customLayer.contentsGravity = kCAGravityResizeAspect;
[self.view.layer addSublayer:self.customLayer];
CGRect VFrame1 = self.viewNo3.frame;
/*We add the second custom layer (its orientation needs the same adjustment as above)*/
self.customLayer1 = [CALayer layer];
self.customLayer1.frame = VFrame1;
self.customLayer1.contentsGravity = kCAGravityResizeAspect;
[self.view.layer addSublayer:self.customLayer1];
///*We add the imageView*/
//self.imageView = [[UIImageView alloc] init];
//self.imageView.frame = CGRectMake(9, 9, 137, 441);
//[self.view addSubview:self.imageView];
/*We add the preview layer*/
CGRect VFrame2 = self.viewNo1.frame;
self.prevLayer = [AVCaptureVideoPreviewLayer layerWithSession: self.captureSession];
self.prevLayer.frame = VFrame2;
self.prevLayer.videoGravity = AVLayerVideoGravityResizeAspectFill;
[self.view.layer addSublayer: self.prevLayer];
/*We start the capture*/
[self.captureSession startRunning];
When I try to capture the screen with this method:
- (IBAction)Photo {
CGRect rect = [self.view bounds];
UIGraphicsBeginImageContextWithOptions(rect.size,YES,0.0f);
CGContextRef context = UIGraphicsGetCurrentContext();
[self.view.layer renderInContext:context];
UIImage *viewImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
UIImageWriteToSavedPhotosAlbum(viewImage, nil, nil, nil);
}
The prevLayer is not captured. Am I missing something?
Answer 0 (score: 3):
An AVCaptureVideoPreviewLayer does not respond to screen capture via UIGraphicsGetImageFromCurrentImageContext(); that is simply a quirk of how Apple implemented it. The only way to get the current screen image is to tap into the image data buffer coming from the AVCaptureInput, and then add that frame to the screenshot manually.
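In practice that means implementing the captureOutput:didOutputSampleBuffer:fromConnection: delegate callback (the delegate is already wired up in your setup code) to convert each BGRA sample buffer into a UIImage, then drawing that image into the screenshot where the preview layer sits. Here is a minimal sketch, assuming a retained UIImage property named lastFrame (hypothetical, not in your original code):

// Delegate callback: convert each BGRA sample buffer to a UIImage
// and keep the most recent frame around for compositing.
- (void)captureOutput:(AVCaptureOutput *)captureOutput
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
       fromConnection:(AVCaptureConnection *)connection
{
    CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    CVPixelBufferLockBaseAddress(imageBuffer, 0);

    void *baseAddress = CVPixelBufferGetBaseAddress(imageBuffer);
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
    size_t width = CVPixelBufferGetWidth(imageBuffer);
    size_t height = CVPixelBufferGetHeight(imageBuffer);

    // Matches the kCVPixelFormatType_32BGRA setting from the session setup.
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef context = CGBitmapContextCreate(baseAddress, width, height, 8,
                                                 bytesPerRow, colorSpace,
                                                 kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);
    CGImageRef cgImage = CGBitmapContextCreateImage(context);

    CGContextRelease(context);
    CGColorSpaceRelease(colorSpace);
    CVPixelBufferUnlockBaseAddress(imageBuffer, 0);

    // Note: you may need to adjust the image orientation here,
    // just as the layers' orientation had to be adjusted.
    UIImage *frame = [UIImage imageWithCGImage:cgImage];
    CGImageRelease(cgImage);

    // lastFrame is an assumed UIImage property used only for this sketch.
    dispatch_async(dispatch_get_main_queue(), ^{
        self.lastFrame = frame;
    });
}

Then composite that frame into the screenshot, since renderInContext: leaves the preview layer's area blank:

- (IBAction)Photo {
    CGRect rect = [self.view bounds];
    UIGraphicsBeginImageContextWithOptions(rect.size, YES, 0.0f);
    CGContextRef context = UIGraphicsGetCurrentContext();
    [self.view.layer renderInContext:context];

    // Draw the latest camera frame over the region the preview layer occupies.
    [self.lastFrame drawInRect:self.prevLayer.frame];

    UIImage *viewImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    UIImageWriteToSavedPhotosAlbum(viewImage, nil, nil, nil);
}

With that in place, drawInRect: paints the most recent camera frame into the exact rectangle where the live preview appears on screen.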