How can I record video with both the front and back cameras and keep recording while switching?

Date: 2014-12-17 09:11:48

Tags: ios objective-c iphone video avfoundation

I am using AVFoundation and want to record video with both (front and back) cameras. I can record video with one camera, but when I switch the camera mode back to the front camera, the camera still freezes. Is it possible to keep recording video continuously?

Sample code:

- (void) startup
{
    if (_session == nil)
    {
        NSLog(@"Starting up server");

        self.isCapturing = NO;
        self.isPaused = NO;
        _currentFile = 0;
        _discont = NO;

        // create capture session with a video input
        _session = [[AVCaptureSession alloc] init];
        AVCaptureDevice *backCamera = [self frontCamera]; // note: despite the variable name, this is the front camera
        AVCaptureDeviceInput *input = [AVCaptureDeviceInput deviceInputWithDevice:backCamera error:nil];
        [_session addInput:input];

        // audio input from the default mic
        AVCaptureDevice *mic = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeAudio];
        AVCaptureDeviceInput *micinput = [AVCaptureDeviceInput deviceInputWithDevice:mic error:nil];
        [_session addInput:micinput];

        // create an output for YUV output with self as delegate
        _captureQueue = dispatch_queue_create("com.softcraftsystems.comss", DISPATCH_QUEUE_SERIAL);
        AVCaptureVideoDataOutput *videoout = [[AVCaptureVideoDataOutput alloc] init];
        [videoout setSampleBufferDelegate:self queue:_captureQueue];
        NSDictionary *setcapSettings = [NSDictionary dictionaryWithObjectsAndKeys:
                                        [NSNumber numberWithInt:kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange], kCVPixelBufferPixelFormatTypeKey,
                                        nil];
        videoout.videoSettings = setcapSettings;
        [_session addOutput:videoout];
        _videoConnection = [videoout connectionWithMediaType:AVMediaTypeVideo];
        // find the actual dimensions used so we can set up the encoder to the same
        NSDictionary *actual = videoout.videoSettings;
        _cy = [[actual objectForKey:@"Height"] integerValue];
        _cx = [[actual objectForKey:@"Width"] integerValue];

        AVCaptureAudioDataOutput *audioout = [[AVCaptureAudioDataOutput alloc] init];
        [audioout setSampleBufferDelegate:self queue:_captureQueue];
        [_session addOutput:audioout];
        _audioConnection = [audioout connectionWithMediaType:AVMediaTypeAudio];
        // for audio, we want the channels and sample rate, but we can't get those
        // from audioout.audioSettings on iOS, so we need to wait for the first sample

        // start capture and a preview layer
        [_session startRunning];

        _preview = [AVCaptureVideoPreviewLayer layerWithSession:_session];
        _preview.videoGravity = AVLayerVideoGravityResizeAspectFill;
    }
}

- (AVCaptureDevice *)frontCamera
{
    NSArray *devices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo];
    for (AVCaptureDevice *device in devices) {
        if ([device position] == AVCaptureDevicePositionFront) {
            return device;
        }
    }
    return nil;
}

- (AVCaptureDevice *)backCamera
{
    NSArray *devices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo];
    for (AVCaptureDevice *device in devices) {
        if ([device position] == AVCaptureDevicePositionBack) {
            return device;
        }
    }
    return nil;
}

- (void) startupFront
{
    // stop the old session before discarding it, then rebuild from scratch
    [_session stopRunning];
    _session = nil;
    if (_session == nil)
    {
        NSLog(@"Starting up server");

        self.isCapturing = NO;
        self.isPaused = NO;
        _currentFile = 0;
        _discont = NO;

        // create capture session with a video input
        _session = [[AVCaptureSession alloc] init];
        AVCaptureDevice *backCamera = [self backCamera]; // this rebuild uses the back camera
        AVCaptureDeviceInput *input = [AVCaptureDeviceInput deviceInputWithDevice:backCamera error:nil];
        [_session addInput:input];

        // audio input from the default mic
        AVCaptureDevice *mic = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeAudio];
        AVCaptureDeviceInput *micinput = [AVCaptureDeviceInput deviceInputWithDevice:mic error:nil];
        [_session addInput:micinput];

        // create an output for YUV output with self as delegate
        _captureQueue = dispatch_queue_create("com.softcraftsystems.comss", DISPATCH_QUEUE_SERIAL);
        AVCaptureVideoDataOutput *videoout = [[AVCaptureVideoDataOutput alloc] init];
        [videoout setSampleBufferDelegate:self queue:_captureQueue];
        NSDictionary *setcapSettings = [NSDictionary dictionaryWithObjectsAndKeys:
                                        [NSNumber numberWithInt:kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange], kCVPixelBufferPixelFormatTypeKey,
                                        nil];
        videoout.videoSettings = setcapSettings;
        [_session addOutput:videoout];
        _videoConnection = [videoout connectionWithMediaType:AVMediaTypeVideo];
        // find the actual dimensions used so we can set up the encoder to the same
        NSDictionary *actual = videoout.videoSettings;
        _cy = [[actual objectForKey:@"Height"] integerValue];
        _cx = [[actual objectForKey:@"Width"] integerValue];

        AVCaptureAudioDataOutput *audioout = [[AVCaptureAudioDataOutput alloc] init];
        [audioout setSampleBufferDelegate:self queue:_captureQueue];
        [_session addOutput:audioout];
        _audioConnection = [audioout connectionWithMediaType:AVMediaTypeAudio];
        // for audio, we want the channels and sample rate, but we can't get those
        // from audioout.audioSettings on iOS, so we need to wait for the first sample

        // start capture and a preview layer
        [_session startRunning];

        _preview = [AVCaptureVideoPreviewLayer layerWithSession:_session];
        _preview.videoGravity = AVLayerVideoGravityResizeAspectFill;
    }
}

- (void) startCapture
{
    @synchronized(self)
    {
        if (!self.isCapturing)
        {
            NSLog(@"starting capture");

            // create the encoder once we have the audio params
            _encoder = nil;
            self.isPaused = NO;
            _discont = NO;
            _timeOffset = CMTimeMake(0, 0);
            self.isCapturing = YES;
        }
    }
}

- (void) stopCapture
{
    @synchronized(self)
    {
        if (self.isCapturing)
        {
            NSString *filename = [NSString stringWithFormat:@"capture%d.mp4", _currentFile];
            NSString *path = [NSTemporaryDirectory() stringByAppendingPathComponent:filename];
            NSURL *url = [NSURL fileURLWithPath:path];
            _currentFile++;

            // serialize with audio and video capture
            self.isCapturing = NO;
            dispatch_async(_captureQueue, ^{
                [_encoder finishWithCompletionHandler:^{
                    self.isCapturing = NO;
                    _encoder = nil;
                    ALAssetsLibrary *library = [[ALAssetsLibrary alloc] init];
                    [library writeVideoAtPathToSavedPhotosAlbum:url completionBlock:^(NSURL *assetURL, NSError *error) {
                        NSLog(@"save completed");
                        [[NSFileManager defaultManager] removeItemAtPath:path error:nil];
                    }];
                }];
            });
        }
    }
}

- (void) pauseCapture
{
    @synchronized(self)
    {
        if (self.isCapturing)
        {
            NSLog(@"Pausing capture");
            self.isPaused = YES;
            _discont = YES;
        }
    }
}

- (void) resumeCapture
{
    @synchronized(self)
    {
        if (self.isPaused)
        {
            NSLog(@"Resuming capture");
            self.isPaused = NO;
        }
    }
}

- (CMSampleBufferRef) adjustTime:(CMSampleBufferRef)sample by:(CMTime)offset
{
    // copy the timing info, shift it back by the accumulated pause offset,
    // and return a new sample buffer with the adjusted timestamps
    CMItemCount count;
    CMSampleBufferGetSampleTimingInfoArray(sample, 0, nil, &count);
    CMSampleTimingInfo *pInfo = malloc(sizeof(CMSampleTimingInfo) * count);
    CMSampleBufferGetSampleTimingInfoArray(sample, count, pInfo, &count);
    for (CMItemCount i = 0; i < count; i++)
    {
        pInfo[i].decodeTimeStamp = CMTimeSubtract(pInfo[i].decodeTimeStamp, offset);
        pInfo[i].presentationTimeStamp = CMTimeSubtract(pInfo[i].presentationTimeStamp, offset);
    }
    CMSampleBufferRef sout;
    CMSampleBufferCreateCopyWithNewTiming(nil, sample, count, pInfo, &sout);
    free(pInfo);
    return sout;
}

- (void) setAudioFormat:(CMFormatDescriptionRef)fmt
{
    const AudioStreamBasicDescription *asbd = CMAudioFormatDescriptionGetStreamBasicDescription(fmt);
    _samplerate = asbd->mSampleRate;
    _channels = asbd->mChannelsPerFrame;
}

- (void) captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
    BOOL bVideo = YES;

    @synchronized(self)
    {
        if (!self.isCapturing || self.isPaused)
        {
            return;
        }
        if (connection != _videoConnection)
        {
            bVideo = NO;
        }
        if ((_encoder == nil) && !bVideo)
        {
            CMFormatDescriptionRef fmt = CMSampleBufferGetFormatDescription(sampleBuffer);
            [self setAudioFormat:fmt];
            NSString *filename = [NSString stringWithFormat:@"capture%d.mp4", _currentFile];
            NSString *path = [NSTemporaryDirectory() stringByAppendingPathComponent:filename];
            _encoder = [VideoEncoder encoderForPath:path Height:_cy width:_cx channels:_channels samples:_samplerate];
        }
        if (_discont)
        {
            if (bVideo)
            {
                return;
            }
            _discont = NO;
            // calc adjustment
            CMTime pts = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
            CMTime last = bVideo ? _lastVideo : _lastAudio;
            if (last.flags & kCMTimeFlags_Valid)
            {
                if (_timeOffset.flags & kCMTimeFlags_Valid)
                {
                    pts = CMTimeSubtract(pts, _timeOffset);
                }
                CMTime offset = CMTimeSubtract(pts, last);
                NSLog(@"Setting offset from %s", bVideo ? "video" : "audio");
                NSLog(@"Adding %f to %f (pts %f)", ((double)offset.value)/offset.timescale, ((double)_timeOffset.value)/_timeOffset.timescale, ((double)pts.value)/pts.timescale);

                // this stops us having to set a scale for _timeOffset before we see the first video time
                if (_timeOffset.value == 0)
                {
                    _timeOffset = offset;
                }
                else
                {
                    _timeOffset = CMTimeAdd(_timeOffset, offset);
                }
            }
            _lastVideo.flags = 0;
            _lastAudio.flags = 0;
        }

        // retain so that we can release either this or the modified one
        CFRetain(sampleBuffer);

        if (_timeOffset.value > 0)
        {
            CFRelease(sampleBuffer);
            sampleBuffer = [self adjustTime:sampleBuffer by:_timeOffset];
        }

        // record the most recent time so we know the length of the pause
        CMTime pts = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
        CMTime dur = CMSampleBufferGetDuration(sampleBuffer);
        if (dur.value > 0)
        {
            pts = CMTimeAdd(pts, dur);
        }
        if (bVideo)
        {
            _lastVideo = pts;
        }
        else
        {
            _lastAudio = pts;
        }
    }

    // pass the frame to the encoder
    [_encoder encodeFrame:sampleBuffer isVideo:bVideo];
    CFRelease(sampleBuffer);
}

- (void) shutdown
{
    NSLog(@"shutting down server");
    if (_session)
    {
        [_session stopRunning];
        _session = nil;
    }
    [_encoder finishWithCompletionHandler:^{
        NSLog(@"Capture completed");
    }];
}

1 Answer:

Answer 0 (score: 0)

As far as I know, it is not possible to keep a single recording running while switching cameras. First, the two cameras differ in resolution and quality, and a video can only have one resolution and quality for its whole length. Second, every time you switch between cameras, the camera device is allocated and initialized again. So, as far as I can tell, it is not possible. But if you do find a solution, please let me know.
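For completeness, one approach that is often tried instead of tearing the whole session down (as `startupFront` does above) is to swap only the video input inside a `beginConfiguration`/`commitConfiguration` block, so the session, outputs and encoder stay alive across the switch. The sketch below is only a rough illustration of that idea, not a verified fix: it assumes the `_session` ivar and the `frontCamera`/`backCamera` helpers from the question, and `switchToCameraAtPosition:` is a hypothetical method name.

- (void) switchToCameraAtPosition:(AVCaptureDevicePosition)position
{
    // hypothetical helper: swap the video input on the running session
    // instead of rebuilding the whole session
    AVCaptureDevice *newDevice = (position == AVCaptureDevicePositionFront) ? [self frontCamera] : [self backCamera];
    NSError *error = nil;
    AVCaptureDeviceInput *newInput = [AVCaptureDeviceInput deviceInputWithDevice:newDevice error:&error];
    if (newInput == nil)
    {
        NSLog(@"Could not create camera input: %@", error);
        return;
    }

    [_session beginConfiguration];

    // remove the existing video input(s), but leave the audio input in place
    for (AVCaptureInput *input in [_session.inputs copy])
    {
        if ([input isKindOfClass:[AVCaptureDeviceInput class]] &&
            [[(AVCaptureDeviceInput *)input device] hasMediaType:AVMediaTypeVideo])
        {
            [_session removeInput:input];
        }
    }

    if ([_session canAddInput:newInput])
    {
        [_session addInput:newInput];
    }

    [_session commitConfiguration];

    // the connection object can change after reconfiguration, so the
    // _videoConnection used in captureOutput:... would need to be refreshed
    // from the AVCaptureVideoDataOutput here
}

Even with this approach, the two cameras may deliver frames with dimensions different from the `_cx`/`_cy` the encoder was configured with, which is exactly the concern raised in this answer, so whether the resulting file stays seamless depends on the encoder tolerating that change.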