I am trying to capture video with AVCaptureVideoPreviewLayer plus an overlay view, but the video is saved without the overlay. Basically my app places an object over a face; that part works fine, and I can also capture still images (see the link below). Capturing video is the only problem.
https://i.stack.imgur.com/J5T15.jpg
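For context: an AVCaptureVideoPreviewLayer (and any sublayer added to it) is rendered on screen only; the movie file is written from the raw camera sample buffers, which never contain the overlay. One common fix is to burn the overlay into the file after recording with AVVideoCompositionCoreAnimationTool. A minimal Swift sketch, assuming the recorded file's URL and an overlay CALayer; the function name and parameters are illustrative, not from the question:

import AVFoundation
import QuartzCore

// Hedged sketch: composites overlayLayer over the video in sourceURL and
// exports the result to outputURL. All names here are illustrative.
func burnOverlay(into sourceURL: URL, overlayLayer: CALayer, outputURL: URL,
                 completion: @escaping (Error?) -> Void) {
    let asset = AVAsset(url: sourceURL)
    let composition = AVMutableVideoComposition(propertiesOf: asset)
    let renderRect = CGRect(origin: .zero, size: composition.renderSize)

    // The Core Animation tool requires a dedicated video layer inside a
    // parent layer; the overlay sits above the video layer.
    let videoLayer = CALayer()
    videoLayer.frame = renderRect
    let parentLayer = CALayer()
    parentLayer.frame = renderRect
    overlayLayer.frame = renderRect
    parentLayer.addSublayer(videoLayer)
    parentLayer.addSublayer(overlayLayer)
    composition.animationTool = AVVideoCompositionCoreAnimationTool(
        postProcessingAsVideoLayer: videoLayer, in: parentLayer)

    guard let export = AVAssetExportSession(
        asset: asset, presetName: AVAssetExportPresetHighestQuality) else { return }
    export.outputURL = outputURL
    export.outputFileType = .mp4
    export.videoComposition = composition
    export.exportAsynchronously { completion(export.error) }
}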
// Probe the audio format (sample rate and channel count) from the sample buffer.
CMFormatDescriptionRef formatDescriptionRef = CMSampleBufferGetFormatDescription(sampleBuffer);
const AudioStreamBasicDescription *audioStreamBasicDescription = CMAudioFormatDescriptionGetStreamBasicDescription(formatDescriptionRef);
_sampleRate = audioStreamBasicDescription->mSampleRate;
_audioChannel = audioStreamBasicDescription->mChannelsPerFrame;
// Build a time-stamped file name in the caches directory.
NSDateFormatter *dateFormatter = [[NSDateFormatter alloc] init];
dateFormatter.dateFormat = @"HH:mm:ss";
NSString *currentDateString = [dateFormatter stringFromDate:[NSDate date]];
NSString *videoName = [NSString stringWithFormat:@"video_%@.mp4", currentDateString];
_videoPath = [self.cacheDirectoryPath stringByAppendingPathComponent:videoName];
// Create the writer at 360x640 with the probed audio format.
_resolutionWidth = 360;
_resolutionHeight = 640;
_recordingWriter = [VideoRecordingWriter recordingWriterWithVideoPath:_videoPath
                                                      resolutionWidth:_resolutionWidth
                                                     resolutionHeight:_resolutionHeight
                                                         audioChannel:_audioChannel
                                                           sampleRate:_sampleRate];
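For reference, a wrapper like VideoRecordingWriter typically sits on top of AVAssetWriter. A hedged sketch of the setup it presumably performs, mirroring the parameters above (the wrapper's real internals are not shown in the question):

import AVFoundation

// Hedged sketch of the AVAssetWriter configuration a wrapper like
// VideoRecordingWriter presumably performs; internals are assumptions.
func makeAssetWriter(videoPath: String, width: Int, height: Int,
                     channels: Int, sampleRate: Double) throws -> AVAssetWriter {
    let writer = try AVAssetWriter(outputURL: URL(fileURLWithPath: videoPath),
                                   fileType: .mp4)
    let videoInput = AVAssetWriterInput(mediaType: .video, outputSettings: [
        AVVideoCodecKey: AVVideoCodecType.h264,
        AVVideoWidthKey: width,
        AVVideoHeightKey: height
    ])
    videoInput.expectsMediaDataInRealTime = true
    let audioInput = AVAssetWriterInput(mediaType: .audio, outputSettings: [
        AVFormatIDKey: kAudioFormatMPEG4AAC,
        AVNumberOfChannelsKey: channels,
        AVSampleRateKey: sampleRate
    ])
    audioInput.expectsMediaDataInRealTime = true
    writer.add(videoInput)
    writer.add(audioInput)
    return writer
}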
// Measure elapsed time from the first buffer's presentation timestamp.
CMTime presentationTimeStamp = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
if (_startRecordingCMTime.value == 0) {
    _startRecordingCMTime = presentationTimeStamp;
}
CMTime subtract = CMTimeSubtract(presentationTimeStamp, _startRecordingCMTime);
_currentRecordingTime = CMTimeGetSeconds(subtract);

// Stop appending once the clip has overrun the limit by 0.1 s or more.
if (_currentRecordingTime - _maxRecordingTime >= 0.1) {
    return;
}

[_recordingWriter writeWithSampleBuffer:sampleBuffer isVideo:YES];

// Report progress on the main thread.
dispatch_async(dispatch_get_main_queue(), ^{
    [self updateRecordingProgress:_currentRecordingTime / _maxRecordingTime];
});
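Note that the writer receives the raw camera buffers, so the on-screen overlay never reaches this write call. If the overlay has to be recorded live rather than burned in after export, one option is to composite it into each pixel buffer first. A minimal Core Image sketch; the function name and the pre-rendered overlay CIImage are assumptions:

import AVFoundation
import CoreImage

let ciContext = CIContext()

// Hedged sketch: draws `overlay` on top of the camera frame inside the
// sample buffer's pixel buffer before the buffer goes to the writer.
// Some capture buffers are not writable; in that case render into a
// buffer from your own CVPixelBufferPool instead.
func composite(overlay: CIImage, into sampleBuffer: CMSampleBuffer) {
    guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
    CVPixelBufferLockBaseAddress(pixelBuffer, [])
    let cameraFrame = CIImage(cvPixelBuffer: pixelBuffer)
    ciContext.render(overlay.composited(over: cameraFrame), to: pixelBuffer)
    CVPixelBufferUnlockBaseAddress(pixelBuffer, [])
}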
Answer 0 (score: 0)
func startCapturing(view: SKView, background: CALayer, done: @escaping (CALayer) -> Void) {
    // Avoid the original force unwrap in case no camera is available.
    guard let device = getCamera() else { return }
    do {
        captureSession.beginConfiguration()
        // Set up inputs.
        try captureSession.addInput(AVCaptureDeviceInput(device: device))
        captureSession.sessionPreset = .high
        previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        if let previewLayer = previewLayer {
            // Add the overlay as a sublayer of the preview. Note: sublayers
            // of the preview layer are composited on screen only; they are
            // not written into a recorded movie file.
            previewLayer.addSublayer(background)
            // Initialize the preview.
            previewLayer.name = "camera"
            previewLayer.frame.size = view.bounds.size
            if IS_DEBUG {
                print("VideoService: size of previewLayer")
                print(previewLayer.frame.size)
            }
            previewLayer.position = CGPoint(x: view.frame.width / 2,
                                            y: view.frame.height / 2)
            view.layer.addSublayer(previewLayer)
            done(previewLayer)
        }
        captureSession.commitConfiguration()
        captureSession.startRunning()
        print("\n\nVideoService: camera configured")
    } catch {
        print("Error: start capturing failed - \(error)")
    }
}
func getCaptureSessionBack() -> AVCaptureDevice.DiscoverySession {
    // Lazily create a discovery session for the back camera.
    session = session ?? AVCaptureDevice.DiscoverySession(
        deviceTypes: [deviceTypeBackCamera],
        mediaType: .video,
        position: position)
    return session!
}

func getCamera() -> AVCaptureDevice? {
    // Cache and return the first matching device.
    device = device ?? getCaptureSessionBack().devices.first
    return device
}