Answer 0 (score: 6)
1. Single video supported ❤️
2. Merging multiple videos supported ❤️
3. Any canvas with any aspect ratio supported ❤️
4. Saving the final video to the camera roll supported ❤️
5. Handles all video orientations ❤️
I may be late with this answer, but I still haven't found any existing solution for this requirement, so I'm sharing my work:

Steps to add a blurred background to a video:

Step 1: Merge the videos
func mergeVideos(_ videos: Array<AVURLAsset>, inArea area: CGSize, completion: @escaping (_ error: Error?, _ url: URL?) -> Swift.Void) {

    // Create an AVMutableComposition. This object will hold our multiple AVMutableCompositionTracks.
    let mixComposition = AVMutableComposition()
    var instructionLayers: Array<AVMutableVideoCompositionLayerInstruction> = []

    for asset in videos {

        // Create an AVMutableCompositionTrack by adding a new track to our AVMutableComposition.
        let track = mixComposition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: kCMPersistentTrackID_Invalid)

        // Insert each asset at the current end of the composition (kCMTimeZero for the first
        // clip, the accumulated duration for the rest) so the clips play back to back.
        if let videoTrack = asset.tracks(withMediaType: AVMediaType.video).first {

            /// Time after which this video's layer should be hidden (the end of this clip)
            let opacityStartTime: CMTime = CMTimeMakeWithSeconds(0, asset.duration.timescale)
            let opacityEndTime: CMTime = CMTimeAdd(mixComposition.duration, asset.duration)
            let hideAfter: CMTime = CMTimeAdd(opacityStartTime, opacityEndTime)

            let timeRange = CMTimeRangeMake(kCMTimeZero, asset.duration)
            try? track?.insertTimeRange(timeRange, of: videoTrack, at: mixComposition.duration)

            /// Layer instruction
            let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track!)
            layerInstruction.setOpacity(0.0, at: hideAfter)

            /// Scale and position for aspect-fill in the given area
            let properties = scaleAndPositionInAspectFillMode(forTrack: videoTrack, inArea: area)

            /// Check the orientation
            let videoOrientation: UIImageOrientation = self.getVideoOrientation(forTrack: videoTrack)
            let assetSize = self.assetSize(forTrack: videoTrack)

            if videoOrientation == .down {
                /// Rotate
                let defaultTransform = asset.preferredTransform
                let rotateTransform = CGAffineTransform(rotationAngle: -CGFloat(Double.pi / 2.0))

                // Scale
                let scaleTransform = CGAffineTransform(scaleX: properties.scale.width, y: properties.scale.height)

                // Translate
                var ytranslation: CGFloat = assetSize.height
                var xtranslation: CGFloat = 0
                if properties.position.y == 0 {
                    xtranslation = -(assetSize.width - ((area.width / area.height) * assetSize.height)) / 2.0
                }
                else {
                    ytranslation = assetSize.height - (assetSize.height - ((area.height / area.width) * assetSize.width)) / 2.0
                }
                let translationTransform = CGAffineTransform(translationX: xtranslation, y: ytranslation)

                // Final transformation - concatenation
                let finalTransform = defaultTransform.concatenating(rotateTransform).concatenating(translationTransform).concatenating(scaleTransform)
                layerInstruction.setTransform(finalTransform, at: kCMTimeZero)
            }
            else if videoOrientation == .left {
                /// Rotate
                let defaultTransform = asset.preferredTransform
                let rotateTransform = CGAffineTransform(rotationAngle: -CGFloat(Double.pi))

                // Scale
                let scaleTransform = CGAffineTransform(scaleX: properties.scale.width, y: properties.scale.height)

                // Translate
                var ytranslation: CGFloat = assetSize.height
                var xtranslation: CGFloat = assetSize.width
                if properties.position.y == 0 {
                    xtranslation = assetSize.width - (assetSize.width - ((area.width / area.height) * assetSize.height)) / 2.0
                }
                else {
                    ytranslation = assetSize.height - (assetSize.height - ((area.height / area.width) * assetSize.width)) / 2.0
                }
                let translationTransform = CGAffineTransform(translationX: xtranslation, y: ytranslation)

                // Final transformation - concatenation
                let finalTransform = defaultTransform.concatenating(rotateTransform).concatenating(translationTransform).concatenating(scaleTransform)
                layerInstruction.setTransform(finalTransform, at: kCMTimeZero)
            }
            else if videoOrientation == .right {
                /// No rotation needed
                // Scale
                let scaleTransform = CGAffineTransform(scaleX: properties.scale.width, y: properties.scale.height)

                // Translate
                let translationTransform = CGAffineTransform(translationX: properties.position.x, y: properties.position.y)

                let finalTransform = scaleTransform.concatenating(translationTransform)
                layerInstruction.setTransform(finalTransform, at: kCMTimeZero)
            }
            else {
                /// Rotate
                let defaultTransform = asset.preferredTransform
                let rotateTransform = CGAffineTransform(rotationAngle: CGFloat(Double.pi / 2.0))

                // Scale
                let scaleTransform = CGAffineTransform(scaleX: properties.scale.width, y: properties.scale.height)

                // Translate
                var ytranslation: CGFloat = 0
                var xtranslation: CGFloat = assetSize.width
                if properties.position.y == 0 {
                    xtranslation = assetSize.width - (assetSize.width - ((area.width / area.height) * assetSize.height)) / 2.0
                }
                else {
                    ytranslation = -(assetSize.height - ((area.height / area.width) * assetSize.width)) / 2.0
                }
                let translationTransform = CGAffineTransform(translationX: xtranslation, y: ytranslation)

                // Final transformation - concatenation
                let finalTransform = defaultTransform.concatenating(rotateTransform).concatenating(translationTransform).concatenating(scaleTransform)
                layerInstruction.setTransform(finalTransform, at: kCMTimeZero)
            }

            instructionLayers.append(layerInstruction)
        }
    }

    let mainInstruction = AVMutableVideoCompositionInstruction()
    mainInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, mixComposition.duration)
    mainInstruction.layerInstructions = instructionLayers

    let mainCompositionInst = AVMutableVideoComposition()
    mainCompositionInst.instructions = [mainInstruction]
    mainCompositionInst.frameDuration = CMTimeMake(1, 30)
    mainCompositionInst.renderSize = area

    //let url = URL(fileURLWithPath: "/Users/enacteservices/Desktop/final_video.mov")
    let url = self.videoOutputURL
    // Remove any previous video at that path
    try? FileManager.default.removeItem(at: url)

    let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
    exporter?.outputURL = url
    exporter?.outputFileType = .mp4
    exporter?.videoComposition = mainCompositionInst
    exporter?.shouldOptimizeForNetworkUse = true
    exporter?.exportAsynchronously(completionHandler: {
        if let anError = exporter?.error {
            completion(anError, nil)
        }
        else if exporter?.status == AVAssetExportSessionStatus.completed {
            completion(nil, url)
        }
    })
}
Step 2: Add the blur effect
func addBlurEffect(toVideo asset: AVURLAsset, completion: @escaping (_ error: Error?, _ url: URL?) -> Swift.Void) {

    let filter = CIFilter(name: "CIGaussianBlur")
    let composition = AVVideoComposition(asset: asset, applyingCIFiltersWithHandler: { request in
        // Clamp to avoid blurring transparent pixels at the image edges
        let source: CIImage? = request.sourceImage.clampedToExtent()
        filter?.setValue(source, forKey: kCIInputImageKey)
        filter?.setValue(10.0, forKey: kCIInputRadiusKey)

        // Crop the blurred output to the bounds of the original image
        let output: CIImage? = filter?.outputImage?.cropped(to: request.sourceImage.extent)

        // Provide the filter output to the composition
        if let anOutput = output {
            request.finish(with: anOutput, context: nil)
        }
    })

    //let url = URL(fileURLWithPath: "/Users/enacteservices/Desktop/final_video.mov")
    let url = self.videoOutputURL
    // Remove any previous video at that path
    try? FileManager.default.removeItem(at: url)

    let exporter = AVAssetExportSession(asset: asset, presetName: AVAssetExportPresetHighestQuality)
    // Assign the video composition (here, the blur filter) for the video processing
    exporter?.videoComposition = composition
    exporter?.outputFileType = .mp4
    exporter?.outputURL = url
    exporter?.exportAsynchronously(completionHandler: {
        if let anError = exporter?.error {
            completion(anError, nil)
        }
        else if exporter?.status == AVAssetExportSessionStatus.completed {
            completion(nil, url)
        }
    })
}
Step 3: Place the videos one by one at the center of the blurred video

This produces your final video URL.
func addAllVideosAtCenterOfBlur(videos: Array<AVURLAsset>, blurVideo: AVURLAsset, completion: @escaping (_ error: Error?, _ url: URL?) -> Swift.Void) {

    // Create an AVMutableComposition. This object will hold our multiple AVMutableCompositionTracks.
    let mixComposition = AVMutableComposition()
    var instructionLayers: Array<AVMutableVideoCompositionLayerInstruction> = []

    // Add the blur video first
    let blurVideoTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: kCMPersistentTrackID_Invalid)
    if let videoTrack = blurVideo.tracks(withMediaType: AVMediaType.video).first {
        let timeRange = CMTimeRangeMake(kCMTimeZero, blurVideo.duration)
        try? blurVideoTrack?.insertTimeRange(timeRange, of: videoTrack, at: kCMTimeZero)
    }

    /// Add the other videos at the center of the blur video
    var startAt = kCMTimeZero
    for asset in videos {

        /// Time range of the asset
        let timeRange = CMTimeRangeMake(kCMTimeZero, asset.duration)

        // Create an AVMutableCompositionTrack by adding a new track to our AVMutableComposition.
        let track = mixComposition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: kCMPersistentTrackID_Invalid)

        // Insert each asset at startAt so the clips play back to back on top of the blurred background.
        if let videoTrack = asset.tracks(withMediaType: AVMediaType.video).first {

            /// Time after which this video's layer should be hidden (the end of this clip)
            let opacityStartTime: CMTime = CMTimeMakeWithSeconds(0, asset.duration.timescale)
            let opacityEndTime: CMTime = CMTimeAdd(startAt, asset.duration)
            let hideAfter: CMTime = CMTimeAdd(opacityStartTime, opacityEndTime)

            /// Add the video track
            try? track?.insertTimeRange(timeRange, of: videoTrack, at: startAt)

            /// Layer instruction
            let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track!)
            layerInstruction.setOpacity(0.0, at: hideAfter)

            /// Scale and position for aspect-fit in the given area
            let properties = scaleAndPositionInAspectFitMode(forTrack: videoTrack, inArea: size)

            /// Check the orientation
            let videoOrientation: UIImageOrientation = self.getVideoOrientation(forTrack: videoTrack)
            let assetSize = self.assetSize(forTrack: videoTrack)

            if videoOrientation == .down {
                /// Rotate
                let defaultTransform = asset.preferredTransform
                let rotateTransform = CGAffineTransform(rotationAngle: -CGFloat(Double.pi / 2.0))

                // Scale
                let scaleTransform = CGAffineTransform(scaleX: properties.scale.width, y: properties.scale.height)

                // Translate
                var ytranslation: CGFloat = assetSize.height
                var xtranslation: CGFloat = 0
                if properties.position.y == 0 {
                    xtranslation = -(assetSize.width - ((size.width / size.height) * assetSize.height)) / 2.0
                }
                else {
                    ytranslation = assetSize.height - (assetSize.height - ((size.height / size.width) * assetSize.width)) / 2.0
                }
                let translationTransform = CGAffineTransform(translationX: xtranslation, y: ytranslation)

                // Final transformation - concatenation
                let finalTransform = defaultTransform.concatenating(rotateTransform).concatenating(translationTransform).concatenating(scaleTransform)
                layerInstruction.setTransform(finalTransform, at: kCMTimeZero)
            }
            else if videoOrientation == .left {
                /// Rotate
                let defaultTransform = asset.preferredTransform
                let rotateTransform = CGAffineTransform(rotationAngle: -CGFloat(Double.pi))

                // Scale
                let scaleTransform = CGAffineTransform(scaleX: properties.scale.width, y: properties.scale.height)

                // Translate
                var ytranslation: CGFloat = assetSize.height
                var xtranslation: CGFloat = assetSize.width
                if properties.position.y == 0 {
                    xtranslation = assetSize.width - (assetSize.width - ((size.width / size.height) * assetSize.height)) / 2.0
                }
                else {
                    ytranslation = assetSize.height - (assetSize.height - ((size.height / size.width) * assetSize.width)) / 2.0
                }
                let translationTransform = CGAffineTransform(translationX: xtranslation, y: ytranslation)

                // Final transformation - concatenation
                let finalTransform = defaultTransform.concatenating(rotateTransform).concatenating(translationTransform).concatenating(scaleTransform)
                layerInstruction.setTransform(finalTransform, at: kCMTimeZero)
            }
            else if videoOrientation == .right {
                /// No rotation needed
                // Scale
                let scaleTransform = CGAffineTransform(scaleX: properties.scale.width, y: properties.scale.height)

                // Translate
                let translationTransform = CGAffineTransform(translationX: properties.position.x, y: properties.position.y)

                let finalTransform = scaleTransform.concatenating(translationTransform)
                layerInstruction.setTransform(finalTransform, at: kCMTimeZero)
            }
            else {
                /// Rotate
                let defaultTransform = asset.preferredTransform
                let rotateTransform = CGAffineTransform(rotationAngle: CGFloat(Double.pi / 2.0))

                // Scale
                let scaleTransform = CGAffineTransform(scaleX: properties.scale.width, y: properties.scale.height)

                // Translate
                var ytranslation: CGFloat = 0
                var xtranslation: CGFloat = assetSize.width
                if properties.position.y == 0 {
                    xtranslation = assetSize.width - (assetSize.width - ((size.width / size.height) * assetSize.height)) / 2.0
                }
                else {
                    ytranslation = -(assetSize.height - ((size.height / size.width) * assetSize.width)) / 2.0
                }
                let translationTransform = CGAffineTransform(translationX: xtranslation, y: ytranslation)

                // Final transformation - concatenation
                let finalTransform = defaultTransform.concatenating(rotateTransform).concatenating(translationTransform).concatenating(scaleTransform)
                layerInstruction.setTransform(finalTransform, at: kCMTimeZero)
            }

            instructionLayers.append(layerInstruction)
        }

        /// Add audio
        if let audioTrack = asset.tracks(withMediaType: AVMediaType.audio).first {
            let aTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.audio, preferredTrackID: kCMPersistentTrackID_Invalid)
            try? aTrack?.insertTimeRange(timeRange, of: audioTrack, at: startAt)
        }

        // Advance the insertion time
        startAt = CMTimeAdd(startAt, asset.duration)
    }

    /// Blur layer instruction (appended last so the blurred track renders behind the clips)
    let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: blurVideoTrack!)
    instructionLayers.append(layerInstruction)

    let mainInstruction = AVMutableVideoCompositionInstruction()
    mainInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, blurVideo.duration)
    mainInstruction.layerInstructions = instructionLayers

    let mainCompositionInst = AVMutableVideoComposition()
    mainCompositionInst.instructions = [mainInstruction]
    mainCompositionInst.frameDuration = CMTimeMake(1, 30)
    mainCompositionInst.renderSize = size

    //let url = URL(fileURLWithPath: "/Users/enacteservices/Desktop/final_video.mov")
    let url = self.videoOutputURL
    // Remove any previous video at that path
    try? FileManager.default.removeItem(at: url)

    let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
    exporter?.outputURL = url
    exporter?.outputFileType = .mp4
    exporter?.videoComposition = mainCompositionInst
    exporter?.shouldOptimizeForNetworkUse = true
    exporter?.exportAsynchronously(completionHandler: {
        if let anError = exporter?.error {
            completion(anError, nil)
        }
        else if exporter?.status == AVAssetExportSessionStatus.completed {
            completion(nil, url)
        }
    })
}
For the helper methods used in the code above, please download the attached sample code.
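If you only want to read along, here is a minimal sketch of what those helpers might look like, inferred from how they are used above (the usual preferredTransform checks for orientation, naturalSize for the asset size). These are my hypothetical reconstructions, not the author's actual code; the attached sample code remains authoritative.

// Hypothetical reconstructions of the helpers referenced above (not the author's actual code).
func getVideoOrientation(forTrack track: AVAssetTrack) -> UIImageOrientation {
    let t = track.preferredTransform
    if t.a == 0 && t.b == 1.0 && t.c == -1.0 && t.d == 0 { return .right }  // rotated 90°
    if t.a == 0 && t.b == -1.0 && t.c == 1.0 && t.d == 0 { return .left }   // rotated 270°
    if t.a == -1.0 && t.b == 0 && t.c == 0 && t.d == -1.0 { return .down }  // rotated 180°
    return .up                                                              // no rotation
}

func assetSize(forTrack track: AVAssetTrack) -> CGSize {
    // naturalSize ignores the preferredTransform, so apply it and drop the sign.
    let size = track.naturalSize.applying(track.preferredTransform)
    return CGSize(width: abs(size.width), height: abs(size.height))
}

func scaleAndPositionInAspectFitMode(forTrack track: AVAssetTrack, inArea area: CGSize) -> (scale: CGSize, position: CGPoint) {
    let trackSize = assetSize(forTrack: track)
    // Aspect-fit uses min; the aspect-fill variant would use max instead.
    let factor = min(area.width / trackSize.width, area.height / trackSize.height)
    let fitted = CGSize(width: trackSize.width * factor, height: trackSize.height * factor)
    let position = CGPoint(x: (area.width - fitted.width) / 2.0, y: (area.height - fitted.height) / 2.0)
    return (scale: CGSize(width: factor, height: factor), position: position)
}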
Also, I'd welcome any shorter approach, because to achieve this I have to export the video three times.
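To make the three-export pipeline explicit, the calls chain roughly like the sketch below. The clip list and canvas size are placeholders, and each step should write to a distinct output file (the code above reuses self.videoOutputURL, so make sure it differs per step or an export will clobber its own input).

// Sketch of the full pipeline: merge -> blur -> overlay (three exports in total).
let videos: [AVURLAsset] = [/* your clips */]

mergeVideos(videos, inArea: CGSize(width: 1280, height: 720)) { _, mergedURL in
    guard let mergedURL = mergedURL else { return }
    self.addBlurEffect(toVideo: AVURLAsset(url: mergedURL)) { _, blurredURL in
        guard let blurredURL = blurredURL else { return }
        self.addAllVideosAtCenterOfBlur(videos: videos, blurVideo: AVURLAsset(url: blurredURL)) { _, finalURL in
            // finalURL is the finished video with the blurred background.
        }
    }
}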
Answer 1 (score: 1)
Starting with iOS 9.0 you can use AVAsynchronousCIImageFilteringRequest.

See the docs for details.

Alternatively, you can use AVVideoCompositing.

See this example for usage.
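For the AVVideoCompositing route, a bare-bones custom compositor might look like the sketch below. This is my illustration, not the linked example: the class name is mine, the blur radius of 10 just matches the other answers, and a real implementation would also need to honor the composition's instructions and track transforms.

import AVFoundation
import CoreImage

// A minimal custom-compositor sketch: blurs every source frame with CIGaussianBlur.
class BlurCompositor: NSObject, AVVideoCompositing {

    private let ciContext = CIContext()
    private let filter = CIFilter(name: "CIGaussianBlur")

    var sourcePixelBufferAttributes: [String: Any]? {
        return [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA]
    }

    var requiredPixelBufferAttributesForRenderContext: [String: Any] {
        return [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA]
    }

    func renderContextChanged(_ newRenderContext: AVVideoCompositionRenderContext) {
        // Nothing cached here depends on the render context.
    }

    func startRequest(_ request: AVAsynchronousVideoCompositionRequest) {
        guard let trackID = request.sourceTrackIDs.first?.int32Value,
            let sourceBuffer = request.sourceFrame(byTrackID: trackID),
            let outputBuffer = request.renderContext.newPixelBuffer() else {
                request.finish(with: NSError(domain: "BlurCompositor", code: -1, userInfo: nil))
                return
        }
        let sourceImage = CIImage(cvPixelBuffer: sourceBuffer)
        // Clamp before blurring, then crop back, as in the answers above.
        filter?.setValue(sourceImage.clampedToExtent(), forKey: kCIInputImageKey)
        filter?.setValue(10.0, forKey: kCIInputRadiusKey)
        if let blurred = filter?.outputImage?.cropped(to: sourceImage.extent) {
            ciContext.render(blurred, to: outputBuffer)
            request.finish(withComposedVideoFrame: outputBuffer)
        } else {
            request.finish(with: NSError(domain: "BlurCompositor", code: -2, userInfo: nil))
        }
    }
}

// Usage: attach the compositor to a mutable video composition before exporting.
// let videoComposition = AVMutableVideoComposition(propertiesOf: asset)
// videoComposition.customVideoCompositorClass = BlurCompositor.self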
Answer 2 (score: 1)
You can add a blur effect to the video using AVVideoComposition; the following code is tested.
- (void)applyBlurOnAsset:(AVAsset *)asset completion:(void (^)(BOOL success, NSError *error, NSURL *videoUrl))completion {

    CIFilter *filter = [CIFilter filterWithName:@"CIGaussianBlur"];
    AVVideoComposition *composition = [AVVideoComposition videoCompositionWithAsset:asset
                                       applyingCIFiltersWithHandler:^(AVAsynchronousCIImageFilteringRequest *request) {
        // Clamp to avoid blurring transparent pixels at the image edges
        CIImage *source = [request.sourceImage imageByClampingToExtent];
        [filter setValue:source forKey:kCIInputImageKey];
        [filter setValue:[NSNumber numberWithDouble:10.0] forKey:kCIInputRadiusKey];

        // Crop the blurred output to the bounds of the original image
        CIImage *output = [filter.outputImage imageByCroppingToRect:request.sourceImage.extent];

        // Provide the filter output to the composition
        [request finishWithImage:output context:nil];
    }];

    NSURL *outputUrl = [NSURL fileURLWithPath:@"Your Output path"];

    // Remove any previous video at that path
    [[NSFileManager defaultManager] removeItemAtURL:outputUrl error:nil];

    AVAssetExportSession *exporter = [[AVAssetExportSession alloc] initWithAsset:asset presetName:AVAssetExportPreset960x540];

    // Assign the video composition (here, the blur filter) for the video processing
    exporter.videoComposition = composition;
    exporter.outputFileType = AVFileTypeMPEG4;

    if (outputUrl) {
        exporter.outputURL = outputUrl;
        [exporter exportAsynchronouslyWithCompletionHandler:^{
            switch ([exporter status]) {
                case AVAssetExportSessionStatusFailed:
                    NSLog(@"blur export failed: %@", [[exporter error] localizedDescription]);
                    if (completion) {
                        dispatch_async(dispatch_get_main_queue(), ^{
                            completion(NO, [exporter error], nil);
                        });
                        return;
                    }
                    break;
                case AVAssetExportSessionStatusCancelled:
                    NSLog(@"blur export canceled");
                    if (completion) {
                        dispatch_async(dispatch_get_main_queue(), ^{
                            completion(NO, nil, nil);
                        });
                        return;
                    }
                    break;
                default:
                    break;
            }
            if (completion) {
                dispatch_async(dispatch_get_main_queue(), ^{
                    completion(YES, nil, outputUrl);
                });
            }
        }];
    }
}
Answer 3 (score: 0)

Have you tried this?
let blurEffect = UIBlurEffect(style: .light)
let blurredView = UIVisualEffectView(effect: blurEffect)
blurredView.frame = view.bounds // Size the blur view to cover the area you want blurred

// Add the blurredView on top of the content you want blurred
view.addSubview(blurredView)