// Maps a video track's preferredTransform to a UIImageOrientation plus a portrait flag.
func orientationFromTransform(transform: CGAffineTransform) -> (orientation: UIImageOrientation, isPortrait: Bool) {
    print(transform) // debug: log the raw preferredTransform
    var assetOrientation = UIImageOrientation.up
    var isPortrait = false
    if transform.a == 0 && transform.b == 1.0 && transform.c == -1.0 && transform.d == 0 {
        // Rotated 90° clockwise: portrait
        assetOrientation = .right
        isPortrait = true
    } else if transform.a == 0 && transform.b == -1.0 && transform.c == 1.0 && transform.d == 0 {
        // Rotated 90° counter-clockwise: portrait
        assetOrientation = .left
        isPortrait = true
    } else if transform.a == 1.0 && transform.b == 0 && transform.c == 0 && transform.d == 1.0 {
        // Identity: landscape, no rotation needed
        assetOrientation = .up
    } else if transform.a == -1.0 && transform.b == 0 && transform.c == 0 && transform.d == -1.0 {
        // Rotated 180°: upside-down landscape
        assetOrientation = .down
    } else if transform.a == 0 && transform.b == 1.0 && transform.c == 1.0 && transform.d == 0 {
        // Mirrored portrait; note the orientation is left at .up here
        isPortrait = true
    }
    return (assetOrientation, isPortrait)
}
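For reference, a clip recorded in portrait with the back camera reports a preferredTransform of roughly the following form (the tx value is just an example for a 1920×1080 recording), and the helper classifies it like this:

// Example only: a 90°-rotated (portrait, back camera) transform
let portraitTransform = CGAffineTransform(a: 0, b: 1, c: -1, d: 0, tx: 1080, ty: 0)
let info = orientationFromTransform(transform: portraitTransform)
// info.orientation == .right, info.isPortrait == true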
// Builds a layer instruction that scales and positions the asset's video track
// so it fits the screen width of the render area.
func videoCompositionInstructionForTrack(track: AVCompositionTrack, asset: AVAsset) -> AVMutableVideoCompositionLayerInstruction {
    let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
    let assetTrack = asset.tracks(withMediaType: AVMediaTypeVideo)[0]
    let transform = assetTrack.preferredTransform
    let assetInfo = orientationFromTransform(transform: transform)
    var scaleToFitRatio = UIScreen.main.bounds.width / assetTrack.naturalSize.width
    if assetInfo.isPortrait {
        // Portrait: the natural size is landscape, so scale against its height instead
        scaleToFitRatio = UIScreen.main.bounds.width / assetTrack.naturalSize.height
        let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
        instruction.setTransform(assetTrack.preferredTransform.concatenating(scaleFactor), at: kCMTimeZero)
    } else {
        // Landscape: scale to the screen width and push the track down so it sits mid-screen
        let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
        var concat = assetTrack.preferredTransform
            .concatenating(scaleFactor)
            .concatenating(CGAffineTransform(translationX: 0, y: UIScreen.main.bounds.width / 2))
        if assetInfo.orientation == .down {
            // Upside-down landscape: rotate 180° and translate back into the visible area
            let fixUpsideDown = CGAffineTransform(rotationAngle: CGFloat(M_PI))
            let windowBounds = UIScreen.main.bounds
            let yFix = assetTrack.naturalSize.height + windowBounds.height
            let centerFix = CGAffineTransform(translationX: assetTrack.naturalSize.width, y: yFix)
            concat = fixUpsideDown.concatenating(centerFix).concatenating(scaleFactor)
        }
        instruction.setTransform(concat, at: kCMTimeZero)
    }
    return instruction
}
// Concatenates the given clips into one composition and exports it to the Documents
// directory. The completion is called with the exported URL on success, or with nil
// and false on failure (the original non-optional URL with URL(string: "")! would crash).
func mergeVideos(sourceVideos: [URL], completion: @escaping (_ outputURL: URL?, _ success: Bool) -> Void) {
    let composition = AVMutableComposition()
    let track = composition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
    var duration = kCMTimeZero
    var instructions = [AVVideoCompositionLayerInstruction]()

    for url in sourceVideos {
        let videoAsset = AVURLAsset(url: url)
        // Append each clip at the current end of the composition
        let insertionTime = (url == sourceVideos.first) ? kCMTimeZero : composition.duration
        do {
            try track.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration),
                                      of: videoAsset.tracks(withMediaType: AVMediaTypeVideo)[0],
                                      at: insertionTime)
        } catch {
            completion(nil, false)
            return
        }
        duration = CMTimeAdd(duration, videoAsset.duration)
        instructions.append(videoCompositionInstructionForTrack(track: track, asset: videoAsset))
    }

    // One instruction spanning the whole composition, with a layer instruction per clip
    let mainInstruction = AVMutableVideoCompositionInstruction()
    mainInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, duration)
    mainInstruction.layerInstructions = instructions

    let mainComposition = AVMutableVideoComposition()
    mainComposition.instructions = [mainInstruction]
    mainComposition.frameDuration = CMTimeMake(1, 60)
    mainComposition.renderSize = CGSize(width: UIScreen.main.bounds.width, height: UIScreen.main.bounds.height)

    // Export to a uniquely named file in the Documents directory
    let documentsDirectory = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0]
    let videoPathToSave = (documentsDirectory as NSString).appendingPathComponent("mergeVideo-\(arc4random() % 1000)-d.mov")
    let videoURLToSave = URL(fileURLWithPath: videoPathToSave)

    guard let exporter = AVAssetExportSession(asset: composition, presetName: AVAssetExportPresetHighestQuality) else {
        completion(nil, false)
        return
    }
    exporter.outputURL = videoURLToSave
    exporter.videoComposition = mainComposition
    exporter.outputFileType = AVFileTypeQuickTimeMovie
    exporter.shouldOptimizeForNetworkUse = true

    exporter.exportAsynchronously {
        DispatchQueue.main.async {
            guard exporter.status == .completed else {
                completion(nil, false)
                return
            }
            // Delete the source clips only after the merge has succeeded
            for url in sourceVideos {
                try? FileManager.default.removeItem(at: url)
            }
            completion(exporter.outputURL, true)
        }
    }
}
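A minimal call site, with the clip URLs as placeholders, looks roughly like this:

let clips: [URL] = [firstClipURL, secondClipURL] // URLs of .mov files recorded earlier
mergeVideos(sourceVideos: clips) { outputURL, success in
    guard success, let mergedURL = outputURL else { return }
    print("Merged video written to \(mergedURL)")
}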
This code works for videos recorded with the back camera, but clips recorded with the front (face) camera come out with the wrong scaling. I adapted the code from this tutorial:
https://www.raywenderlich.com/94404/play-record-merge-videos-ios-swift
I would really like to know how to fix the orientation for front-camera footage...
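What I suspect (but have not confirmed) is that the front camera reports a mirrored preferredTransform. orientationFromTransform only recognises one mirrored combination (b == 1.0 && c == 1.0), and even then it leaves the orientation at .up; the opposite mirrored combination (b == -1.0 && c == -1.0) is not matched at all, so isPortrait stays false and the track gets scaled against naturalSize.width instead of naturalSize.height. A more general check along these lines is what I am considering; the helper names are my own and this is only a sketch, not verified on a device:

import CoreGraphics

// Rough idea: a reflection (mirrored capture) flips the handedness of the
// transform, which shows up as a negative determinant.
func transformIsMirrored(_ transform: CGAffineTransform) -> Bool {
    return (transform.a * transform.d - transform.b * transform.c) < 0
}

// If both b and c are non-zero the transform swaps the x and y axes, so the
// clip should be treated as portrait whether or not it is mirrored.
func isPortraitTransform(_ transform: CGAffineTransform) -> Bool {
    return transform.b != 0 && transform.c != 0
}

Would replacing the exact equality checks with something like this be the right direction, or is there a standard way to normalise front-camera transforms before building the layer instructions?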