我正在使用AVMutableComposition合并纵向和横向视频。但是,风景部分在导出后会变得很奇怪:Image
如您在图像中看到的那样,红色方框中的视频是最右边的视频,但它上面还有另一个方框。
这是我用来合并视频的功能:
func mergeVideos(arrayVideos: [AVAsset], completionHandler: @escaping ExportedHandler) {
var insertTime = CMTime.zero
var arrayLayerInstructions: [AVMutableVideoCompositionLayerInstruction] = []
var outputSize = CGSize(width: 0, height: 0)
// Determine video output size
for videoAsset in arrayVideos {
let videoTrack = videoAsset.tracks(withMediaType: .video)[0]
let assetInfo = orientationFromTransform(transform: videoTrack.preferredTransform)
var videoSize = videoTrack.naturalSize
if assetInfo.isPortrait == true {
videoSize.width = videoTrack.naturalSize.height
videoSize.height = videoTrack.naturalSize.width
}
if videoSize.height > outputSize.height {
outputSize = videoSize
}
}
if outputSize.width == 0 || outputSize.height == 0 {
outputSize = defaultSize
}
// Init composition
let mixComposition = AVMutableComposition()
// Init video & audio composition track
guard let videoCompositionTrack =
mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid),
let audioCompositionTrack =
mixComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: kCMPersistentTrackID_Invalid)
else { return }
for videoAsset in arrayVideos {
// Get video track and audio track
let videoTrack = videoAsset.tracks(withMediaType: .video)[0]
let audioTrack = videoAsset.tracks(withMediaType: .audio)[0]
let timeRange = CMTimeRangeMake(start: .zero, duration: videoAsset.duration)
do {
// Add video track to video composition at specific time
try videoCompositionTrack.insertTimeRange(timeRange, of: videoTrack, at: insertTime)
} catch {
print("Load video track error")
}
do {
try audioCompositionTrack.insertTimeRange(timeRange, of: audioTrack, at: insertTime)
} catch {
print("Load audio track error")
}
// Add instruction for video track
let layerInstruction = videoCompositionInstructionForTrack(track: videoCompositionTrack,
asset: videoAsset,
standardSize: outputSize,
atTime: insertTime)
// Increase the insert time
insertTime = CMTimeAdd(insertTime, videoAsset.duration)
layerInstruction.setOpacity(0.0, at: insertTime)
arrayLayerInstructions.append(layerInstruction)
}
// Main video composition instruction
let mainInstruction = AVMutableVideoCompositionInstruction()
mainInstruction.timeRange = CMTimeRangeMake(start: .zero, duration: insertTime)
mainInstruction.layerInstructions = arrayLayerInstructions
// Main video composition
let mainComposition = AVMutableVideoComposition()
mainComposition.instructions = [mainInstruction]
mainComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
mainComposition.renderSize = outputSize
// Export to file
let path = NSTemporaryDirectory().appending("mergedVideo.mp4")
let exportUrl = URL.init(fileURLWithPath: path)
// Remove file if existed
FileManager.default.removeItemIfExisted(at: exportUrl)
// Init exporter
guard let assetExport = AVAssetExportSession(asset: mixComposition,
presetName: AVAssetExportPresetHighestQuality) else { return }
assetExport.videoComposition = mainComposition
assetExport.outputURL = exportUrl
assetExport.outputFileType = .mp4
// Do export
assetExport.exportAsynchronously(completionHandler: { [weak self] in
switch assetExport.status {
case .completed:
self?.exportDidFinish(exporter: assetExport, videoURL: exportUrl, completion: completionHandler)
case .failed:
completionHandler(nil, nil, assetExport.error)
print("failed:", assetExport.error as Any)
case .cancelled:
completionHandler(nil, nil, assetExport.error)
print("cancelled", assetExport.error as Any)
default:
print("complete")
}
})
}
这是我用来分析视频方向的辅助方法:
private func orientationFromTransform(transform: CGAffineTransform) -> (orientation: UIImage.Orientation, isPortrait: Bool) {
var assetOrientation = UIImage.Orientation.up
var isPortrait = false
if transform.a == 0 && transform.b == 1.0 && transform.c == -1.0 && transform.d == 0 {
assetOrientation = .right
isPortrait = true
} else if transform.a == 0 && transform.b == -1.0 && transform.c == 1.0 && transform.d == 0 {
assetOrientation = .left
isPortrait = true
} else if transform.a == 1.0 && transform.b == 0 && transform.c == 0 && transform.d == 1.0 {
assetOrientation = .up
} else if transform.a == -1.0 && transform.b == 0 && transform.c == 0 && transform.d == -1.0 {
assetOrientation = .down
}
return (assetOrientation, isPortrait)
}
private func videoCompositionInstructionForTrack(track: AVCompositionTrack,
asset: AVAsset,
standardSize: CGSize,
atTime: CMTime) -> AVMutableVideoCompositionLayerInstruction {
let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
let assetTrack = asset.tracks(withMediaType: .video)[0]
let transform = assetTrack.preferredTransform
let assetInfo = orientationFromTransform(transform: transform)
var aspectFillRatio: CGFloat = 1
if assetInfo.isPortrait {
aspectFillRatio = standardSize.width / assetTrack.naturalSize.height
let scaleFactor = CGAffineTransform(scaleX: aspectFillRatio, y: aspectFillRatio)
let posX = standardSize.width / 2 - (assetTrack.naturalSize.height * aspectFillRatio) / 2
let posY = standardSize.height / 2 - (assetTrack.naturalSize.width * aspectFillRatio) / 2
let moveFactor = CGAffineTransform(translationX: posX, y: posY)
let concat = assetTrack.preferredTransform.concatenating(scaleFactor).concatenating(moveFactor)
instruction.setTransform(concat, at: atTime)
} else {
aspectFillRatio = standardSize.width / assetTrack.naturalSize.width
let scaleFactor = CGAffineTransform(scaleX: aspectFillRatio, y: aspectFillRatio)
let posX = standardSize.width / 2 - (assetTrack.naturalSize.width * aspectFillRatio) / 2
let posY = standardSize.height / 2 - (assetTrack.naturalSize.height * aspectFillRatio) / 2
let moveFactor = CGAffineTransform(translationX: posX, y: posY)
var concat = assetTrack.preferredTransform.concatenating(scaleFactor).concatenating(moveFactor)
if assetInfo.orientation == .down {
let fixUpsideDown = CGAffineTransform(rotationAngle: CGFloat(Double.pi))
concat = fixUpsideDown.concatenating(scaleFactor).concatenating(moveFactor)
}
instruction.setTransform(concat, at: atTime)
}
return instruction
}
我发现,如果我在下面的for-in循环中放置以下代码,此问题将得到解决,但是一旦我一次合并10个以上的视频,我的应用就会崩溃。
guard let videoCompositionTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid),
let audioCompositionTrack = mixComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: kCMPersistentTrackID_Invalid) else { return }
我已经在这个问题上困扰了很多天,但仍然不知道为什么。任何帮助将不胜感激。非常感谢你!