I am trying to merge multiple videos into one composition so that I can animate them with CGAffineTransform. All of the animations work fine on every video layer. The problem is that every video layer displays the same track, even though I added different tracks to different AVMutableVideoCompositionLayerInstructions.
Here is my merging code, along with an image of the unexpected output video:
func newoverlay(videoURls: [URL]) {
    // 1 - Create AVMutableComposition object. This object will hold your AVMutableCompositionTrack instances.
    let mixComposition = AVMutableComposition()
    let layercomposition = AVMutableVideoComposition()
    var tracks = [videoTrack]()
    var videoLayers = [CALayer]()
    let mainInstruction = AVMutableVideoCompositionInstruction()
    var instructions = [AVMutableVideoCompositionLayerInstruction]()
    var duration: CMTime = .zero

    // 2 - Create video tracks
    for i in 0 ..< multiLayerVideoUrls.count {
        if multiLayerVideoUrls[i] == URL(fileURLWithPath: "") {
            print("empty url")
        } else {
            let videoAsset = AVURLAsset(url: multiLayerVideoUrls[i])
            print(multiLayerVideoUrls[i])
            print("number of videoAssets are : \(i)")
            guard let track = mixComposition.addMutableTrack(withMediaType: .video,
                                                             preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else { return }
            do {
                try track.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: videoAsset.duration),
                                          of: videoAsset.tracks(withMediaType: .video)[0],
                                          at: CMTime.zero)
            } catch {
                print("Failed to load first track")
                return
            }
            let currentTrack: videoTrack = videoTrack(track: track, atNumber: i)
            tracks.append(currentTrack)
            duration = CMTimeAdd(duration, videoAsset.duration)
        }
    }

    let width: CGFloat = 720
    let height: CGFloat = 1280

    // 3 - Background layer
    let bglayer = CALayer()
    bglayer.frame = CGRect(x: 0, y: 0, width: width, height: height)
    bglayer.backgroundColor = videoOverView.backgroundColor?.cgColor

    mainInstruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: duration)

    // 4 - Build one CALayer and one layer instruction per track
    tracks.enumerated().forEach { index, track in
        let videolayer = CALayer()
        if keyframesAdded[index] {
            videolayer.frame = CGRect(x: 0, y: 0, width: width, height: height)
            var startScale: CGFloat = 1
            var endScale: CGFloat = 0
            var startSecond: CGFloat = 0
            var endSecond: CGFloat = 0
            var startPoint: CGPoint = CGPoint(x: 0, y: 0)
            var endPoint: CGPoint = videolayer.position
            var startAngle: Double = 0
            var endAngle: Double = 0
            for point in animationKeyPointsArray[index] {
                endSecond = CGFloat(point.atTime.value)
                print("endSecond is \(endSecond)")
                let timeInterval: CFTimeInterval = CFTimeInterval(endSecond - startSecond)
                endScale = point.resize
                endPoint = CGPoint(x: videolayer.position.x + point.transform.x,
                                   y: videolayer.position.y - point.transform.y)
                endAngle = Double(point.rotate)
                // Core Animation treats a beginTime of exactly 0 as CACurrentMediaTime(),
                // so nudge it just past zero.
                if startSecond == 0 {
                    startSecond = 0.01
                }
                let scaleAnimation = AnimationHelper.constructScaleAnimation(startingScale: startScale, endingScale: endScale, animationDuration: timeInterval)
                scaleAnimation.beginTime = CFTimeInterval(exactly: startSecond)!
                let moveAnimation = AnimationHelper.constructPositionAnimation(startingPoint: startPoint, endPoint: endPoint, animationDuration: timeInterval)
                moveAnimation.beginTime = CFTimeInterval(exactly: startSecond)!
                let rotateAnimation = AnimationHelper.constructRotationAnimation(startValue: startAngle, endValue: endAngle, animationDuration: timeInterval)
                rotateAnimation.beginTime = CFTimeInterval(exactly: startSecond)!
                videolayer.add(scaleAnimation, forKey: nil)
                videolayer.add(moveAnimation, forKey: nil)
                videolayer.add(rotateAnimation, forKey: nil)
                startSecond = endSecond
                startScale = endScale
                startPoint = endPoint
                startAngle = endAngle
            }
        } else {
            print("translation is \(GestureTranslation.x),\(GestureTranslation.y)")
            let xMultiplier = width / videoOverView.frame.width
            let yMultiplier = height / videoOverView.frame.height
            let translationX = GestureTranslation.x * xMultiplier
            let translationY = -(GestureTranslation.y * yMultiplier)
            videolayer.frame = CGRect(x: translationX, y: translationY, width: width, height: height)
            print(GestureTranslation.x, GestureTranslation.y)
            videolayer.setAffineTransform(CGAffineTransform(scaleX: GestureReSize, y: GestureReSize))
        }
        if maskImageAdded {
            let maskedImage = UIImageView()
            maskedImage.frame = CGRect(x: 0, y: height / 2 - width / 2, width: width, height: width)
            maskedImage.image = maskImageView.image
            videolayer.mask = maskedImage.layer
        }
        videolayer.backgroundColor = UIColor.clear.cgColor
        videoLayers.append(videolayer)
        print(tracks.count)
        print(track.track)
        let firstInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: tracks[index].track)
        let bugFixTransform = CGAffineTransform(scaleX: width / track.track.naturalSize.width,
                                                y: height / track.track.naturalSize.height)
        firstInstruction.setTransform(bugFixTransform, at: .zero)
        instructions.append(firstInstruction)
        print(instructions.count)
    }

    // 5 - Assemble the layer tree and the video composition
    let parentlayer = CALayer()
    parentlayer.frame = CGRect(x: 0, y: 0, width: width, height: height)
    parentlayer.addSublayer(bglayer)
    for videolayer in videoLayers {
        parentlayer.addSublayer(videolayer)
    }
    layercomposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
    layercomposition.renderSize = CGSize(width: width, height: height)
    layercomposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayers: videoLayers, in: parentlayer)
    mainInstruction.layerInstructions = instructions
    layercomposition.instructions = [mainInstruction]
    mainInstruction.backgroundColor = UIColor.clear.cgColor
    print("composition Done")
}
Answer 0 (score: 1)
As I understand it, the problem occurs because you insert the time ranges the wrong way on this line: you always insert at CMTime.zero.
try track.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: videoAsset.duration),
                          of: videoAsset.tracks(withMediaType: .video)[0],
                          at: CMTime.zero)
What you should do instead is keep track of the previously inserted time ranges and add that offset on the next pass of the for loop. Here is the sample code I use for merging an array of videos; note the insertTime variable.
let mainComposition = AVMutableComposition()
let compositionVideoTrack = mainComposition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid)
compositionVideoTrack?.preferredTransform = CGAffineTransform(rotationAngle: .pi / 2)
let compositionAudioTrack = mainComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: kCMPersistentTrackID_Invalid)

// Running offset: each asset is appended where the previous one ended.
var insertTime = CMTime.zero
for videoUrl in videoUrls {
    let videoAsset = AVURLAsset(url: videoUrl)
    guard let videoTrack = videoAsset.tracks(withMediaType: .video).first else {
        return
    }
    do {
        try compositionVideoTrack?.insertTimeRange(CMTimeRangeMake(start: .zero, duration: videoAsset.duration), of: videoTrack, at: insertTime)
    } catch {
        return
    }
    if let audioTrack = videoAsset.tracks(withMediaType: .audio).first {
        do {
            try compositionAudioTrack?.insertTimeRange(CMTimeRangeMake(start: .zero, duration: videoAsset.duration), of: audioTrack, at: insertTime)
        } catch {
            return
        }
    }
    // Advance the offset so the next asset starts after this one.
    insertTime = CMTimeAdd(insertTime, videoAsset.duration)
}
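To actually render the merged result to disk, you would typically hand the composition to an AVAssetExportSession. Here is a minimal export sketch, assuming an outputURL you provide; pass a videoComposition only if you built one (for example the layercomposition with the Core Animation tool from the question):

import AVFoundation

// Minimal export sketch; outputURL is a placeholder you supply.
func export(composition: AVComposition, videoComposition: AVVideoComposition?, to outputURL: URL) {
    guard let exporter = AVAssetExportSession(asset: composition,
                                              presetName: AVAssetExportPresetHighestQuality) else { return }
    exporter.outputURL = outputURL
    exporter.outputFileType = .mp4
    // Attach the AVVideoComposition so the layer instructions and the
    // Core Animation tool are applied during export.
    exporter.videoComposition = videoComposition
    exporter.exportAsynchronously {
        if let error = exporter.error {
            print("Export failed: \(error)")
        } else {
            print("Export finished: \(outputURL)")
        }
    }
}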