ios - UIImage array to video: image orientation and scale

Asked: 2016-03-06 14:00:18

Tags: ios swift

I am creating a video from an array of UIImages.

I have worked from several Stack Overflow posts, mixing and modifying a lot of the code they provide.

I'll include the file I'm currently working with.

The images are currently coming out as shown below. My guess is that it's because they get converted to CGImage, which doesn't retain the orientation. Is that right?

Also, how do I handle scale when the user takes a horizontal (landscape) photo?

Basically, I want each image to appear in the video the way it looked when it was taken.

The first two images were taken vertically and appear rotated 90 degrees to the left; the last one was taken horizontally and appears flipped.

Thanks.

[Three screenshots of the misoriented output frames were attached here.]

The class:

import AVFoundation
import UIKit

let kErrorDomain = "TimeLapseBuilder"
let kFailedToStartAssetWriterError = 0
let kFailedToAppendPixelBufferError = 1

public class TimeLapseBuilder: NSObject {
  var photos: [UIImage]
  var videoWriter: AVAssetWriter?
  var outputSize = CGSizeMake(1920, 1080)

  public init(photos: [UIImage]) {
    self.photos = photos

    super.init()
  }

  public func build(outputSize outputSize: CGSize, progress: (NSProgress -> Void), success: (NSURL -> Void), failure: (NSError -> Void)) {

    self.outputSize = outputSize
    var error: NSError?

    let startTime = NSDate.timeIntervalSinceReferenceDate()

    let fileManager = NSFileManager.defaultManager()
    let urls = fileManager.URLsForDirectory(.DocumentDirectory, inDomains: .UserDomainMask)
    guard let documentDirectory: NSURL = urls.first else {
      fatalError("documentDir Error")
    }

    let videoOutputURL = documentDirectory.URLByAppendingPathComponent("AssembledVideo.mov")

    if fileManager.fileExistsAtPath(videoOutputURL.path!) {
      do {
        try fileManager.removeItemAtPath(videoOutputURL.path!)
      }catch{
        fatalError("Unable to delete file: \(error) : \(__FUNCTION__).")
      }
    }

    guard let videoWriter = try? AVAssetWriter(URL: videoOutputURL, fileType: AVFileTypeQuickTimeMovie) else{
      fatalError("AVAssetWriter error")
    }

    let outputSettings = [
      AVVideoCodecKey  : AVVideoCodecH264,
      AVVideoWidthKey  : NSNumber(float: Float(outputSize.width)),
      AVVideoHeightKey : NSNumber(float: Float(outputSize.height)),
    ]

    guard videoWriter.canApplyOutputSettings(outputSettings, forMediaType: AVMediaTypeVideo) else {
      fatalError("Negative : Can't apply the Output settings...")
    }

    let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: outputSettings)

    let sourcePixelBufferAttributesDictionary = [
      kCVPixelBufferPixelFormatTypeKey as String: NSNumber(unsignedInt: kCVPixelFormatType_32ARGB),
      kCVPixelBufferWidthKey as String: NSNumber(float: Float(outputSize.width)),
      kCVPixelBufferHeightKey as String: NSNumber(float: Float(outputSize.height)),
    ]

    let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(
      assetWriterInput: videoWriterInput,
      sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary
    )

    assert(videoWriter.canAddInput(videoWriterInput))
    videoWriter.addInput(videoWriterInput)

    if videoWriter.startWriting() {
      videoWriter.startSessionAtSourceTime(kCMTimeZero)
      assert(pixelBufferAdaptor.pixelBufferPool != nil)

      let media_queue = dispatch_queue_create("mediaInputQueue", nil)

      videoWriterInput.requestMediaDataWhenReadyOnQueue(media_queue, usingBlock: { () -> Void in
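        // Drain the photo array, appending one pixel buffer per photo at one frame per second.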
        let fps: Int32 = 1
        let frameDuration = CMTimeMake(1, fps)
        let currentProgress = NSProgress(totalUnitCount: Int64(self.photos.count))

        var frameCount: Int64 = 0

        while (!self.photos.isEmpty) {


          if (videoWriterInput.readyForMoreMediaData) {
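            // Dequeue the next photo and derive its presentation time from the running frame count.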
            let nextPhoto = self.photos.removeAtIndex(0)
            let lastFrameTime = CMTimeMake(frameCount, fps)
            let presentationTime = frameCount == 0 ? lastFrameTime : CMTimeAdd(lastFrameTime, frameDuration)



            if !self.appendPixelBufferForImage(nextPhoto, pixelBufferAdaptor: pixelBufferAdaptor, presentationTime: presentationTime) {
              error = NSError(domain: kErrorDomain, code: kFailedToAppendPixelBufferError,
                userInfo: [
                  "description": "AVAssetWriterInputPixelBufferAdapter failed to append pixel buffer",
                  "rawError": videoWriter.error ?? "(none)"
                ])

              break
            }

            frameCount += 1

            currentProgress.completedUnitCount = frameCount
            progress(currentProgress)
          }
        }

        let endTime = NSDate.timeIntervalSinceReferenceDate()
        let elapsedTime: NSTimeInterval = endTime - startTime

        print("rendering time \(self.stringFromTimeInterval(elapsedTime))")


        videoWriterInput.markAsFinished()
        videoWriter.finishWritingWithCompletionHandler { () -> Void in
          if error == nil {
            success(videoOutputURL)
          } else {
            // Surface append failures; the synchronous check below only
            // catches a failure to start writing.
            failure(error!)
          }
        }
      })


    } else {
      error = NSError(domain: kErrorDomain, code: kFailedToStartAssetWriterError,
        userInfo: ["description": "AVAssetWriter failed to start writing"]
      )
    }

    if let error = error {
      failure(error)
    }
  }

  public func appendPixelBufferForImage(image: UIImage, pixelBufferAdaptor: AVAssetWriterInputPixelBufferAdaptor, presentationTime: CMTime) -> Bool {
    var appendSucceeded = true

    autoreleasepool {
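        // Create a standalone 32ARGB buffer at the image's point size. This
        // bypasses the adaptor's pixel buffer pool and need not match outputSize.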

        var pixelBuffer: CVPixelBuffer? = nil
        let options: [NSObject : AnyObject] = [
            kCVPixelBufferCGImageCompatibilityKey : NSNumber(bool: true),
            kCVPixelBufferCGBitmapContextCompatibilityKey : NSNumber(bool: true)
        ]

        let status: CVReturn = CVPixelBufferCreate(kCFAllocatorDefault, Int(image.size.width), Int(image.size.height), kCVPixelFormatType_32ARGB, options as CFDictionaryRef, &pixelBuffer)

        if let pixelBuffer = pixelBuffer where status == 0 {
          let managedPixelBuffer = pixelBuffer

            print("Scaleeee \(image.scale)")
            print("Widthhhh \(image.size.width)")
            print("Heighttt \(image.size.height)")


            pixelBufferFromImage(image.CGImage!, pxbuffer: managedPixelBuffer, andSize: CGSize(width: image.size.width, height: image.size.height))

          appendSucceeded = pixelBufferAdaptor.appendPixelBuffer(pixelBuffer, withPresentationTime: presentationTime)

        } else {
          NSLog("error: Failed to allocate pixel buffer from pool")
        }

    }

    return appendSucceeded
  }

    func pixelBufferFromImage(image: CGImageRef, pxbuffer: CVPixelBuffer, andSize size: CGSize){
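        // Draw the CGImage into the pixel buffer's backing memory. A CGImage
        // carries no orientation metadata, so the photo is drawn exactly as
        // stored, which is what produces the rotated frames described above.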

        CVPixelBufferLockBaseAddress(pxbuffer, 0)
        let pxdata = CVPixelBufferGetBaseAddress(pxbuffer)
        let rgbColorSpace = CGColorSpaceCreateDeviceRGB()

        let context = CGBitmapContextCreate(pxdata, Int(size.width), Int(size.height), 8, CVPixelBufferGetBytesPerRow(pxbuffer), rgbColorSpace, CGImageAlphaInfo.PremultipliedFirst.rawValue)

        CGContextConcatCTM(context, CGAffineTransformMakeRotation(0))

        CGContextDrawImage(context, CGRectMake(0, 0, size.width, size.height), image)

        CVPixelBufferUnlockBaseAddress(pxbuffer, 0)
    }

//    //Old fillpixel method
//  func fillPixelBufferFromImage(image: UIImage, pixelBuffer: CVPixelBuffer, contentMode:UIViewContentMode){
//
//    CVPixelBufferLockBaseAddress(pixelBuffer, 0)
////    
//    let data = CVPixelBufferGetBaseAddress(pixelBuffer)
//    let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
//    let context = CGBitmapContextCreate(data, Int(self.outputSize.width), Int(self.outputSize.height), 8, CVPixelBufferGetBytesPerRow(pixelBuffer), rgbColorSpace, CGImageAlphaInfo.PremultipliedFirst.rawValue)
//
//    CGContextClearRect(context, CGRectMake(0, 0, CGFloat(self.outputSize.width), CGFloat(self.outputSize.height)))
//    
//    let horizontalRatio = CGFloat(self.outputSize.width) / image.size.width
//    let verticalRatio = CGFloat(self.outputSize.height) / image.size.height
//    var ratio: CGFloat = 1
//    
////    print("horizontal ratio \(horizontalRatio)")
////    print("vertical ratio \(verticalRatio)")
////    print("ratio \(ratio)")
////    print("Image Width -  \(image.size.width). Image Height - \(image.size.height)")
//    
//    switch(contentMode) {
//    case .ScaleAspectFill:
//      ratio = max(horizontalRatio, verticalRatio)
//    case .ScaleAspectFit:
//      ratio = min(horizontalRatio, verticalRatio)
//    default:
//      ratio = min(horizontalRatio, verticalRatio)
//    }
//    
//    let newSize:CGSize = CGSizeMake(image.size.width * ratio, image.size.height * ratio)
//    
//    let x = newSize.width < self.outputSize.width ? (self.outputSize.width - newSize.width) / 2 : 0
//    let y = newSize.height < self.outputSize.height ? (self.outputSize.height - newSize.height) / 2 : 0
//
//    CGContextDrawImage(context, CGRectMake(x, y, newSize.width, newSize.height), image.CGImage)
//
//    CVPixelBufferUnlockBaseAddress(pixelBuffer, 0)
//  }


  func stringFromTimeInterval(interval: NSTimeInterval) -> String {
    let ti = NSInteger(interval)
    let ms = Int((interval % 1) * 1000)
    let seconds = ti % 60
    let minutes = (ti / 60) % 60
    let hours = (ti / 3600)

    if hours > 0 {
      return NSString(format: "%0.2d:%0.2d:%0.2d.%0.2d", hours, minutes, seconds, ms) as String
    } else if minutes > 0 {
      return NSString(format: "%0.2d:%0.2d.%0.2d", minutes, seconds, ms) as String
    } else {
      return NSString(format: "%0.2d.%0.2d", seconds, ms) as String
    }
  }

}
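
For reference, this is roughly how the builder is driven. The call site is not part of the file above; the photos array and the 1080p size here are assumptions for illustration:

let builder = TimeLapseBuilder(photos: photos)
builder.build(outputSize: CGSizeMake(1920, 1080),
  progress: { progress in
    print("\(progress.completedUnitCount) of \(progress.totalUnitCount) frames")
  },
  success: { url in
    print("video written to \(url)")
  },
  failure: { error in
    print("failed: \(error.localizedDescription)")
  })

Note that the callbacks fire on the writer's background queue, so any UI updates need to be dispatched back to the main queue.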

1 Answer:

Answer (score: 1):

This is a common problem with UIImage, because the orientation usually comes from the EXIF data written by the camera.

I wrote an extension on UIImage that gives me access to a correctly oriented instance of the image.

extension UIImage {
    var fixedOrientation: UIImage {
        if self.imageOrientation == .Up {
            return self
        }

        var transform: CGAffineTransform = CGAffineTransformIdentity

        // First pass: rotate and translate so the pixels end up upright.
        switch self.imageOrientation {
        case .Down, .DownMirrored:
            transform = CGAffineTransformTranslate(transform, self.size.width, self.size.height)
            transform = CGAffineTransformRotate(transform, CGFloat(M_PI))
        case .Left, .LeftMirrored:
            transform = CGAffineTransformTranslate(transform, self.size.width, 0)
            transform = CGAffineTransformRotate(transform, CGFloat(M_PI_2))
        case .Right, .RightMirrored:
            transform = CGAffineTransformTranslate(transform, 0, self.size.height)
            transform = CGAffineTransformRotate(transform, CGFloat(-M_PI_2))
        case .Up, .UpMirrored:
            break
        }

        // Second pass: undo any mirroring.
        switch self.imageOrientation {
        case .UpMirrored, .DownMirrored:
            transform = CGAffineTransformTranslate(transform, self.size.width, 0)
            transform = CGAffineTransformScale(transform, -1, 1)
        case .LeftMirrored, .RightMirrored:
            transform = CGAffineTransformTranslate(transform, self.size.height, 0)
            transform = CGAffineTransformScale(transform, -1, 1)
        case .Up, .Down, .Left, .Right:
            break
        }

        let context = CGBitmapContextCreate(nil, Int(self.size.width), Int(self.size.height), CGImageGetBitsPerComponent(self.CGImage), 0, CGImageGetColorSpace(self.CGImage), CGImageGetBitmapInfo(self.CGImage).rawValue)
        CGContextConcatCTM(context, transform)

        // The 90-degree cases have landscape source pixels, so draw with the
        // width and height swapped.
        switch self.imageOrientation {
        case .Left, .LeftMirrored, .Right, .RightMirrored:
            CGContextDrawImage(context, CGRectMake(0, 0, self.size.height, self.size.width), self.CGImage)
        default:
            CGContextDrawImage(context, CGRectMake(0, 0, self.size.width, self.size.height), self.CGImage)
        }

        let cgImage = CGBitmapContextCreateImage(context)
        return UIImage(CGImage: cgImage!)
    }
}
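
With the extension in place, the photos can be normalized before they ever reach the pixel buffer code. A minimal sketch against the builder from the question:

// Bake the EXIF orientation into the pixels up front, so every CGImage
// handed to pixelBufferFromImage is already upright.
let builder = TimeLapseBuilder(photos: photos.map { $0.fixedOrientation })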

Similarly, if you know the CGSize you want the image at (you can usually just use the bounds of the current view), you can also use this as a UIImage extension:

extension UIImage {
    func resize(newSize: CGSize) -> UIImage {
        UIGraphicsBeginImageContextWithOptions(newSize, false, 1.0)
        self.drawInRect(CGRectMake(0, 0, newSize.width, newSize.height))
        let resized = UIGraphicsGetImageFromCurrentImageContext()
        // Balance the Begin call so the graphics context doesn't leak.
        UIGraphicsEndImageContext()
        return resized
    }
}
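
Combining the two, each corrected photo can be aspect-fit into the video dimensions before building. A sketch, where the 1920x1080 target is the outputSize from the question:

let target = CGSizeMake(1920, 1080)
let prepared: [UIImage] = photos.map { (photo) -> UIImage in
    // Correct the orientation first, then scale to fit inside the target.
    let upright = photo.fixedOrientation
    let ratio = min(target.width / upright.size.width,
                    target.height / upright.size.height)
    return upright.resize(CGSizeMake(upright.size.width * ratio,
                                     upright.size.height * ratio))
}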

I'd be curious to know whether this could be refactored, but it has always worked for me. Hope this helps!