Rendering a SceneKit scene to video output

Date: 2015-03-15 12:13:31

Tags: ios macos video rendering scenekit

As a primarily high-level / iOS developer, I'm interested in using SceneKit for animation projects.

I've been playing with SceneKit for a few months now, and although it's clearly designed for "live" interaction, I'd find it incredibly useful to be able to render an SCNScene to video. Currently I've been using QuickTime's screen recorder to capture the video output, but (of course) the frame rate drops when doing so. Is there an alternative that would allow the scene to be rendered at its own pace and output to a smooth video file?

I understand this may well be impossible... just thought I'd ask in case I was missing something lower-level!

4 Answers:

Answer 0 (score: 5)

You can use an SCNRenderer to render into an offscreen CGImage, and then use AVFoundation to append those CGImages to a video stream.

I wrote this Swift extension for rendering into a CGImage.

public extension SCNRenderer {

    public func renderToImageSize(size: CGSize, floatComponents: Bool, atTime time: NSTimeInterval) -> CGImage? {

        var thumbnailCGImage: CGImage?

        let width = GLsizei(size.width), height = GLsizei(size.height)
        let samplesPerPixel = 4

        #if os(iOS)
            let oldGLContext = EAGLContext.currentContext()
            let glContext = unsafeBitCast(context, EAGLContext.self)

            EAGLContext.setCurrentContext(glContext)
            objc_sync_enter(glContext)
        #elseif os(OSX)
            let oldGLContext = CGLGetCurrentContext()
            let glContext = unsafeBitCast(context, CGLContextObj.self)

            CGLSetCurrentContext(glContext)
            CGLLockContext(glContext)
        #endif

        // set up the OpenGL buffers
        var thumbnailFramebuffer: GLuint = 0
        glGenFramebuffers(1, &thumbnailFramebuffer)
        glBindFramebuffer(GLenum(GL_FRAMEBUFFER), thumbnailFramebuffer); checkGLErrors()

        var colorRenderbuffer: GLuint = 0
        glGenRenderbuffers(1, &colorRenderbuffer)
        glBindRenderbuffer(GLenum(GL_RENDERBUFFER), colorRenderbuffer)
        if floatComponents {
            glRenderbufferStorage(GLenum(GL_RENDERBUFFER), GLenum(GL_RGBA16F), width, height)
        } else {
            glRenderbufferStorage(GLenum(GL_RENDERBUFFER), GLenum(GL_RGBA8), width, height)
        }
        glFramebufferRenderbuffer(GLenum(GL_FRAMEBUFFER), GLenum(GL_COLOR_ATTACHMENT0), GLenum(GL_RENDERBUFFER), colorRenderbuffer); checkGLErrors()

        var depthRenderbuffer: GLuint = 0
        glGenRenderbuffers(1, &depthRenderbuffer)
        glBindRenderbuffer(GLenum(GL_RENDERBUFFER), depthRenderbuffer)
        glRenderbufferStorage(GLenum(GL_RENDERBUFFER), GLenum(GL_DEPTH_COMPONENT24), width, height)
        glFramebufferRenderbuffer(GLenum(GL_FRAMEBUFFER), GLenum(GL_DEPTH_ATTACHMENT), GLenum(GL_RENDERBUFFER), depthRenderbuffer); checkGLErrors()

        let framebufferStatus = Int32(glCheckFramebufferStatus(GLenum(GL_FRAMEBUFFER)))
        assert(framebufferStatus == GL_FRAMEBUFFER_COMPLETE)
        if framebufferStatus != GL_FRAMEBUFFER_COMPLETE {
            return nil
        }

        // clear buffer
        glViewport(0, 0, width, height)
        glClear(GLbitfield(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)); checkGLErrors()

        // render
        renderAtTime(time); checkGLErrors()

        // create the image
        if floatComponents { // float components (16-bits of actual precision)

            // slurp bytes out of OpenGL
            typealias ComponentType = Float

            var imageRawBuffer = [ComponentType](count: Int(width * height) * samplesPerPixel, repeatedValue: 0) // count is in components, not bytes
            glReadPixels(GLint(0), GLint(0), width, height, GLenum(GL_RGBA), GLenum(GL_FLOAT), &imageRawBuffer)

            // flip image vertically — OpenGL has a different 'up' than CoreGraphics
            let rowLength = Int(width) * samplesPerPixel
            for rowIndex in 0..<(Int(height) / 2) {
                let baseIndex = rowIndex * rowLength
                let destinationIndex = (Int(height) - 1 - rowIndex) * rowLength

                swap(&imageRawBuffer[baseIndex..<(baseIndex + rowLength)], &imageRawBuffer[destinationIndex..<(destinationIndex + rowLength)])
            }

            // make the CGImage
            var imageBuffer = vImage_Buffer(
                data: UnsafeMutablePointer<Float>(imageRawBuffer),
                height: vImagePixelCount(height),
                width: vImagePixelCount(width),
                rowBytes: Int(width) * sizeof(ComponentType) * samplesPerPixel)

            var format = vImage_CGImageFormat(
                bitsPerComponent: UInt32(sizeof(ComponentType) * 8),
                bitsPerPixel: UInt32(sizeof(ComponentType) * samplesPerPixel * 8),
                colorSpace: nil, // defaults to sRGB
                bitmapInfo: CGBitmapInfo(CGImageAlphaInfo.PremultipliedLast.rawValue | CGBitmapInfo.ByteOrder32Little.rawValue | CGBitmapInfo.FloatComponents.rawValue),
                version: UInt32(0),
                decode: nil,
                renderingIntent: kCGRenderingIntentDefault)

            var error: vImage_Error = 0
            thumbnailCGImage = vImageCreateCGImageFromBuffer(&imageBuffer, &format, nil, nil, vImage_Flags(kvImagePrintDiagnosticsToConsole), &error)!.takeRetainedValue()

        } else { // byte components

            // slurp bytes out of OpenGL
            typealias ComponentType = UInt8

            var imageRawBuffer = [ComponentType](count: Int(width * height) * samplesPerPixel, repeatedValue: 0) // count is in components, not bytes
            glReadPixels(GLint(0), GLint(0), width, height, GLenum(GL_RGBA), GLenum(GL_UNSIGNED_BYTE), &imageRawBuffer)

            // flip image vertically — OpenGL has a different 'up' than CoreGraphics
            let rowLength = Int(width) * samplesPerPixel
            for rowIndex in 0..<(Int(height) / 2) {
                let baseIndex = rowIndex * rowLength
                let destinationIndex = (Int(height) - 1 - rowIndex) * rowLength

                swap(&imageRawBuffer[baseIndex..<(baseIndex + rowLength)], &imageRawBuffer[destinationIndex..<(destinationIndex + rowLength)])
            }

            // make the CGImage
            var imageBuffer = vImage_Buffer(
                data: UnsafeMutablePointer<ComponentType>(imageRawBuffer), // UInt8 components here, not Float
                height: vImagePixelCount(height),
                width: vImagePixelCount(width),
                rowBytes: Int(width) * sizeof(ComponentType) * samplesPerPixel)

            var format = vImage_CGImageFormat(
                bitsPerComponent: UInt32(sizeof(ComponentType) * 8),
                bitsPerPixel: UInt32(sizeof(ComponentType) * samplesPerPixel * 8),
                colorSpace: nil, // defaults to sRGB
                bitmapInfo: CGBitmapInfo(CGImageAlphaInfo.PremultipliedLast.rawValue | CGBitmapInfo.ByteOrder32Big.rawValue),
                version: UInt32(0),
                decode: nil,
                renderingIntent: kCGRenderingIntentDefault)

            var error: vImage_Error = 0
            thumbnailCGImage = vImageCreateCGImageFromBuffer(&imageBuffer, &format, nil, nil, vImage_Flags(kvImagePrintDiagnosticsToConsole), &error)!.takeRetainedValue()
        }

        #if os(iOS)
            objc_sync_exit(glContext)
            if oldGLContext != nil {
                EAGLContext.setCurrentContext(oldGLContext)
            }
        #elseif os(OSX)
            CGLUnlockContext(glContext)
            if oldGLContext != nil {
                CGLSetCurrentContext(oldGLContext)
            }
        #endif

        return thumbnailCGImage
    }
}


func checkGLErrors() {
    var glError: GLenum
    var hadError = false
    do {
        glError = glGetError()
        if glError != 0 {
            println(String(format: "OpenGL error %#x", glError))
            hadError = true
        }
    } while glError != 0
    assert(!hadError)
}
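
The AVFoundation half isn't shown above. Below is a rough sketch of how the two sides could fit together on iOS (Swift 2-era syntax; writeVideo, the 1280x720 size, and the CGImage-to-CVPixelBuffer drawing are my own glue, not part of the original answer, so treat it as a starting point rather than a finished implementation):

import SceneKit
import AVFoundation

// Sketch: step the renderer through each frame time, draw every CGImage
// into a CVPixelBuffer, and append the buffers to an AVAssetWriter.
func writeVideo(scene: SCNScene, outputURL: NSURL) throws {
    let size = CGSize(width: 1280, height: 720)
    let fps: Int32 = 30
    let frameCount = 300 // 10 seconds at 30 fps

    let renderer = SCNRenderer(context: EAGLContext(API: .OpenGLES2), options: nil)
    renderer.scene = scene

    let writer = try AVAssetWriter(URL: outputURL, fileType: AVFileTypeQuickTimeMovie)
    let input = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: [
        AVVideoCodecKey: AVVideoCodecH264,
        AVVideoWidthKey: Int(size.width),
        AVVideoHeightKey: Int(size.height)])
    let adaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: input,
        sourcePixelBufferAttributes: nil)
    writer.addInput(input)
    writer.startWriting()
    writer.startSessionAtSourceTime(kCMTimeZero)

    for frame in 0..<frameCount {
        // render this frame offscreen at its exact timestamp
        let time = NSTimeInterval(frame) / NSTimeInterval(fps)
        guard let image = renderer.renderToImageSize(size, floatComponents: false, atTime: time) else { continue }

        // wrap the CGImage in a CVPixelBuffer by drawing it into one
        var pixelBufferOut: CVPixelBuffer? = nil
        CVPixelBufferCreate(kCFAllocatorDefault, Int(size.width), Int(size.height),
                            kCVPixelFormatType_32BGRA, nil, &pixelBufferOut)
        guard let pixelBuffer = pixelBufferOut else { continue }
        CVPixelBufferLockBaseAddress(pixelBuffer, 0)
        let context = CGBitmapContextCreate(CVPixelBufferGetBaseAddress(pixelBuffer),
            Int(size.width), Int(size.height), 8, CVPixelBufferGetBytesPerRow(pixelBuffer),
            CGColorSpaceCreateDeviceRGB(),
            CGImageAlphaInfo.PremultipliedFirst.rawValue | CGBitmapInfo.ByteOrder32Little.rawValue)
        CGContextDrawImage(context, CGRect(x: 0, y: 0, width: size.width, height: size.height), image)
        CVPixelBufferUnlockBaseAddress(pixelBuffer, 0)

        // back-pressure: wait until the writer can take another frame
        while !input.readyForMoreMediaData { NSThread.sleepForTimeInterval(0.01) }
        adaptor.appendPixelBuffer(pixelBuffer, withPresentationTime: CMTimeMake(Int64(frame), fps))
    }

    input.markAsFinished()
    writer.finishWritingWithCompletionHandler {}
}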

Answer 1 (score: 4)

This is the answer for SceneKit using Metal.

Warning: this may not be an appropriate method for the App Store, but it works.

Step 1: Swap out CAMetalLayer's nextDrawable method with swizzling, and save the CAMetalDrawable on each render loop:


extension CAMetalLayer {
  public static func setupSwizzling() {
    struct Static {
      static var token: dispatch_once_t = 0
    }

    dispatch_once(&Static.token) {
      let copiedOriginalSelector = #selector(CAMetalLayer.orginalNextDrawable)
      let originalSelector = #selector(CAMetalLayer.nextDrawable)
      let swizzledSelector = #selector(CAMetalLayer.newNextDrawable)

      let copiedOriginalMethod = class_getInstanceMethod(self, copiedOriginalSelector)
      let originalMethod = class_getInstanceMethod(self, originalSelector)
      let swizzledMethod = class_getInstanceMethod(self, swizzledSelector)

      let oldImp = method_getImplementation(originalMethod)
      method_setImplementation(copiedOriginalMethod, oldImp)
      method_exchangeImplementations(originalMethod, swizzledMethod)
    }
  }


  func newNextDrawable() -> CAMetalDrawable? {
    let drawable = orginalNextDrawable()
    // Save the drawable to any where you want
    AppManager.sharedInstance.currentSceneDrawable = drawable
    return drawable
  }

  func orginalNextDrawable() -> CAMetalDrawable? {
    // This is just a placeholder. Implementation will be replaced with nextDrawable.
    return nil
  }
}

Step 2: Set up the swizzling in the AppDelegate's application(_:didFinishLaunchingWithOptions:):

func application(application: UIApplication, didFinishLaunchingWithOptions launchOptions: [NSObject: AnyObject]?) -> Bool {
  CAMetalLayer.setupSwizzling()
  return true
}

Step 3: Disable framebufferOnly on your SCNView's CAMetalLayer (so that you can call getBytes on its MTLTexture):

if let metalLayer = scnView.layer as? CAMetalLayer {
  metalLayer.framebufferOnly = false
}
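
Step 4: In your SCNView's delegate (SCNSceneRendererDelegate), read the texture of the drawable you saved in step 1. The original answer leaves this part out, so the following is only a sketch of what it could look like (Swift 2 syntax; assumes the AppManager placeholder singleton from step 1 and the default .BGRA8Unorm drawable format):

func renderer(renderer: SCNSceneRenderer, didRenderScene scene: SCNScene, atTime time: NSTimeInterval) {
  guard let texture = AppManager.sharedInstance.currentSceneDrawable?.texture
    where !texture.framebufferOnly else { return }

  // 4 bytes per pixel for .BGRA8Unorm
  let rowBytes = texture.width * 4
  var pixels = [UInt8](count: rowBytes * texture.height, repeatedValue: 0)
  texture.getBytes(&pixels, bytesPerRow: rowBytes,
                   fromRegion: MTLRegionMake2D(0, 0, texture.width, texture.height),
                   mipmapLevel: 0)
  // `pixels` now holds the frame; feed it to AVAssetWriter, build a CGImage, etc.
}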

Step 5 (optional): You may want to confirm that the drawable you captured really belongs to your target CAMetalLayer (in case several CAMetalLayers exist at once). A sketch of one way to check follows.
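
CAMetalDrawable exposes the layer it came from, so one possible check (my assumption, not spelled out in the answer) is to compare it against your view's layer:

if let drawable = AppManager.sharedInstance.currentSceneDrawable
  where drawable.layer === (scnView.layer as? CAMetalLayer) {
  // this drawable really came from our SCNView
}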

Answer 2 (score: 2)

It's actually pretty easy! Here is pseudocode showing how I did it (on an SCNView):

int numberOfFrames = 300;
int currentFrame = 0;
int framesPerSecond = 30;

-(void) renderAFrame{
    [self renderAtTime:(NSTimeInterval)currentFrame / framesPerSecond]; // advance time per frame; the integer division 1/framesPerSecond would always be 0

    NSImage *frame = [self snapshot];

    // save the image with the frame number in the name such as f_001.png

    currentFrame++;

    if(currentFrame < numberOfFrames){
        [self renderAFrame];
    }

}

This will output a sequence of images, rendered at 30 frames per second, which you can import into any editing software and convert into a video.
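
For example, ffmpeg can stitch the numbered frames into a movie (assuming the f_001.png naming scheme above):

ffmpeg -framerate 30 -i f_%03d.png -c:v libx264 -pix_fmt yuv420p output.mp4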

Answer 3 (score: 1)

You can do it this way with an SKVideoNode that you place inside an SKScene, which you then use as the texture for an SCNNode's SCNMaterial.diffuse.contents (hope that makes sense ;))

        player                                                              = AVPlayer(URL: fileURL!)
        let videoSpriteKitNodeLeft                                          = SKVideoNode(AVPlayer: player)
        let videoNodeLeft                                                   = SCNNode()
        let spriteKitScene1                                                 = SKScene(size: CGSize(width: 1280 * screenScale, height: 1280 * screenScale))
        spriteKitScene1.shouldRasterize                                     = true

        videoNodeLeft.geometry                                              = SCNSphere(radius: 30)
        spriteKitScene1.scaleMode                                           = .AspectFit
        videoSpriteKitNodeLeft.position                                     = CGPoint(x: spriteKitScene1.size.width / 2.0, y: spriteKitScene1.size.height / 2.0)
        videoSpriteKitNodeLeft.size                                         = spriteKitScene1.size

        spriteKitScene1.addChild(videoSpriteKitNodeLeft)

        videoNodeLeft.geometry?.firstMaterial?.diffuse.contents             = spriteKitScene1
        videoNodeLeft.geometry?.firstMaterial?.doubleSided                  = true

        // Flip video upside down, so that it's shown in the right position
        var transform                                                       = SCNMatrix4MakeRotation(Float(M_PI), 0.0, 0.0, 1.0)
        transform                                                           = SCNMatrix4Translate(transform, 1.0, 1.0, 0.0)

        videoNodeLeft.pivot                                                 = SCNMatrix4MakeRotation(Float(M_PI_2), 0.0, -1.0, 0.0)
        videoNodeLeft.geometry?.firstMaterial?.diffuse.contentsTransform    = transform

        videoNodeLeft.position                                              = SCNVector3(x: 0, y: 0, z: 0)

        scene.rootNode.addChildNode(videoNodeLeft)

I pulled this code from my GitHub project, a 360° video player that uses SceneKit to play a video inside a 3D sphere: https://github.com/Aralekk/simple360player_iOS/blob/master/simple360player/ViewController.swift

I hope this helps!

Arthur