我正在使用SceneKit shader modifiers向我的应用添加一些视觉元素,如下所示:
// A SceneKit scene with orthographic projection
// Locate the fragment-shader source bundled with this class's framework/target.
let shaderBundle = Bundle(for: Self.self)
// Force-unwrap/try! here: a missing bundled resource is a programmer error.
let shaderUrl = shaderBundle.url(forResource: "MyShader.frag", withExtension: nil)!
let shaderString = try! String(contentsOf: shaderUrl)
let plane = SCNPlane(width: 512, height: 512) // 1024x1024 pixels on devices with x2 screen resolution
// Attach the shader source as a fragment-stage shader modifier on the plane's material.
plane.firstMaterial!.shaderModifiers = [SCNShaderModifierEntryPoint.fragment: shaderString]
let planeNode = SCNNode(geometry: plane)
// NOTE(review): `rootNode` is assumed to be the scene's root node — defined outside this snippet.
rootNode.addChildNode(planeNode)
问题在于性能下降,因为SceneKit会逐像素地渲染运行该着色器的平面。如何在不改变平面在屏幕上尺寸的情况下,降低着色器的渲染分辨率?
我已经尝试过缩小plane
并在planeNode
上使用放大的比例转换,但是毫无效果,着色器的渲染仍然像以前一样高度详细。
使用plane.firstMaterial!.diffuse.contentsTransform
也没有帮助(或者我做错了)。
我知道,如果该着色器平面是场景中唯一的节点,我可以把整个SCNView缩小,再对其应用一个放大的仿射变换;但事实并非如此——同一场景中还有其他(非着色器)节点,而我不希望以任何方式改变它们的外观。
答案 0 :(得分:2)
似乎我设法通过将SceneKit场景嵌套在由顶级SceneKit场景显示的SpriteKit场景中,从而使用一种“渲染到纹理”方法来解决了这个问题。
更详细地讲,下面这个SCNNode子类把一个缩小后的着色器平面放进SpriteKit的SK3DNode中;再把该SK3DNode作为SceneKit内容放进一个SpriteKit的SKScene中;最后把该SKScene用作一个放大平面的漫反射内容(diffuse contents),而这个放大平面被放置在顶级SceneKit场景中。
奇怪的是,为了保持原始分辨率,我需要使用scaleFactor*2
,因此,为了使渲染分辨率减半(通常是比例因子0.5),我实际上需要使用scaleFactor = 1
。
如果有人碰巧知道这种奇怪行为的原因或解决方法,请在评论中让我知道。
import Foundation
import SceneKit
import SpriteKit
/// A node that renders a fragment-shader-modifier plane at a reduced resolution
/// ("render to texture"): the shader runs on a downscaled plane inside a nested
/// SceneKit scene, which is hosted by a SpriteKit scene, which in turn is used
/// as the diffuse contents of an upscaled plane inside this node.
class ScaledResolutionFragmentShaderModifierPlaneNode: SCNNode {
    // Depth of the nested scene's orthographic frustum.
    private static let nestedSCNSceneFrustumLength: CGFloat = 8

    // For shader parameter input
    let shaderPlaneMaterial: SCNMaterial

    // shaderModifier: the shader
    // planeSize: the size of the shader on the screen
    // scaleFactor: the scale to be used for the shader's rendering resolution; the lower, the faster
    init(shaderModifier: String, planeSize: CGSize, scaleFactor: CGFloat) {
        // Everything inside the nested scene is laid out at the reduced size.
        let downscaledSize = CGSize(width: planeSize.width * scaleFactor,
                                    height: planeSize.height * scaleFactor)

        // --- Nested SceneKit scene with an orthographic camera ---
        let innerScene = SCNScene()

        let orthoCamera = SCNCamera()
        orthoCamera.usesOrthographicProjection = true
        orthoCamera.orthographicScale = Double(downscaledSize.height / 2)
        orthoCamera.zFar = Double(Self.nestedSCNSceneFrustumLength)

        let orthoCameraNode = SCNNode()
        orthoCameraNode.camera = orthoCamera
        orthoCameraNode.simdPosition = simd_float3(x: 0, y: 0,
                                                   z: Float(Self.nestedSCNSceneFrustumLength / 2))
        innerScene.rootNode.addChildNode(orthoCameraNode)

        // The shader runs on this downscaled plane, so it shades fewer pixels.
        let downscaledPlane = SCNPlane(width: downscaledSize.width, height: downscaledSize.height)
        shaderPlaneMaterial = downscaledPlane.firstMaterial!
        shaderPlaneMaterial.shaderModifiers = [SCNShaderModifierEntryPoint.fragment: shaderModifier]
        innerScene.rootNode.addChildNode(SCNNode(geometry: downscaledPlane))

        // --- Intermediary SpriteKit scene that renders the nested SceneKit scene ---
        let sceneHostNode = SK3DNode(viewportSize: downscaledSize)
        sceneHostNode.scnScene = innerScene
        // Center the 3D node inside the SpriteKit scene.
        sceneHostNode.position = CGPoint(x: downscaledSize.width / 2,
                                         y: downscaledSize.height / 2)
        sceneHostNode.isPlaying = true

        let bridgeScene = SKScene(size: downscaledSize)
        bridgeScene.backgroundColor = .clear
        bridgeScene.addChild(sceneHostNode)

        // --- Upscaled display plane shown in the outer (top-level) scene ---
        let displayPlane = SCNPlane(width: downscaledSize.width, height: downscaledSize.height)
        displayPlane.firstMaterial!.diffuse.contents = bridgeScene
        let displayPlaneNode = SCNNode(geometry: displayPlane)
        // Scale back up so the plane occupies its original on-screen size.
        let upscale = 1 / Float(scaleFactor)
        displayPlaneNode.simdScale = simd_float3(x: upscale, y: upscale, z: 1)

        super.init()
        addChildNode(displayPlaneNode)
    }

    required init?(coder: NSCoder) {
        fatalError()
    }
}
答案 1 :(得分:1)
通常,如果不借助全新的GPU特性——例如Metal中的variable rasterization rate或其他平台上的variable rate shading——就无法让场景中的某个对象以不同于场景中其他对象的分辨率运行其片段着色器。
在这种情况下,根据您的设置,您或许可以使用SCNTechnique在单独的通道中以不同的分辨率渲染该平面,然后再把它合成回场景中——这与一些游戏引擎以较低分辨率渲染粒子(render particles at a lower resolution)以节省填充率的做法类似。下面是一个例子。
首先,您需要在项目中添加一个Metal文件(如果已有的话,只需添加即可),其中包含以下内容:
#include <SceneKit/scn_metal>
// Vertex input for the full-screen composite quad, bound via SceneKit's
// standard vertex semantics (DRAW_QUAD supplies position and texcoord 0).
struct QuadVertexIn {
float3 position [[ attribute(SCNVertexSemanticPosition) ]];
float2 uv [[ attribute(SCNVertexSemanticTexcoord0) ]];
};
struct QuadVertexOut {
float4 position [[ position ]];
float2 uv;
};
// Pass-through quad vertex shader. The y coordinate is negated to flip the
// quad vertically; z/w are fixed at 1 (no projection needed for a screen quad).
// Referenced by name ("quadVertex") from the SCNTechnique dictionary.
vertex QuadVertexOut quadVertex(QuadVertexIn v [[ stage_in ]]) {
QuadVertexOut o;
o.position = float4(v.position.x, -v.position.y, 1, 1);
o.uv = v.uv;
return o;
}
// Linear filtering gives a smooth upscale of the low-res target; switch to
// filter::nearest for crisp (blocky) pixels instead.
constexpr sampler compositingSampler(coord::normalized, address::clamp_to_edge, filter::linear);
// Samples the low-resolution render target ("lowResStuff", bound to texture 0
// by the technique's "inputs" entry) and returns it for alpha blending over
// the main color target. Referenced by name ("compositeFragment") from the
// SCNTechnique dictionary.
fragment half4 compositeFragment(QuadVertexOut v [[ stage_in ]], texture2d<half, access::sample> compositeInput [[ texture(0) ]]) {
return compositeInput.sample(compositingSampler, v.uv);
}
然后,在您的SceneKit代码中,您可以设置并应用如下技术:
// Three-pass technique: (1) render the low-res category into a half-size
// target, (2) render everything else normally, (3) composite the low-res
// target over the main color buffer with a full-screen quad.
// The Metal function names below must match the shader file exactly.
let technique = SCNTechnique(dictionary: [
"passes": ["drawLowResStuff":
["draw": "DRAW_SCENE",
// only draw nodes that are in this category
"includeCategoryMask": 2,
"colorStates": ["clear": true, "clearColor": "0.0"],
// render into the intermediate target declared under "targets"
"outputs": ["color": "lowResStuff"]],
"drawScene":
["draw": "DRAW_SCENE",
// don’t draw nodes that are in the low-res-stuff category
"excludeCategoryMask": 2,
"colorStates": ["clear": true, "clearColor": "sceneBackground"],
"outputs": ["color": "COLOR"]],
"composite":
["draw": "DRAW_QUAD",
"metalVertexShader": "quadVertex",
"metalFragmentShader": "compositeFragment",
// don’t clear what’s currently there (the rest of the scene)
"colorStates": ["clear": false],
// use alpha blending
"blendStates": ["enable": true, "colorSrc": "srcAlpha", "colorDst": "oneMinusSrcAlpha"],
// supply the lowResStuff render target to the fragment shader
"inputs": ["compositeInput": "lowResStuff"],
// draw into the main color render target
"outputs": ["color": "COLOR"]]
],
// passes run in this order each frame
"sequence": ["drawLowResStuff", "drawScene", "composite"],
// "scaleFactor": 0.5 → the intermediate target is half the viewport size,
// so the expensive fragment shading covers a quarter of the pixels
"targets": ["lowResStuff": ["type": "color", "scaleFactor": 0.5]]
])
// mark the plane node as belonging to the category of stuff that gets drawn in the low-res pass
myPlaneNode.categoryBitMask = 2
// apply the technique to the scene view
mySceneView.technique = technique
在一个由两个具有相同纹理的球体组成的测试场景中,scaleFactor
设置为0.25而不是0.5来放大效果,结果看起来像这样。
如果您希望得到清晰锐利的像素,而不是上面展示的模糊缩放效果,请在Metal代码中把filter::linear改为filter::nearest。另外,请注意,合成进来的低分辨率内容不会考虑深度缓冲区,因此如果您的平面应该出现在其他对象"后面",您就必须在合成函数中做更多工作才能修复这一点。