Error with Metal indirect command buffers: "Fragment shader cannot be used with indirect command buffers"

Asked: 2019-04-01 07:56:38

Tags: ios gpu metal deferred-rendering

I am working on a Metal MTKView-based app that takes advantage of the A11 TBDR architecture to do deferred shading in a single render pass. I used Apple's Deferred Lighting sample code as a reference, and it works well.

I wanted to try out the indirect command buffer feature of Metal 2 on A11 hardware to make the geometry-buffer pass GPU-driven.

I have been using Apple's Encoding Indirect Command Buffers on the GPU sample code as my main reference. I can run that sample on my iPhone XR (although, perhaps off topic, the scrolling is not smooth and judders).

However, my code runs into trouble when I try to move the geometry-buffer pass into an indirect command buffer. When I set supportIndirectCommandBuffers to true on the MTLRenderPipelineDescriptor for the geometry-buffer pipeline, device.makeRenderPipelineState fails with the error

AGXMetalA12 Code=3 "Fragment shader cannot be used with indirect command buffers"

I have not been able to find anything about this error in the documentation. I am wondering whether there are certain fragment operations that are not allowed in indirect pipelines, or whether I have overlooked some restriction on GPU-driven drawing (perhaps the number of color attachments)?

SharedTypes.h

Header shared between Metal and Swift

#ifndef SharedTypes_h
#define SharedTypes_h

#ifdef __METAL_VERSION__

#define NS_CLOSED_ENUM(_type, _name) enum _name : _type _name; enum _name : _type
#define NSInteger metal::int32_t

#else

#import <Foundation/Foundation.h>

#endif

#include <simd/simd.h>

typedef struct {
    uint32_t meshId;
    matrix_float3x3 normalViewMatrix;
    matrix_float4x4 modelMatrix;
    matrix_float4x4 shadowMVPTransformMatrix;
} InstanceData;

typedef struct {
    vector_float3 cameraPosition;
    float voxelScale;
    float blockScale;
    vector_float3 lightDirection;
    matrix_float4x4 viewMatrix;
    matrix_float4x4 projectionMatrix;
    matrix_float4x4 projectionMatrixInverse;
    matrix_float4x4 shadowViewProjectionMatrix;
} VoxelUniforms;

typedef NS_CLOSED_ENUM(NSInteger, BufferIndex)
{
    BufferIndexInstances  = 0,
    BufferIndexVertices = 1,
    BufferIndexIndices = 2,
    BufferIndexVoxelUniforms = 3,
};

typedef NS_CLOSED_ENUM(NSInteger, RenderTarget)
{
    RenderTargetLighting = 0,
    RenderTargetNormal_shadow = 1,
    RenderTargetVoxelIndex = 2,
    RenderTargetDepth = 3,
};

#endif /* SharedTypes_h */

GBuffer shader

#include <metal_stdlib>
using namespace metal;
#include "../SharedTypes.h"
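// Note: GBufferData, the normals[] lookup table, tc2d(), getAO(), poissonDisk,
// fogOfWarSampler, and the shadow-related constants used below are defined
// elsewhere in the project and omitted from this excerpt.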

struct VertexIn {
    packed_half3 position;
    packed_half3 texCoord3D;
    half ambientOcclusion;
    uchar normalIndex;
};

struct VertexInOut {
    float4 position [[ position ]];
    half3 worldPos;
    half3 eyeNormal;
    half3 localPosition;
    half3 localNormal;
    float eyeDepth;
    float3 shadowCoord;
    half3 texCoord3D;
};

vertex VertexInOut gBufferVertex(device InstanceData* instances [[ buffer( BufferIndexInstances ) ]],
                                 device VertexIn* vertices [[ buffer( BufferIndexVertices ) ]],
                                 constant VoxelUniforms &uniforms [[ buffer( BufferIndexVoxelUniforms ) ]],
                                 uint vid [[ vertex_id ]],
                                 ushort iid [[ instance_id ]])
{
    InstanceData instance = instances[iid];
    VertexIn vert = vertices[vid];
    VertexInOut out;
    float4 position = float4(float3(vert.position), 1);
    float4 worldPos = instance.modelMatrix * position;
    float4 eyePosition = uniforms.viewMatrix * worldPos;
    out.position = uniforms.projectionMatrix * eyePosition;
    out.worldPos = half3(worldPos.xyz);
    out.eyeDepth = eyePosition.z;

    half3 normal = normals[vert.normalIndex];
    out.eyeNormal = half3(instance.normalViewMatrix * float3(normal));
    out.shadowCoord = (instance.shadowMVPTransformMatrix * position).xyz;

    out.localPosition = half3(vert.position);
    out.localNormal = normal;
    out.texCoord3D = half3(vert.texCoord3D);
    return out;
}

fragment GBufferData gBufferFragment(VertexInOut in [[ stage_in ]],
                                     constant VoxelUniforms &uniforms [[ buffer( BufferIndexVoxelUniforms ) ]],
                                     texture3d<ushort, access::sample> voxelMap [[ texture(0) ]],
                                     depth2d<float> shadowMap [[ texture(1) ]],
                                     texture3d<half, access::sample> fogOfWarMap [[ texture(2) ]]
                                     ) {
    // voxel index
    half3 center = round(in.texCoord3D);
    uchar voxIndex = voxelMap.read(ushort3(center)).r - 1;

    // ambient occlusion
    half3 neighborPos = center + in.localNormal;
    half3 absNormal = abs(in.localNormal);
    half2 texCoord2D = tc2d(in.localPosition / uniforms.voxelScale, absNormal);
    half ao = getAO(voxelMap, neighborPos, absNormal.yzx, absNormal.zxy, texCoord2D);

    // shadow
    constexpr sampler shadowSampler(coord::normalized,
                                    filter::linear,
                                    mip_filter::none,
                                    address::clamp_to_edge,
                                    compare_func::less);

    float shadow_sample = ambientLightingLevel;
    for (short i = 0; i < shadowSampleCount; i++){
        shadow_sample += shadowMap.sample_compare(shadowSampler, in.shadowCoord.xy + poissonDisk[i] * 0.002, in.shadowCoord.z - 0.0018) * shadowContributionPerSample;
    }
    shadow_sample = min(1.0, shadow_sample);

    //fog-of-war
    half fogOfWarSample = fogOfWarMap.sample(fogOfWarSampler, (float3(in.worldPos) / uniforms.blockScale) + float3(0.5, 0.4, 0.5)).r;
    half notVisible = max(fogOfWarSample, 0.5h);

    // output
    GBufferData out;
    out.normal_shadow = half4(in.eyeNormal, ao * half(shadow_sample) * notVisible);
    out.voxelIndex = voxIndex;
    out.depth = in.eyeDepth;
    return out;
};

Pipeline setup

extension RenderTarget {

    var pixelFormat: MTLPixelFormat {
        switch self {
        case .lighting: return .bgra8Unorm
        case .normal_shadow: return .rgba8Snorm
        case .voxelIndex: return .r8Uint
        case .depth: return .r32Float
        }
    }

    static var allCases: [RenderTarget] = [.lighting, .normal_shadow, .voxelIndex, .depth]
}

public final class GBufferRenderer {
    private let renderPipelineState: MTLRenderPipelineState
    weak var shadowMap: MTLTexture?

    public init(depthPixelFormat: MTLPixelFormat, colorPixelFormat: MTLPixelFormat, sampleCount: Int = 1) throws {
        let library = try LibraryMonad.getLibrary()
        let device = library.device
        let descriptor = MTLRenderPipelineDescriptor()
        descriptor.vertexFunction = library.makeFunction(name: "gBufferVertex")!
        descriptor.fragmentFunction = library.makeFunction(name: "gBufferFragment")!
        descriptor.depthAttachmentPixelFormat = depthPixelFormat
        descriptor.stencilAttachmentPixelFormat = depthPixelFormat
        descriptor.sampleCount = sampleCount
        for target in RenderTarget.allCases {
            descriptor.colorAttachments[target.rawValue].pixelFormat = target.pixelFormat
        }
        // uncomment below to trigger throw
        // descriptor.supportIndirectCommandBuffers = true
        renderPipelineState = try device.makeRenderPipelineState(descriptor: descriptor) // throws "Fragment shader cannot be used with indirect command buffers"
    }

    public convenience init(mtkView: MTKView) throws {
        try self.init(depthPixelFormat: mtkView.depthStencilPixelFormat, colorPixelFormat: mtkView.colorPixelFormat, sampleCount: mtkView.sampleCount)
    }
}

All of the above works fine when drawing is issued from the CPU in the usual way, but setting supportIndirectCommandBuffers in preparation for GPU-driven drawing triggers the error.

I tried stripping the fragment shader down to returning only constant values for the g-buffers, and then makeRenderPipelineState succeeds, but as soon as I add texture sampling back in it starts complaining again. I cannot pin down what exactly it objects to in the fragment shader.
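For reference, the stripped-down test looked roughly like the following sketch (GBufferData is the project's g-buffer output struct, which is not shown above):

// Rough sketch of the "constant output" test: with no texture arguments,
// pipeline creation succeeds even with supportIndirectCommandBuffers = true.
fragment GBufferData gBufferFragmentStripped(VertexInOut in [[ stage_in ]])
{
    GBufferData out;
    out.normal_shadow = half4(0.0h, 0.0h, 1.0h, 1.0h);
    out.voxelIndex = 0;
    out.depth = 0.0;
    return out;
}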

1 Answer:

Answer 0 (score: 0)

After reading carefully through the code, the Metal documentation, and the Metal Shading Language specification, I think I understand why this error occurs.

If you look at the render_command interface in the metal_command_buffer header in Metal, you will see that the only functions available for passing arguments to an indirect render command are set_vertex_buffer and set_fragment_buffer; there is no set_vertex_texture or set_vertex_sampler like there is on MTLRenderCommandEncoder.
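To illustrate, a GPU-side encoding kernel for this pass can only bind buffers per command, roughly like the sketch below (ICBContainer, encodeGBufferDraws, the buffer(0) slot, and the draw parameters are hypothetical; InstanceData, VoxelUniforms, and the BufferIndex values come from SharedTypes.h in the question):

#include <metal_stdlib>
#include "../SharedTypes.h"
using namespace metal;

// A command_buffer can only reach a kernel through an argument buffer.
struct ICBContainer {
    command_buffer icb [[ id(0) ]];
};

kernel void encodeGBufferDraws(uint                    cmdIndex  [[ thread_position_in_grid ]],
                               device ICBContainer    &container [[ buffer(0) ]],
                               device InstanceData    *instances [[ buffer(BufferIndexInstances) ]],
                               constant VoxelUniforms &uniforms  [[ buffer(BufferIndexVoxelUniforms) ]])
{
    render_command cmd(container.icb, cmdIndex);

    // Only buffers can be bound on a render_command; there is no
    // set_vertex_texture / set_fragment_texture / set_*_sampler.
    cmd.set_vertex_buffer(instances, BufferIndexInstances);
    cmd.set_vertex_buffer(&uniforms, BufferIndexVoxelUniforms);
    cmd.set_fragment_buffer(&uniforms, BufferIndexVoxelUniforms);

    // Placeholder draw: 36 vertices, one instance per command.
    cmd.draw_primitives(primitive_type::triangle, 0, 36, 1, cmdIndex);
}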

But since the shaders your pipeline uses take textures as arguments, and with supportIndirectCommandBuffers you have declared that you want to use this pipeline in indirect commands, Metal has no choice but to fail pipeline creation.

Instead, if you want to pass textures or samplers to an indirect render command, you should put them in an argument buffer, pass that buffer to the shader used by the indirect render commands, and bind it with set_vertex_buffer and set_fragment_buffer on each render_command.

Specification: Metal Shading Language Specification (section 5.16)
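On the shader side, that looks roughly like the sketch below (GBufferFragmentArgs and the buffer(4) slot are hypothetical names, not part of the code above; most of the original fragment body is omitted):

// Hypothetical argument buffer carrying the g-buffer pass textures.
struct GBufferFragmentArgs {
    texture3d<ushort, access::sample> voxelMap    [[ id(0) ]];
    depth2d<float>                    shadowMap   [[ id(1) ]];
    texture3d<half, access::sample>   fogOfWarMap [[ id(2) ]];
};

// The fragment function now receives one buffer instead of three [[texture(n)]]
// arguments, which is compatible with supportIndirectCommandBuffers.
fragment GBufferData gBufferFragmentIndirect(VertexInOut in [[ stage_in ]],
                                             constant VoxelUniforms &uniforms [[ buffer(BufferIndexVoxelUniforms) ]],
                                             const device GBufferFragmentArgs &args [[ buffer(4) ]])
{
    half3 center = round(in.texCoord3D);

    GBufferData out;
    out.normal_shadow = half4(in.eyeNormal, 1.0h); // shadow/AO terms omitted for brevity
    out.voxelIndex = args.voxelMap.read(ushort3(center)).r - 1;
    out.depth = in.eyeDepth;
    return out;
}

On the CPU side, the argument buffer's contents are written with an MTLArgumentEncoder created from the fragment function via makeArgumentEncoder(bufferIndex:), and the textures it references must be made resident with useResource(_:usage:) on the render command encoder that executes the indirect command buffer.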