This shader (code at the end) uses ray marching to render procedural geometry:
However, in the image (above), the cube in the background should partially occlude the pink solid; it doesn't, because of this:
struct fragmentOutput {
float4 color : SV_Target;
float zvalue : SV_Depth;
};
fragmentOutput frag(fragmentInput i) {
fragmentOutput o;
...
o.zvalue = IF(output[1] > 0, 0, 1);
}
However, I cannot for the life of me figure out how to generate a correct depth value here, so that the raymarched solid properly occludes, and is occluded by, the other geometry in the scene.
I know this is possible, because there's a working example here: https://github.com/i-saint/RaymarchingOnUnity5 (with an associated Japanese blog post: http://i-saint.hatenablog.com/)
However, it's in Japanese, essentially undocumented, and extremely complex.
I'm looking for a stripped-down, minimal version of the same thing that I can build on.
In the shader, I'm currently using this fragment program line:
float2 output = march_raycast(i.worldpos, i.viewdir, _far, _step);
which maps the input point p on the quad (which this shader is attached to), as seen from the camera, to an output float2 (density, distance), where distance is the distance from the quad to the 'point' on the procedural surface.
The question is: how do I map that into the depth buffer in any useful way?
The full shader is below. To use it, create a new scene with a sphere at (0, 0, 0) with a size of at least 50, and assign the shader to it:
Shader "Shaders/Raymarching/BasicMarch" {
Properties {
_sun ("Sun", Vector) = (0, 0, 0, 0)
_far ("Far Depth Value", Float) = 20
_edgeFuzz ("Edge fuzziness", Range(1, 20)) = 1.0
_lightStep ("Light step", Range(0.1, 5)) = 1.0
_step ("Raycast step", Range(0.1, 5)) = 1.0
_dark ("Dark value", Color) = (0, 0, 0, 0)
_light ("Light Value", Color) = (1, 1, 1, 1)
[Toggle] _debugDepth ("Display depth field", Float) = 0
[Toggle] _debugLight ("Display light field", Float) = 0
}
SubShader {
Tags {"Queue"="Transparent" "IgnoreProjector"="True" "RenderType"="Transparent"}
Blend SrcAlpha OneMinusSrcAlpha
Pass {
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma target 3.0
#include "UnityCG.cginc"
#include "UnityLightingCommon.cginc" // for _LightColor0
#define IF(a, b, c) lerp(b, c, step((fixed) (a), 0));
uniform float _far;
uniform float _lightStep;
uniform float3 _sun;
uniform float4 _light;
uniform float4 _dark;
uniform float _debugDepth;
uniform float _debugLight;
uniform float _edgeFuzz;
uniform float _step;
/**
* A soft sphere centered at center_, with radius radius_
* @param center_ The center of the sphere
* @param radius_ The radius of the sphere
* @param point_ The point to check
*/
float geom_soft_sphere(float3 center_, float radius_, float3 point_) {
float rtn = distance(center_, point_);
return IF(rtn < radius_, (radius_ - rtn) / radius_ / _edgeFuzz, 0);
}
/**
* A rectoid centered at center_
* @param center_ The center of the cube
* @param halfsize_ The halfsize of the cube in each direction
*/
float geom_rectoid(float3 center_, float3 halfsize_, float3 point_) {
float rtn = IF((point_[0] < (center_[0] - halfsize_[0])) || (point_[0] > (center_[0] + halfsize_[0])), 0, 1);
rtn = rtn * IF((point_[1] < (center_[1] - halfsize_[1])) || (point_[1] > (center_[1] + halfsize_[1])), 0, 1);
rtn = rtn * IF((point_[2] < (center_[2] - halfsize_[2])) || (point_[2] > (center_[2] + halfsize_[2])), 0, 1);
rtn = rtn * distance(point_, center_);
float radius = length(halfsize_);
return IF(rtn > 0, (radius - rtn) / radius / _edgeFuzz, 0);
}
/**
* Calculate procedural geometry.
* Return (0, 0, 0) for empty space.
* @param point_ The point to sample.
* @return The density of the procedural geometry at point_.
*/
float march_geometry(float3 point_) {
return
geom_rectoid(float3(0, 0, 0), float3(7, 7, 7), point_) +
geom_soft_sphere(float3(10, 0, 0), 7, point_) +
geom_soft_sphere(float3(-10, 0, 0), 7, point_) +
geom_soft_sphere(float3(0, 0, 10), 7, point_) +
geom_soft_sphere(float3(0, 0, -10), 7, point_);
}
/** Return a randomish value to sample step with */
float rand(float3 seed) {
return frac(sin(dot(seed.xyz ,float3(12.9898,78.233,45.5432))) * 43758.5453);
}
/**
* March the point p along the cast path c, and return a float2
* which is (density, depth); if the density is 0 no match was
* found in the given depth domain.
* @param point_ The origin point
* @param cast_ The cast vector
* @param max_ The maximum depth to step to
* @param step_ The increment to step in
* @return (density, depth)
*/
float2 march_raycast(float3 point_, float3 cast_, float max_, float step_) {
float3 origin_ = point_;
float depth_ = 0;
float density_ = 0;
int steps = floor(max_ / step_);
for (int i = 0; (density_ <= 1) && (i < steps); ++i) {
float3 target_ = point_ + cast_ * i * step_ + rand(point_) * cast_ * step_;
density_ += march_geometry(target_);
depth_ = IF((depth_ == 0) && (density_ != 0), distance(point_, target_), depth_);
}
density_ = IF(density_ > 1, 1, density_);
return float2(density_, depth_);
}
/**
* Simple lighting; raycast from depth point to light source, and get density on path
* @param point_ The origin point on the render target
* @param cast_ The original cast (ie. camera view direction)
* @param raycast_ The result of the original raycast
* @param max_ The max distance to cast
* @param step_ The step increment
*/
float2 march_lighting(float3 point_, float3 cast_, float2 raycast_, float max_, float step_) {
float3 target_ = point_ + cast_ * raycast_[1];
float3 lcast_ = normalize(_sun - target_);
return march_raycast(target_, lcast_, max_, _lightStep);
}
struct fragmentInput {
float4 position : SV_POSITION;
float4 worldpos : TEXCOORD0;
float3 viewdir : TEXCOORD1;
};
struct fragmentOutput {
float4 color : SV_Target;
float zvalue : SV_Depth;
};
fragmentInput vert(appdata_base i) {
fragmentInput o;
o.position = mul(UNITY_MATRIX_MVP, i.vertex);
o.worldpos = mul(_Object2World, i.vertex);
o.viewdir = -normalize(WorldSpaceViewDir(i.vertex));
return o;
}
fragmentOutput frag(fragmentInput i) {
fragmentOutput o;
// Raycast
float2 output = march_raycast(i.worldpos, i.viewdir, _far, _step);
float2 light = march_lighting(i.worldpos, i.viewdir, output, _far, _step);
float lvalue = 1.0 - light[0];
float depth = output[1] / _far;
// Generate fragment color
float4 color = lerp(_light, _dark, lvalue);
// Debugging: Depth
float4 debug_depth = float4(depth, depth, depth, 1);
color = IF(_debugDepth, debug_depth, color);
// Debugging: Color
float4 debug_light = float4(lvalue, lvalue, lvalue, 1);
color = IF(_debugLight, debug_light, color);
// Always apply the depth map
color.a = output[0];
o.zvalue = IF(output[1] > 0, 0, 1);
o.color = IF(output[1] <= 0, 0, color);
return o;
}
ENDCG
}
}
}
(Yes, I know it's quite complex, but it's genuinely hard to reduce this kind of shader to a 'simple' test case.)
I'd be happy to accept any answer that modifies the shader above so that the procedural solid occludes, and is occluded by, other geometry in the scene as though it were real geometry.
-
Edit: you can get this 'working' by explicitly setting the depth value on the other geometry in the scene, using the same depth function as the raymarcher:
...however, I still can't get this to work with the 'standard' shader. Still looking for a working solution...
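(For reference, one reading of that workaround is to give the other, ordinary geometry a fragment program that also writes SV_Depth using the same linear distance / _far mapping. The sketch below assumes a plain CGPROGRAM block with UnityCG.cginc included; the names and the exact mapping are my guesses, not part of the original question:)
// Sketch only: an ordinary opaque object that writes linear distance / _far to SV_Depth
uniform float _far;      // same far value the raymarcher uses
uniform float4 _color;
struct v2f {
    float4 position : SV_POSITION;
    float4 worldpos : TEXCOORD0;
};
v2f vert(appdata_base v) {
    v2f o;
    o.position = mul(UNITY_MATRIX_MVP, v.vertex);
    o.worldpos = mul(_Object2World, v.vertex);
    return o;
}
struct depthOutput {
    float4 color : SV_Target;
    float zvalue : SV_Depth;
};
depthOutput frag(v2f i) {
    depthOutput o;
    o.color = _color;
    // Same linear mapping as the raymarcher: world-space distance over _far
    o.zvalue = distance(_WorldSpaceCameraPos, i.worldpos.xyz) / _far;
    return o;
}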
Answer 0 (score: 5)
Looking at the project you linked to, the biggest difference I can see is that their raycast march function uses a pass-by-reference parameter to return a fragment position called ray_pos. That position appears to be in object space, so they transform it using the view-projection matrix to get clip space and read a depth value from that.
The project also has a compute_depth function, but it looks pretty simple.
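For reference, that sort of helper is usually just the clip-space z divided by w, with a remap on OpenGL-style platforms; a sketch of the idea (not a quote of the linked project, and the platform macros are my assumption):
// Sketch: turn a clip-space position into a value suitable for SV_Depth.
// On GL-style platforms clip z is in [-1, 1], so remap it to [0, 1].
float compute_depth(float4 clippos) {
#if defined(SHADER_API_GLCORE) || defined(SHADER_API_GLES) || defined(SHADER_API_GLES3)
    return (clippos.z / clippos.w) * 0.5 + 0.5;
#else
    return clippos.z / clippos.w;
#endif
}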
Your march_raycast function is already computing a target_ position, so you could refactor it a little, apply the out keyword to return that position to the caller, and use it in the depth calculation:
//get position using pass-by-ref
float3 ray_pos = i.worldpos;
float2 output = march_raycast(ray_pos, i.viewdir, _far, _step);
...
//convert position to clip space, read depth
float4 clip_pos = mul(UNITY_MATRIX_VP, float4(ray_pos, 1.0));
o.zvalue = clip_pos.z / clip_pos.w;
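For concreteness, the refactor to march_raycast might look roughly like this; it is a sketch of the suggestion above (making point_ an inout parameter that ends up at the first hit position), not code taken from the linked project:
// Sketch: point_ is now inout and is advanced to the first hit position,
// which the fragment program can then project to clip space as shown above.
float2 march_raycast(inout float3 point_, float3 cast_, float max_, float step_) {
    float3 origin_ = point_;
    float depth_ = 0;
    float density_ = 0;
    int steps = floor(max_ / step_);
    for (int i = 0; (density_ <= 1) && (i < steps); ++i) {
        float3 target_ = origin_ + cast_ * i * step_ + rand(origin_) * cast_ * step_;
        float previous_ = density_;
        density_ += march_geometry(target_);
        // Remember the first sample where the ray enters the solid
        if ((previous_ == 0) && (density_ != 0)) {
            point_ = target_;
            depth_ = distance(origin_, target_);
        }
    }
    density_ = IF(density_ > 1, 1, density_);
    return float2(density_, depth_);
}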
Answer 1 (score: 3)
There may be a problem with your render setup.
To allow the shader to output per-pixel depth, depth testing must be disabled. Otherwise, as an optimization, the GPU assumes the depth of every pixel to be the interpolated depth of its vertices.
Since the shader does no depth testing, it must be rendered before the depth-tested geometry, or it will simply overwrite whatever that geometry has written to the depth buffer.
Depth writes, however, must be enabled, otherwise the depth output of your pixel shader is ignored and never reaches the depth buffer.
Your RenderType is Transparent, which I believe disables depth writes; that would be a problem. Your Queue is also Transparent, which renders it after all solid geometry, back to front, and that is a problem too, since we just concluded it has to render first.
So: render it in a queue before the regular geometry, with depth writes on and depth testing off.
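A minimal sketch of what those render-state changes could look like in the SubShader above (the exact queue offset and the ZTest choice are my assumptions, not something this answer spells out):
// Render before the normal geometry, write depth, and skip the depth test.
Tags {"Queue"="Geometry-1" "RenderType"="Opaque" "IgnoreProjector"="True"}
ZWrite On
ZTest Always
// Blending can stay if you still want soft edges, but the pass no longer
// sorts with the transparent queue.
Blend SrcAlpha OneMinusSrcAlpha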