I'm currently trying to port a shadertoy.com shader (Atmospheric Scattering Sample, interactive demo with code) to Unity. The shader is written in GLSL, and I have to start the editor with C:\Program Files\Unity\Editor>Unity.exe -force-opengl to make it render the shader (otherwise a "This shader cannot run on this GPU" error shows up), but that's not the problem right now. The problem is porting that shader to Unity.
The functions for the scattering etc. are all identical and "runnable" in my ported shader; the only thing is that the mainImage() function manages the camera, the light direction and the ray direction itself. This obviously has to be changed so that Unity's camera position, view direction, and light source and direction are used instead.
The main function of the original looks like this:
void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
    // default ray dir
    vec3 dir = ray_dir( 45.0, iResolution.xy, fragCoord.xy );

    // default ray origin
    vec3 eye = vec3( 0.0, 0.0, 2.4 );

    // rotate camera
    mat3 rot = rot3xy( vec2( 0.0, iGlobalTime * 0.5 ) );
    dir = rot * dir;
    eye = rot * eye;

    // sun light dir
    vec3 l = vec3( 0, 0, 1 );

    vec2 e = ray_vs_sphere( eye, dir, R );
    if ( e.x > e.y ) {
        discard;
    }

    vec2 f = ray_vs_sphere( eye, dir, R_INNER );
    e.y = min( e.y, f.x );

    vec3 I = in_scatter( eye, dir, e, l );

    fragColor = vec4( I, 1.0 );
}
I've read the documentation of that function and how it works at https://www.shadertoy.com/howto:
Image shaders implement the mainImage() function to generate procedural images by computing a color for each pixel. This function is expected to be called once per pixel, and it is the responsibility of the host application to provide the right inputs to it, get the output color from it, and assign it to the screen pixel. The prototype is:
void mainImage( out vec4 fragColor, in vec2 fragCoord );
where fragCoord contains the pixel coordinates for which the shader needs to compute a color. The coordinates are in pixel units, ranging from 0.5 to resolution-0.5, over the rendering surface, where the resolution is passed to the shader through the iResolution uniform (see below).
The resulting color is gathered in fragColor as a four-component vector, the last component of which is ignored by the client. The result is gathered as an "out" variable in anticipation of the future addition of multiple render targets.
So in that function there is the iGlobalTime reference to make the camera rotate over time, and the iResolution reference for the resolution. I've embedded the shader in a Unity shader and tried to fix and wire up dir, eye and l so that it works with Unity, but I'm completely stuck. I get some picture that looks "related" to the original shader: (top is the original, bottom the current state in Unity)
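As a side note, Unity exposes built-in uniforms that correspond to those two Shadertoy inputs, and inside a GLSLPROGRAM block they can be declared by name. A minimal sketch (assuming UnityCG.glslinc is not included; _Time and _ScreenParams are Unity's built-in names, and the mapping in the comments is how they line up with the Shadertoy uniforms):
uniform vec4 _Time;         // _Time.y is the time in seconds, the counterpart of iGlobalTime
uniform vec4 _ScreenParams; // _ScreenParams.xy is the render target size in pixels, like iResolution.xy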
I'm not a shader professional; I only know some basics of OpenGL, and for the most part I write game logic in C#. So all I really did was look at other shader examples and try to see how I could get the data about the camera, the light sources and so on into this code, but as you can see, nothing really worked.
I've copied the skeleton code for the shader from https://en.wikibooks.org/wiki/GLSL_Programming/Unity/Specular_Highlights and some vectors from http://forum.unity3d.com/threads/glsl-shader.39629/.
I hope someone can give me pointers on how to fix this shader / port it to Unity properly. Below is the current shader code. To reproduce it, all you need to do is create a new shader in a blank project, copy the code into it, make a new material, assign the shader to that material, then add a sphere, put the material on it, and add a directional light.
Shader "Unlit/AtmoFragShader" {
Properties{
_MainTex("Base (RGB)", 2D) = "white" {}
_LC("LC", Color) = (1,0,0,0) /* stuff from the testing shader, now really used */
_LP("LP", Vector) = (1,1,1,1)
}
SubShader{
Tags{ "Queue" = "Geometry" } //Is this even the right queue?
Pass{
//Tags{ "LightMode" = "ForwardBase" }
GLSLPROGRAM
/* begin port by copying in the constants */
// math const
const float PI = 3.14159265359;
const float DEG_TO_RAD = PI / 180.0;
const float MAX = 10000.0;
// scatter const
const float K_R = 0.166;
const float K_M = 0.0025;
const float E = 14.3; // light intensity
const vec3 C_R = vec3(0.3, 0.7, 1.0); // 1 / wavelength ^ 4
const float G_M = -0.85; // Mie g
const float R = 1.0; /* this is the radius of the sphere? this should be set from the geometry or something.. */
const float R_INNER = 0.7;
const float SCALE_H = 4.0 / (R - R_INNER);
const float SCALE_L = 1.0 / (R - R_INNER);
const int NUM_OUT_SCATTER = 10;
const float FNUM_OUT_SCATTER = 10.0;
const int NUM_IN_SCATTER = 10;
const float FNUM_IN_SCATTER = 10.0;
/* begin functions. These are outside the #ifdef VERTEX / #ifdef FRAGMENT blocks so that both stages can access them. */
// angle : pitch, yaw
mat3 rot3xy(vec2 angle) {
vec2 c = cos(angle);
vec2 s = sin(angle);
return mat3(
c.y, 0.0, -s.y,
s.y * s.x, c.x, c.y * s.x,
s.y * c.x, -s.x, c.y * c.x
);
}
// ray direction
vec3 ray_dir(float fov, vec2 size, vec2 pos) {
vec2 xy = pos - size * 0.5;
float cot_half_fov = tan((90.0 - fov * 0.5) * DEG_TO_RAD);
float z = size.y * 0.5 * cot_half_fov;
return normalize(vec3(xy, -z));
}
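// note: rot3xy() and ray_dir() are only used by the original mainImage();
// in the Unity port the camera and vertex data are meant to supply the ray instead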
// ray intersects sphere
// e = -b +/- sqrt( b^2 - c )
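// NOTE: assumes dir is normalized (the quadratic below is solved with the leading coefficient a == 1)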
vec2 ray_vs_sphere(vec3 p, vec3 dir, float r) {
float b = dot(p, dir);
float c = dot(p, p) - r * r;
float d = b * b - c;
if (d < 0.0) {
return vec2(MAX, -MAX);
}
d = sqrt(d);
return vec2(-b - d, -b + d);
}
// Mie
// g : ( -0.75, -0.999 )
//           3 * ( 1 - g^2 )               1 + c^2
// F = ------------------------- * ----------------------------------
//         2 * ( 2 + g^2 )          ( 1 + g^2 - 2 * g * c )^(3/2)
float phase_mie(float g, float c, float cc) {
float gg = g * g;
float a = (1.0 - gg) * (1.0 + cc);
float b = 1.0 + gg - 2.0 * g * c;
b *= sqrt(b);
b *= 2.0 + gg;
return 1.5 * a / b;
}
// Reyleigh
// g : 0
// F = 3/4 * ( 1 + c^2 )
float phase_reyleigh(float cc) {
return 0.75 * (1.0 + cc);
}
float density(vec3 p) {
return exp(-(length(p) - R_INNER) * SCALE_H);
}
float optic(vec3 p, vec3 q) {
vec3 step = (q - p) / FNUM_OUT_SCATTER;
vec3 v = p + step * 0.5;
float sum = 0.0;
for (int i = 0; i < NUM_OUT_SCATTER; i++) {
sum += density(v);
v += step;
}
sum *= length(step) * SCALE_L;
return sum;
}
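// numerically integrates the in-scattered light along the view ray between e.x and e.y;
// note: l is expected to be a normalized direction pointing from the sample point toward the sun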
vec3 in_scatter(vec3 o, vec3 dir, vec2 e, vec3 l) {
float len = (e.y - e.x) / FNUM_IN_SCATTER;
vec3 step = dir * len;
vec3 p = o + dir * e.x;
vec3 v = p + dir * (len * 0.5);
vec3 sum = vec3(0.0);
for (int i = 0; i < NUM_IN_SCATTER; i++) {
vec2 f = ray_vs_sphere(v, l, R);
vec3 u = v + l * f.y;
float n = (optic(p, v) + optic(v, u)) * (PI * 4.0);
sum += density(v) * exp(-n * (K_R * C_R + K_M));
v += step;
}
sum *= len * SCALE_L;
float c = dot(dir, -l);
float cc = c * c;
return sum * (K_R * C_R * phase_reyleigh(cc) + K_M * phase_mie(G_M, c, cc)) * E;
}
/* end functions */
/* vertex shader begins here*/
#ifdef VERTEX
const float SpecularContribution = 0.3;
const float DiffuseContribution = 1.0 - SpecularContribution;
uniform vec4 _LP;
varying vec2 TextureCoordinate;
varying float LightIntensity;
varying vec4 someOutput;
/* transient stuff */
varying vec3 eyeOutput;
varying vec3 dirOutput;
varying vec3 lOutput;
varying vec2 eOutput;
/* lighting stuff */
// i.e. one could #include "UnityCG.glslinc"
uniform vec3 _WorldSpaceCameraPos;
// camera position in world space
uniform mat4 _Object2World; // model matrix
uniform mat4 _World2Object; // inverse model matrix
uniform vec4 _WorldSpaceLightPos0;
// direction to or position of light source
uniform vec4 _LightColor0;
// color of light source (from "Lighting.cginc")
void main()
{
/* code from that example shader */
gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
vec3 ecPosition = vec3(gl_ModelViewMatrix * gl_Vertex);
vec3 tnorm = normalize(gl_NormalMatrix * gl_Normal);
vec3 lightVec = normalize(_LP.xyz - ecPosition);
vec3 reflectVec = reflect(-lightVec, tnorm);
vec3 viewVec = normalize(-ecPosition);
/* copied from https://en.wikibooks.org/wiki/GLSL_Programming/Unity/Specular_Highlights for testing stuff */
//I have no idea what I'm doing, but hopefully this computes some vectors which I need
mat4 modelMatrix = _Object2World;
mat4 modelMatrixInverse = _World2Object; // unity_Scale.w
// is unnecessary because we normalize vectors
vec3 normalDirection = normalize(vec3(
vec4(gl_Normal, 0.0) * modelMatrixInverse));
vec3 viewDirection = normalize(vec3(
vec4(_WorldSpaceCameraPos, 1.0)
- modelMatrix * gl_Vertex));
vec3 lightDirection;
float attenuation;
if (0.0 == _WorldSpaceLightPos0.w) // directional light?
{
attenuation = 1.0; // no attenuation
lightDirection = normalize(vec3(_WorldSpaceLightPos0));
}
else // point or spot light
{
vec3 vertexToLightSource = vec3(_WorldSpaceLightPos0
- modelMatrix * gl_Vertex);
float distance = length(vertexToLightSource);
attenuation = 1.0 / distance; // linear attenuation
lightDirection = normalize(vertexToLightSource);
}
/* test port */
// default ray dir
//That's the direction of the camera here?
vec3 dir = viewDirection; //normalDirection;//viewDirection;// tnorm;//lightVec;//lightDirection;//normalDirection; //lightVec;//tnorm;//ray_dir(45.0, iResolution.xy, fragCoord.xy);
// default ray origin
//I think they mean the position of the camera here?
vec3 eye = vec3(_WorldSpaceCameraPos); //vec3(_WorldSpaceLightPos0); //// vec3(0.0, 0.0, 0.0); //_WorldSpaceCameraPos;//ecPosition; //vec3(0.0, 0.0, 2.4);
// rotate camera not needed, remove it
// sun light dir
//I think they mean the direction of our directional light?
vec3 l = lightDirection;//_LightColor0.xyz; //lightDirection; //normalDirection;//normalize(vec3(_WorldSpaceLightPos0));//lightVec;// vec3(0, 0, 1);
/* this computes the intersection of the ray and the sphere.. is this really needed?*/
vec2 e = ray_vs_sphere(eye, dir, R);
/* copy stuff so that we can use it in the fragment shader; "discard" is only allowed in the fragment shader,
so the rest has to be computed there */
eOutput = e;
eyeOutput = eye;
dirOutput = dir;
lOutput = l; // pass the light direction (not dir) to the fragment shader
}
#endif
#ifdef FRAGMENT
uniform sampler2D _MainTex;
varying vec2 TextureCoordinate;
uniform vec4 _LC;
varying float LightIntensity;
/* transient port */
varying vec3 eyeOutput;
varying vec3 dirOutput;
varying vec3 lOutput;
varying vec2 eOutput;
void main()
{
/* real fragment */
if (eOutput.x > eOutput.y) {
//discard;
}
vec2 f = ray_vs_sphere(eyeOutput, dirOutput, R_INNER);
vec2 e = eOutput;
e.y = min(e.y, f.x);
vec3 I = in_scatter(eyeOutput, dirOutput, e, lOutput); // use the clamped e, as the original mainImage() does
gl_FragColor = vec4(I, 1.0);
/*vec4 c2;
c2.x = 1.0;
c2.y = 1.0;
c2.z = 0.0;
c2.w = 1.0f;
gl_FragColor = c2;*/
//gl_FragColor = c;
}
#endif
ENDGLSL
}
}
}
Thanks for any help, and sorry for the long post and explanations.
EDIT: I just found out that the radius of the sphere does have an influence on all this; a sphere scaled by 2.0 in every direction gives a much better result. However, the picture is still completely independent of the viewing angle of the camera and of any lights, which is far from the shadertoy version.
Answer 0 (score: 0)
It looks like you're trying to render a 2D texture over a sphere, which calls for a different approach. For what you want to do, I would apply the shader to a plane crossing the sphere.
For general purposes, take a look at this article on how to convert a ShaderToy shader to Unity3D.
I've listed some of the steps here:
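Roughly, these come down to the usual GLSL-to-Cg substitutions (a sketch only; the exact list is in the article above):
// Shadertoy (GLSL)                        ->  Unity (Cg/HLSL)
// iGlobalTime                             ->  _Time.y
// iResolution.xy                          ->  _ScreenParams.xy
// vec2 / vec3 / vec4                      ->  float2 / float3 / float4
// mat2 / mat3 / mat4                      ->  float2x2 / float3x3 / float4x4
// fract() / mix() / mod()                 ->  frac() / lerp() / fmod()
// texture2D(iChannel0, uv)                ->  tex2D(_MainTex, uv)
// mainImage(out fragColor, in fragCoord)  ->  the frag() function, with UVs or screen position in place of fragCoord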
Regarding this question:
Tags{ "Queue" = "Geometry" } //Is this even the right queue?
The Queue refers to the order in which it will be rendered; Geometry is one of the first. If you want the shader to run on top of everything, you can use Overlay, for example. This topic is covered here.
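For reference, these are Unity's built-in queues in render order, and how a later one would be chosen (picking Transparent here is just an example):
// Background (1000) -> Geometry (2000) -> AlphaTest (2450) -> Transparent (3000) -> Overlay (4000)
Tags { "Queue" = "Transparent" } // rendered after all opaque geometry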