I am having trouble getting correct screen coordinates, so I made a small MWE to reproduce the behavior I am seeing in my main project. I am using Single Pass rendering in Unity 3D with a Windows Mixed Reality headset. I have a shader that I assign to a new material "ppMaterial", which I then use in
void OnRenderImage(RenderTexture source, RenderTexture destination)
{
    Graphics.Blit(source, destination, ppMaterial);
}
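For context, here is a minimal sketch of the component this callback sits in (the class name is only illustrative, and ppMaterial is assumed to be assigned in the Inspector):

using UnityEngine;

// Post-processing driver: blits the camera image through the custom shader.
[RequireComponent(typeof(Camera))]
public class ScreenEdgeEffect : MonoBehaviour
{
    public Material ppMaterial; // material created from Custom/ScreenShader

    void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        Graphics.Blit(source, destination, ppMaterial);
    }
}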
In this MWE the shader is only supposed to draw circles at the screen edges, so I made a simple shader:
Shader "Custom/ScreenShader" {
Properties {
}
SubShader {
Pass {
CGPROGRAM
#pragma fragment frag
#pragma vertex vert
#include "UnityCG.cginc"
uniform sampler2D _MainTex;
half4 _MainTex_ST;
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f
{
float4 vertex : SV_POSITION;
float2 uv : TEXCOORD0;
float4 screenPos : TEXCOORD1;
};
float getDist(float2 isIn, float2 pos2){
return sqrt((isIn.x - pos2.x)*(isIn.x - pos2.x) + (isIn.y - pos2.y)*(isIn.y - pos2.y));
}
float4 NearBoundary(float2 isIn, v2f i)
{
float2 pos2 = float2(0.5,0.5);
float2 pos3 = float2(0,0.5);
float2 pos4 = float2(0.5,0.0);
float dist2 = getDist(isIn, pos2);
float dist3 = getDist(isIn, pos3);
float dist4 = getDist(isIn, pos4);
if(dist2<0.1){
return float4(0,0.5,0.5,1);
}
if(dist3.x<0.3){
return float4(0.5,0,0.5,1);
}
if(dist4.x<0.3){
return float4(0.5,0,0.5,1);
}
return tex2D(_MainTex, UnityStereoScreenSpaceUVAdjust(i.uv, _MainTex_ST));
}
v2f vert (appdata v)
{
v2f o;
o.vertex = UnityObjectToClipPos(v.vertex);
o.screenPos = ComputeScreenPos(o.vertex);
o.uv = v.uv;
return o;
}
half4 frag(v2f i) : SV_Target {
float2 screenPos = i.screenPos;
return NearBoundary(screenPos, i);
}
ENDCG
}
}
}
But this does not work: the circles are not drawn at the center and the edges where they should be. How do I get correct screen coordinates in the (0,1) range for each eye?
Answer 0 (score: 0)
It turned out the problem was that the camera providing the coordinates had a target texture set, which seems to mess up the coordinates. screenPos then has to be remapped with
screenPos.x -= unity_StereoEyeIndex * 0.5;
screenPos.x *= 2;
(in single-pass rendering both eyes share one double-wide texture, so this maps each eye's half of the x axis back to (0,1)). I also had to insert
RenderTextureDescriptor d = XRSettings.eyeTextureDesc;
d.width = d.width / 2; // THIS DOES NOT MAKE SENSE
RenderTexture t = new RenderTexture(d);
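For reference, a sketch of how those three lines could be wired up in a component, assuming the texture is meant as the target of the camera that provides the coordinates (class and field names here are only illustrative):

using UnityEngine;
using UnityEngine.XR;

// Creates a per-eye sized render texture from the XR eye texture descriptor
// and assigns it as the target of the coordinate-providing camera.
public class CoordinateCameraSetup : MonoBehaviour
{
    public Camera coordinateCamera; // hypothetical reference, set in the Inspector

    void Start()
    {
        RenderTextureDescriptor d = XRSettings.eyeTextureDesc;
        d.width = d.width / 2; // the halved width from the workaround above
        RenderTexture t = new RenderTexture(d);
        t.Create();
        coordinateCamera.targetTexture = t;
    }
}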
in the C# code, and setting
uv.y = 1 - uv.y;
in the shader (presumably the usual platform-dependent vertical flip when rendering into a texture) finally got rid of all the problems.
Posting this for anyone else running into these super strange issues...