Unity Shader Example 20 (Soft particle)

Shader "Particles/Additive (Soft)" {
Properties {
	_MainTex ("Particle Texture", 2D) = "white" {}
	_InvFade ("Soft Particles Factor", Range(0.01,3.0)) = 1.0
}

Category {
	Tags { "Queue"="Transparent" "IgnoreProjector"="True" "RenderType"="Transparent" }
	Blend One OneMinusSrcColor
	ColorMask RGB
	Cull Off
	Lighting Off
	ZWrite Off

	SubShader {
		Pass {
		
			CGPROGRAM
			#pragma vertex vert
			#pragma fragment frag

			#include "UnityCG.cginc"

			sampler2D _MainTex;
			fixed4 _TintColor;
			
			struct appdata_t {
				float4 vertex : POSITION;
				fixed4 color : COLOR;
				float2 texcoord : TEXCOORD0;
			};

			struct v2f {
				float4 vertex : SV_POSITION;
				fixed4 color : COLOR;
				float2 texcoord : TEXCOORD0;
				
				float4 projPos : TEXCOORD2;
				
			};

			float4 _MainTex_ST;
			
			v2f vert (appdata_t v)
			{
				v2f o;
				o.vertex = mul(UNITY_MATRIX_MVP, v.vertex);
				
				// *** o.projPos = ComputeScreenPos (o.vertex);
				// *** o.projPos.xy = (o.vertex.xy * fixed2(1, _ProjectionParams.x) + o.vertex.w) * 0.5;
				o.projPos.xy = (o.vertex.xy * fixed2(1, -1) + o.vertex.w) * 0.5;	// the -1 hard-codes the flipped-projection case (_ProjectionParams.x == -1)
				o.projPos.zw = o.vertex.zw;

				//COMPUTE_EYEDEPTH(o.projPos.z);
				o.projPos.z = -(mul( UNITY_MATRIX_MV, v.vertex ).z);
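				// o.projPos.z now holds the particle's positive eye-space depth (distance in front of the camera); see note 3 below.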


				o.color = v.color;
				o.texcoord = TRANSFORM_TEX(v.texcoord,_MainTex);
				
				return o;
			}

			sampler2D_float _CameraDepthTexture;
			float _InvFade;
			uniform float4x4 _MVP;
			
			fixed4 frag (v2f i) : SV_Target
			{
				/*
				// Original Unity version, for reference:
				float sceneZ_Ndc = SAMPLE_DEPTH_TEXTURE_PROJ(_CameraDepthTexture, UNITY_PROJ_COORD(i.projPos));
				float sceneZ_Eye = LinearEyeDepth(sceneZ_Ndc);
				*/

				/*
				// OpenGL path
				// *** float sceneZ = LinearEyeDepth (SAMPLE_DEPTH_TEXTURE_PROJ(_CameraDepthTexture, UNITY_PROJ_COORD(i.projPos)));

				i.projPos.xy = i.projPos.xy / i.projPos.w;
				// the sampled value is in [0, 1]; OpenGL NDC z needs [-1, 1]
				float sceneZ_Ndc = tex2D(_CameraDepthTexture, i.projPos.xy).r;
				// [0, 1] -> [-1, 1]
				sceneZ_Ndc = sceneZ_Ndc * 2 - 1;

				// *** float sceneZ_Eye = LinearEyeDepth(sceneZ_Ndc);
				float near = _ProjectionParams.y;
				float far = _ProjectionParams.z;

				// OpenGL projection matrix, entries [3][3] (A) and [3][4] (B)
				// zn = ( A * ze + B ) / (-ze)
				// ze = -B / (zn + A)

				float A = -(far + near) / (far - near);
				float B = -2 * far * near / (far - near);
				float sceneZ_Eye = -B / (sceneZ_Ndc + A);
				sceneZ_Eye = -sceneZ_Eye;
				*/


				///*
				// D3D path
				// *** float sceneZ = LinearEyeDepth (SAMPLE_DEPTH_TEXTURE_PROJ(_CameraDepthTexture, UNITY_PROJ_COORD(i.projPos)));
				i.projPos.xy = i.projPos.xy / i.projPos.w;
				// the sampled value is in [0, 1]
				float sceneZ_Ndc = tex2D(_CameraDepthTexture, i.projPos.xy).r;

				// *** float sceneZ_Eye = LinearEyeDepth(sceneZ_Ndc);
				float near = _ProjectionParams.y;
				float far = _ProjectionParams.z;

				// D3D projection matrix, entries [3][3] (A) and [4][3] (B)
				// zn = ( A * ze + B ) / (ze)
				// ze = B / (zn - A)

				float A = far / (far - near);
				float B = far * near / (near - far);
				float sceneZ_Eye = B / (sceneZ_Ndc - A);
				//*/

				

				float partZ_Eye = i.projPos.z;
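				// Soft-particle fade: when the gap between the scene's eye depth and the particle's
				// own eye depth is small, the particle fades out; _InvFade controls how quickly.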
				float fade = saturate (_InvFade * (sceneZ_Eye - partZ_Eye));
				i.color.a *= fade;
				half4 col = i.color * tex2D(_MainTex, i.texcoord);
				col.rgb *= col.a;
				return col;
				
			}
			ENDCG 
		}
	} 
}
}
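
Note: _CameraDepthTexture is only valid when the camera actually renders a depth texture (deferred rendering, or depthTextureMode set explicitly). A minimal sketch to force it (the class name here is just for illustration):

using UnityEngine;

[RequireComponent(typeof(Camera))]
public class EnableDepthTexture : MonoBehaviour   // illustrative name
{
	void OnEnable()
	{
		// Ask the camera to render a depth texture so _CameraDepthTexture can be sampled.
		GetComponent<Camera>().depthTextureMode |= DepthTextureMode.Depth;
	}
}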

/*

// x = 1 or -1 (-1 if projection is flipped)
// y = near plane
// z = far plane
// w = 1/far plane
uniform float4 _ProjectionParams;
				
inline float4 ComputeScreenPos (float4 pos) {
	float4 o = pos * 0.5f;
	#if defined(UNITY_HALF_TEXEL_OFFSET)
	o.xy = float2(o.x, o.y*_ProjectionParams.x) + o.w * _ScreenParams.zw;
	#else
	o.xy = float2(o.x, o.y*_ProjectionParams.x) + o.w;
	#endif
	
	o.zw = pos.zw;
	return o;
}



#define COMPUTE_EYEDEPTH(o) o = -mul( UNITY_MATRIX_MV, v.vertex ).z

#define SAMPLE_DEPTH_TEXTURE_PROJ(sampler, uv) (tex2Dproj(sampler, uv).r)


inline float LinearEyeDepth( float z )
{
	return 1.0 / (_ZBufferParams.z * z + _ZBufferParams.w);
}


// _ZBufferParams is defined as follows:
//double zc0, zc1;
// OpenGL would be this:
// zc0 = (1.0 - m_FarClip / m_NearClip) / 2.0;
// zc1 = (1.0 + m_FarClip / m_NearClip) / 2.0;
// D3D is this:
//zc0 = 1.0 - m_FarClip / m_NearClip;
//zc1 = m_FarClip / m_NearClip;
// now set _ZBufferParams with (zc0, zc1, zc0/m_FarClip, zc1/m_FarClip);


*/


Notes

1. In Unity3D, the MVP-related matrices exposed on the script side are always in OpenGL form, regardless of the graphics API in use. I wrote a small test to verify this:

using UnityEngine;

// Test component (the class name is arbitrary): attach it to the Camera and
// assign a target object in the Inspector.
public class ProjectionMatrixTest : MonoBehaviour {

    public GameObject target;

    void Start () {
        Camera cam = GetComponent<Camera>();
        Matrix4x4 worldView = cam.worldToCameraMatrix;
        // MultiplyPoint includes the translation; use the world position, not localPosition.
        Vector3 viewPos = worldView.MultiplyPoint(target.transform.position);
        //target.GetComponent<MeshRenderer>().material.SetMatrix("_MVP", worldView * cam.projectionMatrix);
        Debug.Log(viewPos);
        Debug.Log(worldView);

        Debug.Log(cam.projectionMatrix);

        float near = cam.nearClipPlane;
        float far = cam.farClipPlane;

        Debug.Log("--------------------OpenGL------------------------");
        // OpenGL projection: A = -(f + n) / (f - n), B = -2fn / (f - n)
        Debug.Log(-(far + near) / (far - near));
        Debug.Log(-2 * far * near / (far - near));

        Debug.Log("--------------------D3D-------------------------");
        // D3D projection: A = f / (f - n), B = fn / (n - f)
        Debug.Log(far / (far - near));
        Debug.Log(far * near / (near - far));
    }
}
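
For a standard perspective camera, the OpenGL-form matrix returned by cam.projectionMatrix has (using the same 1-based [row][column] indexing as elsewhere in these notes):

$$P_{[3][3]} = A = -\frac{f+n}{f-n}, \qquad P_{[3][4]} = B = -\frac{2fn}{f-n}$$

so the two values printed under "OpenGL" should match those entries of the logged projection matrix, while the "D3D" values will not, even when the editor is running on Direct3D.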


2.

o.projPos = ComputeScreenPos (o.vertex);  

can be replaced with:

o.projPos.xy = (o.vertex.xy * fixed2(1, _ProjectionParams.x) + o.vertex.w) * 0.5;

o.projPos.zw = o.vertex.zw;


(This just remaps xy from [-w, w] to [0, w].)

// x = 1 or -1 (-1 if projection is flipped)
// y = near plane
// z = far plane
// w = 1/far plane
uniform float4 _ProjectionParams;



SAMPLE_DEPTH_TEXTURE_PROJ(_CameraDepthTexture, UNITY_PROJ_COORD(i.projPos))

can be replaced with:

i.projPos.xy = i.projPos.xy / i.projPos.w;
float sceneZ_Ndc = tex2D(_CameraDepthTexture, i.projPos.xy).r;


i.projPos.xy = i.projPos.xy / i.projPos.w;

(This takes xy from [0, w] to [0, 1], so it can be used as a sampling coordinate.)
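
A quick check of the remap, writing $x_c$ for a clip-space coordinate (so $x_c / w \in [-1, 1]$, and ignoring the possible y flip from _ProjectionParams.x):

$$\frac{x_c \cdot 1 + w}{2} \in [0, w], \qquad \frac{1}{w} \cdot \frac{x_c + w}{2} = \frac{x_c / w + 1}{2} \in [0, 1]$$

so after the divide by w in the fragment shader, the result is directly usable as a texture coordinate.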



3.

//COMPUTE_EYEDEPTH(o.projPos.z);


is replaced with


o.projPos.z = -(mul( UNITY_MATRIX_MV, v.vertex ).z);


Why the minus sign? Print the camera's view matrix and you will see that, in view space, the camera looks down the -z axis, just as in OpenGL.

Suppose the camera is at (0, 0, 0) and a cube is at (0, 0, 10):

mul( UNITY_MATRIX_MV, v.vertex ).z evaluates to -10,

so

o.projPos.z = -(mul( UNITY_MATRIX_MV, v.vertex ).z); gives the object's true depth relative to the camera.
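
To make this concrete: for a camera at the origin with no rotation, Unity's worldToCameraMatrix is simply a z-axis flip (a sketch for this specific setup):

$$V = \begin{pmatrix} 1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & -1 & 0 \\ 0 & 0 & 0 & 1 \end{pmatrix}, \qquad V \begin{pmatrix} 0 \\ 0 \\ 10 \\ 1 \end{pmatrix} = \begin{pmatrix} 0 \\ 0 \\ -10 \\ 1 \end{pmatrix}$$

and negating the z component recovers the actual distance in front of the camera.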


4.

float sceneZ = LinearEyeDepth (SAMPLE_DEPTH_TEXTURE_PROJ(_CameraDepthTexture, UNITY_PROJ_COORD(i.projPos)));

is replaced with

i.projPos.xy = i.projPos.xy / i.projPos.w;
// the sampled value is in [0, 1]
float sceneZ_Ndc = tex2D(_CameraDepthTexture, i.projPos.xy).r;


// *** float sceneZ_Eye = LinearEyeDepth(sceneZ_Ndc);
float near = _ProjectionParams.y;
float far = _ProjectionParams.z;


// D3D projection matrix, entries [3][3] (A) and [4][3] (B)
// zn = ( A * ze + B ) / (ze)
// ze = B / (zn - A)


// D3D projection matrix
float A = far / (far - near);
float B = far * near / (near - far);
float sceneZ_Eye = B / (sceneZ_Ndc - A);



Now look at the definition of LinearEyeDepth:

inline float LinearEyeDepth( float z )
{
return 1.0 / (_ZBufferParams.z * z + _ZBufferParams.w);
}



// _ZBufferParams is defined as follows:
//double zc0, zc1;
// OpenGL would be this:
// zc0 = (1.0 - m_FarClip / m_NearClip) / 2.0;
// zc1 = (1.0 + m_FarClip / m_NearClip) / 2.0;
// D3D is this:
//zc0 = 1.0 - m_FarClip / m_NearClip;
//zc1 = m_FarClip / m_NearClip;
// now set _ZBufferParams with (zc0, zc1, zc0/m_FarClip, zc1/m_FarClip);


In practice, _ZBufferParams currently holds the D3D-case values, that is:

D3D:

_ZBufferParams = ( (n - f) / n, f / n, (n - f) / (nf), 1 / n )


In the D3D projection matrix, the entries that transform z, [3][3] (A) and [4][3] (B), are:

A = f / (f - n)

B = fn / (n - f)


The D3D projection transforms z as follows (zn = z_NDC, ze = z_eye):

 zn = ( A * ze + B ) / (ze)


ze = B / (zn - A)


Substituting these values in shows that

1.0 / (_ZBufferParams.z * z + _ZBufferParams.w); exists precisely to convert z_ndc into z_eye.

The D3D projection matrix is used here because the depth texture's range is [0, 1], so this formula converts a [0, 1] depth value straight into eye space, with no sign flip required:

the D3D convention works along the +z axis by default (its projection maps z into [0, 1], and that mapping can be inverted directly).

In OpenGL the projection is zn = (A*ze + B) / (-ze), whereas in D3D it is zn = (A*ze + B) / ze; inverting the OpenGL projection therefore gives -ze, which must be negated, while inverting the D3D projection gives ze directly, so no negation is needed.
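
Checking this explicitly: with the D3D values of _ZBufferParams, LinearEyeDepth reduces to the ze = B / (zn - A) formula above:

$$\frac{1}{\frac{n-f}{nf}\,z + \frac{1}{n}} = \frac{nf}{(n-f)\,z + f} = \frac{fn}{f - (f-n)\,z} = \frac{\frac{fn}{n-f}}{z - \frac{f}{f-n}} = \frac{B}{z - A}$$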


5.

The approach above is a bit hard to follow, so here is an alternative:


float sceneZ = LinearEyeDepth (SAMPLE_DEPTH_TEXTURE_PROJ(_CameraDepthTexture, UNITY_PROJ_COORD(i.projPos)));

is replaced with

i.projPos.xy = i.projPos.xy / i.projPos.w;
// the sampled value is in [0, 1]; OpenGL NDC z needs [-1, 1]
float sceneZ_Ndc = tex2D(_CameraDepthTexture, i.projPos.xy).r;
//[0-1] -> [-1, 1]
sceneZ_Ndc = sceneZ_Ndc * 2 - 1;


// *** float sceneZ_Eye = LinearEyeDepth(sceneZ_Ndc);
float near = _ProjectionParams.y;
float far = _ProjectionParams.z;


// OpenGL projection matrix, entries [3][3] (A) and [3][4] (B)
// zn = ( A * ze + B ) / (-ze)
// ze = -B / (zn + A)


float A = -(far + near) / (far - near);
float B = -2 * far * near / (far - near);
float sceneZ_Eye = -B / (sceneZ_Ndc + A);
sceneZ_Eye = -sceneZ_Eye;


Note that because OpenGL NDC z is in [-1, 1], the depth texture's [0, 1] value must first be remapped; without this step the result is wrong.

After that, the reasoning is the same as in the D3D case above.


// The OpenGL projection matrix entries [3][3] (A) and [3][4] (B) are:

A:-(f + n) / (f - n)

B: -2(fn) / (f - n)


Transformation formulas:

// zn = ( A * ze + B ) / (-ze)
// ze = -B / (zn + A)


For OpenGL: _ZBufferParams = ( (n - f)/(2n), (n + f)/(2n), (n - f)/(2fn), (n + f)/(2fn) )


Finally, remember the

sceneZ_Eye = -sceneZ_Eye;

step: in OpenGL the projection is zn = (A*ze + B) / (-ze), while in D3D it is zn = (A*ze + B) / ze, so inverting the OpenGL projection yields -ze (which must be negated), whereas D3D yields ze directly and needs no negation.


This path produces exactly the same result.
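
As a sanity check, substitute zn = 2d - 1 (d being the raw [0, 1] depth) into the OpenGL branch and take the positive distance:

$$-z_e = \frac{B}{z_n + A} = \frac{-\frac{2fn}{f-n}}{(2d-1) - \frac{f+n}{f-n}} = \frac{2fn}{(f+n) - (2d-1)(f-n)} = \frac{fn}{f - d\,(f-n)}$$

which is exactly the value the D3D branch computes, so both code paths yield the same eye depth.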



References:

http://forum.unity3d.com/threads/_zbufferparams-values.39332/

http://www.humus.name/temp/Linearize%20depth.txt



