Unity Shader: 优化GPU代码--用step()代替if else等条件语句。

普通的卡通着色Shader:

先看一个Shader,卡通着色。由于卡通着色需要对不同渲染区域进行判定,比较适合做案例。

Shader "Unlit/NewToonShading"
{
	Properties
	{
		_Shininess("Shininess",float)=1
		_Edge("Edge Scale",range(0,1))=0.2
		_FinalColor("Final Color",Color)=(0.5,0.5,0.5,1)
		_EdgeColor("Edge Color",Color)=(0,0,0,1)
	}

	SubShader
	{
		Tags { "RenderType"="Opaque"}
		LOD 100

		Pass
		{
			Tags {"LightMode"="Vertex" }
			CGPROGRAM
			#pragma vertex vert
			#pragma fragment frag

			#include "UnityCG.cginc"

			struct appdata
			{
				float4 vertex : POSITION;
				float3 normal:NORMAL;
			};

			// All lighting vectors are computed in world space in the vertex
			// stage and re-normalized per fragment.
			struct v2f
			{
				float4 vertex : SV_POSITION;
				float3 N:TEXCOORD0;		// world-space normal
				float3 L:TEXCOORD1;		// direction from surface to light
				float3 H:TEXCOORD2;		// Blinn half vector between L and V
				float3 V:TEXCOORD3;		// direction from surface to camera
			};

			float _Shininess;
			float _Edge;
			float4 _FinalColor;
			float4 _EdgeColor;
			float4 _LightPosition_World;	// NOTE(review): never read in this shader — candidate for removal

			v2f vert (appdata v)
			{
				v2f o=(v2f)0;

				float4 worldPos=mul(unity_ObjectToWorld,v.vertex);

				// unity_LightPosition is stored in view space; bring vertex
				// light 1 back to world space with the inverse view matrix.
				float4 lightPos_World=mul(UNITY_MATRIX_I_V,unity_LightPosition[1]);

				// FIX: a normal must be transformed by the inverse-transpose of
				// the model matrix — plain unity_ObjectToWorld is wrong under
				// non-uniform scale, and mul(float4x4,float3) mismatches types.
				// UnityObjectToWorldNormal (UnityCG.cginc) does both correctly
				// and returns a normalized vector.
				o.N=UnityObjectToWorldNormal(v.normal);
				// FIX: take .xyz before subtracting (was float4 - float3).
				o.L=normalize(lightPos_World.xyz-worldPos.xyz);
				o.V=normalize(_WorldSpaceCameraPos-worldPos.xyz);
				o.H=normalize(o.L+o.V);

				o.vertex = UnityObjectToClipPos(v.vertex);
				return o;
			}

			fixed4 frag (v2f i) : SV_Target
			{
				// Interpolation denormalizes the vectors; renormalize per pixel.
				i.N=normalize(i.N);
				i.L=normalize(i.L);
				i.H=normalize(i.H);
				i.V=normalize(i.V);

				float4 Kd=_FinalColor;	// diffuse term
				float4 Ks=0;			// specular term
				fixed4 col;				// final color
				// Edge test: a surface nearly perpendicular to the view
				// direction is drawn as outline.
				float edge=max(dot(i.N,i.V),0);
				if(edge<_Edge){
					return _EdgeColor;	// early-out with the outline color
				}

				// Dark-region test.
				float diffuseLight=max(dot(i.N,i.L),0);
				if(diffuseLight<=0.1f){		// dark region
					Kd*=0.5f;				// halve the diffuse brightness
					Ks=0;					// no specular; if diffuseLight<=0 the angle between
											// N and L exceeds 90°, i.e. the light is behind the surface
					col=Kd+Ks;
					return col;
				}

				// Specular test (Blinn-Phong highlight, hard-thresholded for toon look).
				float specularLight=pow(max(dot(i.N,i.H),0),_Shininess);
				if(specularLight>=0.95f){
					Ks=float4(1.0f,1.0f,1.0f,0.0f);		// snap the highlight to white
				}

				col=Kd+Ks;
				return col;
			}
			ENDCG
		}
	}
}

这里写图片描述
(图1:NewToonShading渲染效果)

用step()进行优化的原理:

在上面Shader的片段着色器中,我以正常cpu编程的逻辑进行了编程,例如,if(edge<_Edge){return _EdgeColor;},如果此像素被判定为边缘,则直接返回边缘颜色,那么则不用再进行之后的运算了。以此类推后面又用if else 分别进行了高光,亮光,暗光区的判断。但是这种优化对于gpu编程来讲是无效的。因为对于GPU来讲,各个顶点各个像素都在进行大量的并行运算,每个片段着色器都在同步运行,边缘地带像素的片段着色器虽然率先return,但是它依然要等待最后一个return的像素。只有所有像素全部完成计算,才会进行下一次运算, 在片段着色器中,每个片段处理器每条指令操作上百个像素,如果有些片段(像素)采取一个分支而另一些片段采取另一个分支,则所有片段都会执行两个分支,但只在每个片段应该采取的分支上写入寄存器。不论何种策略,对追求高并行度的GPU来讲,分支是必须要同步的,那么最慢的case就会造成短板效应。另外,if/endif等流程控制操作对GPU来讲有较高的指令开销(4个时钟周期,Geforce6)(修改1)。因此在GPU编程中,if else ,switch case和嵌套if语句等等是不推荐的,会影响GPU的工作效率。相应的,可以用step()等函数进行替换,用阶梯函数的思维来构建条件语句。这样,所有的线程都执行完全一样的代码,加大了并行化计算的可能性,消除条件分支指令的性能损耗,在很多方面对GPU都是有益的。

Step()版本:

Shader "Unlit/NewToonShading_StepVersion"
{
	Properties
	{
		_Shininess("Shininess",float)=1
		_Edge("Edge Scale",range(0,1))=0.2
		_FinalColor("Final Color",Color)=(0.5,0.5,0.5,1)
		_EdgeColor("Edge Color",Color)=(0,0,0,1)
	}

	SubShader
	{
		Tags { "RenderType"="Opaque"}
		LOD 100

		Pass
		{
			Tags {"LightMode"="Vertex" }
			CGPROGRAM
			#pragma vertex vert
			#pragma fragment frag

			#include "UnityCG.cginc"

			struct appdata
			{
				float4 vertex : POSITION;
				float3 normal:NORMAL;
			};

			// All lighting vectors are computed in world space in the vertex
			// stage and re-normalized per fragment.
			struct v2f
			{
				float4 vertex : SV_POSITION;
				float3 N:TEXCOORD0;		// world-space normal
				float3 L:TEXCOORD1;		// direction from surface to light
				float3 H:TEXCOORD2;		// Blinn half vector between L and V
				float3 V:TEXCOORD3;		// direction from surface to camera
			};

			float _Shininess;
			float _Edge;
			float4 _FinalColor;
			float4 _EdgeColor;
			float4 _LightPosition_World;	// NOTE(review): never read in this shader — candidate for removal

			v2f vert (appdata v)
			{
				v2f o=(v2f)0;

				float4 worldPos=mul(unity_ObjectToWorld,v.vertex);

				// unity_LightPosition is stored in view space; bring vertex
				// light 1 back to world space with the inverse view matrix.
				float4 lightPos_World=mul(UNITY_MATRIX_I_V,unity_LightPosition[1]);

				// FIX: a normal must be transformed by the inverse-transpose of
				// the model matrix — plain unity_ObjectToWorld is wrong under
				// non-uniform scale, and mul(float4x4,float3) mismatches types.
				// UnityObjectToWorldNormal (UnityCG.cginc) does both correctly
				// and returns a normalized vector.
				o.N=UnityObjectToWorldNormal(v.normal);
				// FIX: take .xyz before subtracting (was float4 - float3).
				o.L=normalize(lightPos_World.xyz-worldPos.xyz);
				o.V=normalize(_WorldSpaceCameraPos-worldPos.xyz);
				o.H=normalize(o.L+o.V);

				o.vertex = UnityObjectToClipPos(v.vertex);
				return o;
			}

			// Branch-free toon shading: every threshold test from the if/else
			// version is replaced by step(), producing 0/1 masks that are
			// folded into one final blend expression.
			fixed4 frag (v2f i) : SV_Target
			{
				// Interpolation denormalizes the vectors; renormalize per pixel.
				i.N=normalize(i.N);
				i.L=normalize(i.L);
				i.H=normalize(i.H);
				i.V=normalize(i.V);

				float4 Kd=_FinalColor;	// diffuse term
				float4 Ks=0;			// specular term
				fixed4 col;				// final color
				// Edge mask.
				float edge=max(dot(i.N,i.V),0);
				edge=step(edge,_Edge); //if(edge<=_Edge) edge=1 , else edge=0
				_EdgeColor*=edge;      // zero out the outline color on non-edge pixels

				// Specular mask (Blinn-Phong, hard-thresholded).
				float specularLight=pow(max(dot(i.N,i.H),0),_Shininess);
				specularLight=step(0.95f,specularLight);		//if specularLight>=0.95f specularLight=1 else =0

				// Lit/dark mask.
				float diffuseLight=max(dot(i.N,i.L),0);
				diffuseLight=step(0.1f,diffuseLight); //if(diffuseLight>=0.1f) diffuseLight=1   else diffuseLight=0

				Ks=specularLight*diffuseLight;		//if diffuseLight=0, Ks=0; else Ks=specularLight(1 or 0)

				diffuseLight=diffuseLight*0.5f+0.5f;	  //change 1 or 0 to 1 or 0.5

			   //0.5Kd or Kd  1or0  1or0       0or1	  0orEdgeColor
				col=(Kd*diffuseLight+Ks)*(1.0f-edge)+_EdgeColor;
				return col;
			}
			ENDCG
		}
	}
}

举例解释:

在HLSL中, step(a,b)即是当b>=a时返回1,否则返回0,换句话说即是当a<=b时返回1,否则返回0。因此可以把被比较数灵活地插入a或b的位置,完成小于或大于的比较。由于返回值是0或1,它无法直观地替代if else逻辑判断,需要结合改造算法,例如:

				//边缘判定
				float edge=max(dot(i.N,i.V),0);

				if(edge<_Edge){
					return _EdgeColor;
				}

上文中,直接返回的_EdgeColor,将在下文中变为一个000或保持自身值的rgb变量,新增的一个edge变量会变为0或1的开关,并在最后的计算步骤中参与最终颜色的计算:

				//边缘判定
				float edge=max(dot(i.N,i.V),0);

				edge=step(edge,_Edge); //if(edge<=_Edge) edge=1 , else edge=0

				_EdgeColor*=edge;
				//...中间过程略...
			//0.5Kd or Kd  1or0		1or0       0or1	  0orEdgeColor	
				col=(Kd*diffuseLight+Ks)*(1.0f-edge)+_EdgeColor;

如果当前像素为边缘,edge为1,那么在最终颜色计算中,不论其他变量如何,它都会变为一个0+_EdgeColor的值,即是边缘颜色。如果此像素为非边缘地带,edge为0,_EdgeColor为0,那么最终颜色为 “其他颜色”*1+0,边缘颜色被剔除。

以此类推,原版中高光,亮光与暗光区域判断的返回值也都变成了参与到最终颜色计算中的变量。具体逻辑可见step()版本各行后面注释。

测试

这里写图片描述
(图2:NewToonShading与NewToonShading_StepVersion渲染效果比较)

这里写图片描述
(图3:NewToonShading与NewToonShading_StepVersion渲染效果比较)

两个版本的FPS波动范围基本相同,有可能是计算量太小或此Shader内容对此问题不太敏感,但起码证明if else版本按照CPU的思维提前返回,相对于step()版本进行所有的计算并没有起到任何优势。 第一是if分支内计算量较小,未造成太明显的短板效应与帧速瓶颈。第二可能是step版本虽省去了分支指令,但是增加了计算指令,抵消后优化效果过于微弱。(修改2)

汇编版本:

汇编后的片段着色器代码(部分截取):

if else版本:

   0: dp3 r0.x, v1.xyzx, v1.xyzx
   1: rsq r0.x, r0.x
   2: mul r0.xyz, r0.xxxx, v1.xyzx
   3: dp3 r0.w, v4.xyzx, v4.xyzx
   4: rsq r0.w, r0.w
   5: mul r1.xyz, r0.wwww, v4.xyzx
   6: dp3 r0.w, r0.xyzx, r1.xyzx
   7: max r0.w, r0.w, l(0.000000)
   8: lt r0.w, r0.w, cb0[2].y
   9: if_nz r0.w
  10:   mov o0.xyzw, cb0[4].xyzw
  11:   ret 
  12: endif 
  13: dp3 r0.w, v2.xyzx, v2.xyzx
  14: rsq r0.w, r0.w
  15: mul r1.xyz, r0.wwww, v2.xyzx
  16: dp3 r0.w, r0.xyzx, r1.xyzx
  17: max r0.w, r0.w, l(0.000000)
  18: ge r0.w, l(0.100000), r0.w
  19: if_nz r0.w
  20:   mul o0.xyzw, cb0[3].xyzw, l(0.500000, 0.500000, 0.500000, 0.500000)
  21:   ret 
  22: endif 
  23: dp3 r0.w, v3.xyzx, v3.xyzx
  24: rsq r0.w, r0.w
  25: mul r1.xyz, r0.wwww, v3.xyzx
  26: dp3 r0.x, r0.xyzx, r1.xyzx
  27: max r0.x, r0.x, l(0.000000)
  28: log r0.x, r0.x
  29: mul r0.x, r0.x, cb0[2].x
  30: exp r0.x, r0.x
  31: ge r0.x, r0.x, l(0.950000)
  32: and r0.xyzw, r0.xxxx, l(0x3f800000, 0x3f800000, 0x3f800000, 0)
  33: add o0.xyzw, r0.xyzw, cb0[3].xyzw
  34: ret 

第9和第19行两个if_nz分支指令。从第31行 ge r0.x, r0.x, l(0.950000) 看来编译器是把第三个if分支优化掉了。

step()版本:

   0: dp3 r0.x, v3.xyzx, v3.xyzx
   1: rsq r0.x, r0.x
   2: mul r0.xyz, r0.xxxx, v3.xyzx
   3: dp3 r0.w, v1.xyzx, v1.xyzx
   4: rsq r0.w, r0.w
   5: mul r1.xyz, r0.wwww, v1.xyzx
   6: dp3 r0.x, r1.xyzx, r0.xyzx				//dot(i.N,i.H)
   7: max r0.x, r0.x, l(0.000000)				//max(dot(i.N,i.H),0)
   8: log r0.x, r0.x							//
   9: mul r0.x, r0.x, cb0[2].x					//pow(max(dot(i.N,i.H),0),_Shininess);
  10: exp r0.x, r0.x							//
  11: ge r0.x, r0.x, l(0.950000)				//specularLight=step(0.95f,specularLight);	
  12: dp3 r0.y, v2.xyzx, v2.xyzx
  13: rsq r0.y, r0.y
  14: mul r0.yzw, r0.yyyy, v2.xxyz
  15: dp3 r0.y, r1.xyzx, r0.yzwy				//dot(i.N,i.L)
  16: max r0.y, r0.y, l(0.000000)				//float diffuseLight=max(dot(i.N,i.L),0);
  17: ge r0.y, r0.y, l(0.100000)				//diffuseLight=step(0.1f,diffuseLight); 
  18: and r0.xz, r0.xxyx, l(0x3f800000, 0, 0x3f800000, 0)
  19: movc r0.y, r0.y, l(1.000000), l(0.500000)
  20: mul r0.x, r0.z, r0.x
  21: mad r0.xyzw, cb0[3].xyzw, r0.yyyy, r0.xxxx
  22: dp3 r1.w, v4.xyzx, v4.xyzx
  23: rsq r1.w, r1.w
  24: mul r2.xyz, r1.wwww, v4.xyzx
  25: dp3 r1.x, r1.xyzx, r2.xyzx				//dot(i.N,i.V)
  26: max r1.x, r1.x, l(0.000000)				//float edge=max(dot(i.N,i.V),0);
  27: ge r1.x, cb0[2].y, r1.x					//edge=step(edge,_Edge);
  28: movc r0.xyzw, r1.xxxx, l(0,0,0,0), r0.xyzw
  29: and r1.x, r1.x, l(0x3f800000)
  30: mad o0.xyzw, cb0[4].xyzw, r1.xxxx, r0.xyzw
  31: ret 

感觉edge的计算好像是移到了后面(25-27)。8-10用log,mul,exp实现源码的pow(),利用的是恒等式 pow(a,b) = exp(b·log(a)):第8行对底数取log,第9行乘以指数_Shininess,第10行再用exp还原。step()的实现都是用的一条ge指令:

格式:ge dest src0 src1

做src0 >= src1比较,如果为真,0xFFFFFFFF写入到dest,否则写入0x00000000。

————————————————————————————————
参考:
GPU gems 2 - Nvidia
Microsoft HLSL: https://docs.microsoft.com/en-us/windows/win32/direct3dhlsl/ge–sm4—asm-
维护日志:
2017-9-20:修改1,修改2
2020-8-16:维护

  • 24
    点赞
  • 72
    收藏
    觉得还不错? 一键收藏
  • 9
    评论
Projective texture mapping in Unity Shader is a technique used to project a texture onto an object in a way that simulates the effect of a slide projector. It can be used to create various effects like projecting a spotlight texture onto a surface, projecting a decal onto a curved surface, or creating a shadow map for an object. To implement projective texture mapping in Unity Shader, you need to use a combination of vertex and fragment shaders. The vertex shader calculates the projective transformation matrix, which is used to transform the texture coordinates in the fragment shader. The fragment shader samples the texture and applies it to the object's surface. Here's a simple example of a vertex shader that calculates the projective transformation matrix: ``` Shader "Custom/ProjectiveTextureMapping" { Properties { _MainTex ("Texture", 2D) = "white" {} _Projector ("Projector", 3D) = "" {} } SubShader { Pass { CGPROGRAM #pragma vertex vert #pragma fragment frag uniform sampler2D _MainTex; uniform float4x4 unity_Projector; float4x4 unity_ObjectToWorld; struct appdata { float4 vertex : POSITION; float3 normal : NORMAL; float2 uv : TEXCOORD0; }; struct v2f { float2 uv : TEXCOORD0; float4 vertex : SV_POSITION; }; v2f vert (appdata v) { v2f o; o.vertex = UnityObjectToClipPos(v.vertex); o.uv = mul(unity_Projector, v.vertex).xy; return o; } fixed4 frag (v2f i) : SV_Target { return tex2D(_MainTex, i.uv); } ENDCG } } } ``` In this example, the `_MainTex` property is the texture to be projected, and the `_Projector` property is the object that will project the texture. The `unity_Projector` variable is a matrix that transforms the texture coordinates from object space to clip space. The `vert` function calculates the transformed texture coordinates and passes them to the fragment shader in the `v2f` struct. The fragment shader simply samples the texture using the transformed texture coordinates and returns the color. 
You can use this shader by assigning it to a material and setting the `_MainTex` and `_Projector` properties to appropriate textures and objects, respectively.

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 9
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值