Unity Shader Essentials (Shader入门精要) - 4 - Advanced Topics

Screen Post-Processing Effects

//Handle the platform difference: OpenGL's texture origin is at the bottom-left, DirectX's is at the top-left
			#if UNITY_UV_STARTS_AT_TOP
			if(_MainTex_TexelSize.y<0)
				o.uv_depth.y = 1-o.uv_depth.y;
			#endif

A few post-effects

Fog

Reconstruct each pixel's world position from the depth texture, then blend the fog in based on it.

Fog factor:

The factor used to blend the fog color with the original color.

Unity's built-in fog modes: linear, exponential, and squared exponential.
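For reference (these match the formulas given in the book and the Unity docs), with |z| the view-space depth of the pixel and f the blend factor used as finalColor = lerp(fogColor, originalColor, f):

linear : f = (d_max - |z|) / (d_max - d_min)
exp    : f = e^(-density * |z|)
exp2   : f = e^(-(density * |z|)^2)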
C#:

using System.Collections;
using System.Collections.Generic;
using UnityEngine;
//Fog effect
[ExecuteInEditMode]
[RequireComponent(typeof(Camera))]
public class PostTest6 : MonoBehaviour
{
    //Fog parameters
    public float fogDensity = 1f;
    public Color fogColor = Color.white;

    //Height range the fog covers
    public float fogStart = 0f;
    public float fogEnd = 2.0f;

    public Shader sha;
    public Material mat;

    //Camera whose view and projection parameters we read
    public Camera nowCamera;

    Matrix4x4 oldMatrix;

    private void Start()
    {
        nowCamera.depthTextureMode |= DepthTextureMode.Depth;
           
        mat = new Material(sha);
    }

    private void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
       

        //Camera parameters
        float fov = nowCamera.fieldOfView;
        float near = nowCamera.nearClipPlane;
        float far = nowCamera.farClipPlane;
        float aspect = nowCamera.aspect;

        //Near-plane half height and the offsets from its center to the right and to the top
        float halfHeight = near * Mathf.Tan(fov * 0.5f * Mathf.Deg2Rad);
        Vector3 toRight = nowCamera.transform.right * halfHeight * aspect;
        Vector3 toTop = nowCamera.transform.up * halfHeight;


        //Vector from the camera to the top-left corner of the near plane
        Vector3 TopLeft = nowCamera.transform.forward * near + toTop - toRight;
        float scale = TopLeft.magnitude / near;//ratio: for any point, (length of its corner ray) / (its depth) = TopLeft.magnitude / near
        TopLeft.Normalize();
        TopLeft *= scale;

        //Camera to the top-right corner
        Vector3 TopRight = nowCamera.transform.forward * near + toTop + toRight;
        TopRight.Normalize();
        TopRight *= scale;

        //Bottom-left
        Vector3 BottomLeft = nowCamera.transform.forward * near - toTop - toRight;
        BottomLeft.Normalize();
        BottomLeft *= scale;

        //Bottom-right
        Vector3 BottomRight = nowCamera.transform.forward * near - toTop + toRight;
        BottomRight.Normalize();
        BottomRight *= scale;

        //Pack the data into a matrix: each row holds one corner direction scaled by the ratio
        Matrix4x4 mar = Matrix4x4.identity;
        mar.SetRow(0, BottomLeft);
        mar.SetRow(1, BottomRight);
        mar.SetRow(2, TopRight);
        mar.SetRow(3, TopLeft);
        mat.SetMatrix("_ViewRectInf", mar);//传递给shader

        //Inverse of the view-projection matrix (projection * worldToCamera); note this fog shader never actually reads _NDCToWorld
        Matrix4x4 InverseMatrix = (nowCamera.projectionMatrix * nowCamera.worldToCameraMatrix).inverse;
        mat.SetMatrix("_NDCToWorld", InverseMatrix);

        mat.SetFloat("_FogDensity", fogDensity);
        mat.SetColor("_FogColor", fogColor);
        mat.SetFloat("_FogStart", fogStart);
        mat.SetFloat("_FogEnd", fogEnd);


        Graphics.Blit(source, destination, mat);
    }



}

shader:

// Upgrade NOTE: replaced 'mul(UNITY_MATRIX_MVP,*)' with 'UnityObjectToClipPos(*)'

Shader "Mytest/FogTestShader"{
	Properties{
		_MainTex("ScreenTex",2D)="white"{}
		_FogDensity("FogDensity",float)=1
		_FogColor("FogColor",Color)=(1,1,1,1)
		_FogStart("FogStart",float)=0.0
		_FogEnd("FogEnd",float)=1.0
	}
	SubShader{



		CGINCLUDE

		#include "UnityCG.cginc"
			float4x4 _ViewRectInf;
			float4x4 _NDCToWorld;

			sampler2D _MainTex;
			float4 _MainTex_TexelSize;
			sampler2D _CameraDepthTexture;
			float _FogDensity;
			fixed4 _FogColor;
			float _FogStart;
			float _FogEnd;

			struct v2f{
				float4 pos:SV_POSITION;
				float2 uv:TEXCOORD0;
				float2 uv_depth:TEXCOORD1;
				float4 inf:TEXCOORD2;
			};

			v2f vert(appdata_img v){
				v2f o;
				o.pos = UnityObjectToClipPos(v.vertex);
				o.uv = v.texcoord;
				o.uv_depth = v.texcoord;

				int index = 0;
				//bottom-left corner
				if(v.texcoord.x<0.5 && v.texcoord.y<0.5){
					index = 0;
				}else if(v.texcoord.x >0.5 && v.texcoord.y<0.5){//bottom-right
					index = 1;
				}else if(v.texcoord.x >0.5 && v.texcoord.y > 0.5){//top-right
					index = 2;
				}else{//top-left
					index = 3;
				}

				#if (UNITY_UV_STARTS_AT_TOP)
				if(_MainTex_TexelSize.y<0){
					o.uv_depth.y = 1 - o.uv_depth.y;
					index = 3-index;
				}
				#endif

				o.inf = _ViewRectInf[index];

				return o;
			}

			fixed4 frag(v2f i):SV_Target{
				//Sample the depth texture and convert to linear eye-space depth
				float lineard = LinearEyeDepth(SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture,i.uv_depth));
				//world position = camera world position + (corner direction * linear depth)
				float3 worldPos = _WorldSpaceCameraPos + lineard * i.inf.xyz;

				float fogDensity = (_FogEnd - worldPos.y)/(_FogEnd - _FogStart);
				fogDensity = saturate(fogDensity * _FogDensity);

				fixed4 finalColor = tex2D(_MainTex,i.uv);
				finalColor.rgb = lerp(finalColor.rgb,_FogColor.rgb,fogDensity);

				return finalColor;	
			}


		ENDCG

		Pass{
			ZTest Always
			ZWrite Off
			Cull Off

			CGPROGRAM

			#pragma vertex vert
			#pragma fragment frag

			
			ENDCG
		}





	}
	FallBack Off
}

Depth of Field

Gaussian Blur

Effect: blur - the hazy image you see when you first open your eyes, a blurred background, and so on.

More broadly:
Blur variants:

  • Mean (box) blur: all kernel weights are equal and sum to 1
  • Median blur: sort the neighborhood pixels and take the middle value
  • Gaussian blur: the kernel is a Gaussian kernel
    Background:
    Convolution
    Code:
    C# script that drives the render buffers:
using System.Collections;
using System.Collections.Generic;
using UnityEngine;

[ExecuteInEditMode]
[RequireComponent(typeof(Camera))]
public class PostTest2 : MonoBehaviour
{
    [Range(1, 8)]
    public int downsample = 1;

    [Range(0,4)]
    public int iterations = 1;

    [Range(0.2f, 3f)]
    public float blurSize = 0.2f;


  
    public Shader useShader;




    public Material material;
    // Start is called before the first frame update
    void Start()
    {
        
        material = new Material(useShader);

    }

    private void OnRenderImage1(RenderTexture source, RenderTexture destination)
    {
        //Screen size
        //int width = source.width;
        //int height = source.height;
        //Downsample the source to reduce the number of pixels processed
        int width = source.width/downsample;
        int height = source.height/downsample;

        //Temporary buffer
        RenderTexture tempbuffer = RenderTexture.GetTemporary(width,height,0);
        tempbuffer.filterMode = FilterMode.Bilinear;//bilinear filtering



        //Run the two blur directions one after the other
        Graphics.Blit(source, tempbuffer, material, 0);
        Graphics.Blit(tempbuffer, destination, material,1);

        RenderTexture.ReleaseTemporary(tempbuffer);
       
        //material.SetFloat("_EdgeOnly", EdgeOnly);
        //material.SetColor("_BackGroundColor", BackGroundColor);
        //material.SetColor("_EdgeColor", EdgeColor);

        //Graphics.Blit(source, destination, material);
    }


    private void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        int width = source.width / downsample;
        int height = source.height / downsample;

        RenderTexture tempbuffer = RenderTexture.GetTemporary(width, height, 0);
        tempbuffer.filterMode = FilterMode.Bilinear;

        //Downsample the source image
        Graphics.Blit(source, tempbuffer);

        for(int i = 0;i< iterations; i++)
        {
            material.SetFloat("_BlurSize", i * blurSize+1f);
            RenderTexture buffer0 = RenderTexture.GetTemporary(width, height, 0);
            //Gaussian pass in one direction; store the result in buffer0
            Graphics.Blit(tempbuffer, buffer0, material, 0);
            //Release the buffer we no longer need
            RenderTexture.ReleaseTemporary(tempbuffer);
            tempbuffer = buffer0;
            //Second direction
            buffer0 = RenderTexture.GetTemporary(width, height, 0);
            Graphics.Blit(tempbuffer, buffer0, material, 1);
            //Release the extra buffer
            RenderTexture.ReleaseTemporary(tempbuffer);
            tempbuffer = buffer0;
            //The latest blurred result is left in tempbuffer for the next iteration
        }
        //Render to the screen
        Graphics.Blit(tempbuffer, destination);

        RenderTexture.ReleaseTemporary(tempbuffer);

    }
}

Shader (the Gaussian passes):

Shader "Mytest/GSShader"{
	Properties{
		_MainTex("Screen Texture",2D)="white"{}
		_BlurSize("BlurSize",float)=0.6

	}
	SubShader{
		ZTest Always
		Cull Off
		ZWrite Off

		CGINCLUDE

		sampler2D _MainTex;
		float4 _MainTex_TexelSize;
		float _BlurSize;

		
		#include "UnityCG.cginc"

		struct v2f{
			float4 pos:SV_POSITION;
			float2 uv[5]:TEXCOORD0;
		};


		v2f vertV(appdata_base v){
			v2f o;
			o.pos = UnityObjectToClipPos(v.vertex);
			float2 posuv = v.texcoord;
			o.uv[0] = posuv;
			o.uv[1] = posuv + float2(0,_MainTex_TexelSize.y);
			o.uv[2] = posuv + float2(0,_MainTex_TexelSize.y*2);
			o.uv[3] = posuv - float2(0,_MainTex_TexelSize.y);
			o.uv[4] = posuv - float2(0,_MainTex_TexelSize.y*2);

			return o;
		}

		v2f vertH(appdata_base v){
			v2f o;
			o.pos = UnityObjectToClipPos(v.vertex);
			float2 posuv = v.texcoord;
			o.uv[0] = posuv;
			o.uv[1] = posuv + float2(_MainTex_TexelSize.x,0);
			o.uv[2] = posuv + float2(_MainTex_TexelSize.x*2,0);
			o.uv[3] = posuv - float2(_MainTex_TexelSize.x,0);
			o.uv[4] = posuv - float2(_MainTex_TexelSize.x*2,0);

			return o;
		}

		fixed4 frag(v2f i):SV_Target{
			float GS[3] = {0.4026,0.2442,0.0545};//Gaussian kernel: one-sided weights for offsets 0, ±1, ±2

			float3 sum = GS[0]*tex2D(_MainTex,i.uv[0]).rgb;
			for(int t =1;t<3;t++){
				sum += GS[t]*tex2D(_MainTex,i.uv[t]).rgb;
				sum += GS[t]*tex2D(_MainTex,i.uv[t+2]).rgb;
			}

			return fixed4(sum,1);
		}


		ENDCG

		Pass{
			CGPROGRAM 

			#pragma vertex vertH
			#pragma fragment frag


			ENDCG

		}
		Pass{
			CGPROGRAM 

			#pragma vertex vertV
			#pragma fragment frag


			ENDCG
		}
	}
	FallBack Off
}
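The kernel {0.4026, 0.2442, 0.0545} used above is just a 5-tap Gaussian with sigma = 1, sampled at offsets 0, ±1, ±2 and normalized so that the full kernel sums to 1. A quick C# sketch (a hypothetical helper, not part of the original scripts) that reproduces those numbers:

using System;
using System.Linq;

public static class GaussianKernel
{
    //Returns the one-sided weights [w0, w1, ..., w_radius] of a (2*radius+1)-tap
    //1D Gaussian kernel, normalized so that the whole (two-sided) kernel sums to 1.
    public static float[] OneSided(float sigma = 1f, int radius = 2)
    {
        double[] half = Enumerable.Range(0, radius + 1)
            .Select(x => Math.Exp(-(x * x) / (2.0 * sigma * sigma)))
            .ToArray();
        double total = half[0] + 2.0 * half.Skip(1).Sum();//the center counts once, every other tap twice
        return half.Select(w => (float)(w / total)).ToArray();
    }
}
//GaussianKernel.OneSided() returns approximately { 0.4026f, 0.2442f, 0.0545f }

Because a 2D Gaussian kernel is separable, running the horizontal and vertical 1D passes one after the other (the two passes above) gives the same result as a full N x N convolution at a much lower sampling cost.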

Motion Blur

Principle: when a real camera exposes a frame, anything that moves during the exposure gets smeared. A real-time renderer has no exposure time, so every frame is perfectly sharp and motion blur has to be simulated.
Method 1: render the moving scene several times and blend the results; expensive. (Simplified: keep the previous frame's result and blend the current frame into it.)
Method 2: use a velocity buffer that stores a per-pixel velocity and blur along it. (Approach 1: use the depth texture to reconstruct the pixel's world position, reproject it with the previous frame's view-projection matrix, and take the difference of the two screen positions as the velocity.)

Implementation of the simplified version:
The rendering control script:

using System.Collections;
using System.Collections.Generic;
using UnityEngine;

[ExecuteInEditMode]
[RequireComponent(typeof(Camera))]
public class PostTest4 : MonoBehaviour
{
    [Range(0,1)]
    public float BlurAmount = 0.5f;
    public Shader sha;
    public Material mat;
    RenderTexture oldtex;
    private void Start()
    {
        mat = new Material(sha);
    }

    private void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        if(oldtex == null)
        {
            oldtex = new RenderTexture(source.width, source.height, 0);
            oldtex.hideFlags = HideFlags.HideAndDontSave;
            Graphics.Blit(source, oldtex);
        }

        //Not entirely sure about this one; removing it doesn't seem to change the result.
        //(It only marks the restore of the accumulation texture's contents as intentional, which avoids a warning/performance hint on tile-based GPUs.)
        //oldtex.MarkRestoreExpected();

        mat.SetFloat("_Amount", BlurAmount);

        //Blend the current frame into the accumulated previous-frame texture
        Graphics.Blit(source, oldtex, mat);

        Graphics.Blit(oldtex, destination);

        
    }



}

Shader:

Shader "Mytest/MoveBlurShader"{
	Properties{
		_MainTex("MainTexture",2D)="white"{}
		_Amount("Amount",float)=0.5
	}
	SubShader{
		ZWrite Off
		ZTest Always
		Cull Off

		CGINCLUDE
		#include "UnityCG.cginc"
		sampler2D _MainTex;
		float _Amount;

			struct v2f{
				float4 pos:SV_POSITION;
				float2 uv:TEXCOORD0;
			};

			v2f vert(appdata_base v){
				v2f o;
				o.pos = UnityObjectToClipPos(v.vertex);
				o.uv = v.texcoord;
				return o;
			}
		fixed4 fragAlphaControl(v2f i):SV_Target{
			return fixed4(tex2D(_MainTex,i.uv).rgb,_Amount);
		}

		fixed4 fragImage(v2f i):SV_Target{
			return fixed4(tex2D(_MainTex,i.uv));
		}
		ENDCG
		Pass{
			Blend SrcAlpha OneMinusSrcAlpha

			ColorMask RGB//Only the RGB channels are written: the blend touches RGB only and leaves the accumulation texture's alpha channel untouched
			CGPROGRAM

			#pragma vertex vert
			#pragma fragment fragAlphaControl
			
			ENDCG
		}
		Pass{
			Blend One Zero
			ColorMask A//Only the alpha channel is written: replace the buffer's alpha with the current frame's alpha

			CGPROGRAM
			#pragma vertex vert
			#pragma fragment fragImage

			ENDCG
		}
	}
	FallBack Off
}
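With Blend SrcAlpha OneMinusSrcAlpha and alpha = _Amount, each Blit effectively computes oldtex = lerp(oldtex, current, _Amount), so the accumulation texture is an exponential moving average of past frames: a frame rendered k frames ago contributes with weight _Amount * (1 - _Amount)^k, and a larger _Amount therefore means shorter trails. A tiny sketch of those weights (hypothetical helper, just to illustrate the decay):

using UnityEngine;

public static class MotionBlurWeights
{
    //Weight of the frame rendered k frames ago in the accumulation texture,
    //given that every frame does: oldtex = lerp(oldtex, current, amount).
    public static float FrameWeight(float amount, int k)
    {
        return amount * Mathf.Pow(1f - amount, k);
    }
}
//With amount = 0.5 the weights are 0.5, 0.25, 0.125, ... so older frames fade out geometrically.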

Using the depth texture (approach 1 of method 2)

Main shader code:
1. Build the pixel's NDC coordinates for the current frame from its uv (x, y) and the sampled depth.
2. Multiply by the inverse of the current frame's view-projection matrix to recover the world position.
3. Multiply the world position by the previous frame's view-projection matrix to get the pixel's NDC position in the previous frame.
4. The offset between the two positions gives the per-pixel velocity.

fixed4 frag(v2f i) : SV_Target {
			//Sample the depth from the depth texture
			float d = SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture, i.uv_depth);
			//Map the uv and the depth to [-1,1]: the pixel's NDC coordinates
			float4 H = float4(i.uv.x * 2 - 1, i.uv.y * 2 - 1, d * 2 - 1, 1);
			// Multiply by the inverse view-projection matrix
			float4 D = mul(_NowMatrixInverse, H);
			//Divide by w to get the pixel's world position
			float4 worldPos = D / D.w;
			
			// NDC position of this pixel in the current frame
			float4 currentPos = H;
			// Project the world position with the previous frame's view-projection matrix
			float4 previousPos = mul(_OldMatrix, worldPos);
			// Perspective divide: NDC position of this pixel in the previous frame
			previousPos /= previousPos.w;
			
			// Velocity of the pixel between the two frames (NDC offset halved to uv scale)
			float2 velocity = (currentPos.xy - previousPos.xy)/2.0f;
			
			float2 uv = i.uv;
			float4 c = tex2D(_MainTex, uv);
			uv += velocity * _Amount;//the larger _Amount, the larger the offset
			for (int it = 1; it < 3; it++, uv += velocity * _Amount) {
				float4 currentColor = tex2D(_MainTex, uv);
				c += currentColor;
			}
			c /= 3;
			
			return fixed4(c.rgb, 1.0);
		}
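The C# side of this version is not quoted in these notes. A minimal sketch of what it has to pass, with the names chosen to match the shader parameters above (the class and field names are made up):

using UnityEngine;

[ExecuteInEditMode]
[RequireComponent(typeof(Camera))]
public class MotionBlurWithDepth : MonoBehaviour   //hypothetical name
{
    [Range(0, 1)]
    public float blurAmount = 0.5f;
    public Shader sha;

    Material mat;
    Camera cam;
    Matrix4x4 previousViewProjection;   //view-projection matrix of the previous frame

    void Start()
    {
        cam = GetComponent<Camera>();
        cam.depthTextureMode |= DepthTextureMode.Depth;   //the shader samples _CameraDepthTexture
        mat = new Material(sha);
        previousViewProjection = cam.projectionMatrix * cam.worldToCameraMatrix;
    }

    void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        Matrix4x4 currentViewProjection = cam.projectionMatrix * cam.worldToCameraMatrix;

        mat.SetFloat("_Amount", blurAmount);
        mat.SetMatrix("_NowMatrixInverse", currentViewProjection.inverse);   //NDC -> world
        mat.SetMatrix("_OldMatrix", previousViewProjection);                 //world -> previous frame's clip space

        Graphics.Blit(source, destination, mat);

        previousViewProjection = currentViewProjection;   //keep for the next frame
    }
}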

Bloom

Effect: make the bright parts of the image bleed outwards.

Principle:
Pick a brightness threshold, write the pixels that exceed it into a render texture, blur that texture (so the bright areas spread outwards), then blend the blurred texture back with the original image.

The implementation follows the principle directly.
The rendering control script:

using System.Collections;
using System.Collections.Generic;
using UnityEngine;

[ExecuteInEditMode]
[RequireComponent(typeof(Camera))]
public class PostTest3 : MonoBehaviour
{
    [Range(1, 8)]
    public int downsample = 1;

    [Range(0,4)]
    public int iterations = 1;

    [Range(0.2f, 3f)]
    public float blurSize = 0.2f;
    [Range(0, 1)]
    public float xx = 0.5f;
  
    public Shader useShader;




    public Material material;
    // Start is called before the first frame update
    void Start()
    {
        
        material = new Material(useShader);

    }

    private void OnRenderImage1(RenderTexture source, RenderTexture destination)
    {
        //Screen size
        //int width = source.width;
        //int height = source.height;
        //Downsample the source to reduce the number of pixels processed
        int width = source.width/downsample;
        int height = source.height/downsample;

        //Temporary buffer
        RenderTexture tempbuffer = RenderTexture.GetTemporary(width,height,0);
        tempbuffer.filterMode = FilterMode.Bilinear;//bilinear filtering



        //Run the two blur directions one after the other
        Graphics.Blit(source, tempbuffer, material, 0);
        Graphics.Blit(tempbuffer, destination, material,1);

        RenderTexture.ReleaseTemporary(tempbuffer);
       
        //material.SetFloat("_EdgeOnly", EdgeOnly);
        //material.SetColor("_BackGroundColor", BackGroundColor);
        //material.SetColor("_EdgeColor", EdgeColor);

        //Graphics.Blit(source, destination, material);
    }


    private void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        material.SetFloat("_XX", xx);

        int width = source.width / downsample;
        int height = source.height / downsample;

        RenderTexture yuantu = RenderTexture.GetTemporary(width, height, 0);
        yuantu.filterMode = FilterMode.Bilinear;
        Graphics.Blit(source, yuantu);
        material.SetTexture("_YuantuTex", yuantu);

        RenderTexture tempbuffer = RenderTexture.GetTemporary(width, height, 0);
        tempbuffer.filterMode = FilterMode.Bilinear;

        //Downsample the source and extract the bright parts with pass 2
        Graphics.Blit(source, tempbuffer,material,2);

        for(int i = 0;i< iterations; i++)
        {
            material.SetFloat("_BlurSize", i * blurSize+1f);
            RenderTexture buffer0 = RenderTexture.GetTemporary(width, height, 0);
            //Gaussian pass in one direction; store the result in buffer0
            Graphics.Blit(tempbuffer, buffer0, material, 0);
            //Release the buffer we no longer need
            RenderTexture.ReleaseTemporary(tempbuffer);
            tempbuffer = buffer0;
            //Second direction
            buffer0 = RenderTexture.GetTemporary(width, height, 0);
            Graphics.Blit(tempbuffer, buffer0, material, 1);
            //Release the extra buffer
            RenderTexture.ReleaseTemporary(tempbuffer);
            tempbuffer = buffer0;
            //The latest blurred result is left in tempbuffer for the next iteration
        }
        //Render to the screen
        //Graphics.Blit(tempbuffer, destination);
        //Blend with the original image (pass 3) and render to the screen
        Graphics.Blit(tempbuffer, destination, material, 3);

        RenderTexture.ReleaseTemporary(tempbuffer);
        RenderTexture.ReleaseTemporary(yuantu);

    }
}

shader:

Shader "Mytest/BloomShader"{
	Properties{
		_MainTex("Screen Texture",2D)="white"{}
		_BlurSize("BlurSize",float)=0.6
		_YuantuTex("Yuantu Texture",2D)="white"{}

		_XX("xx",float)=0.5
	}
	SubShader{
		ZTest Always
		Cull Off
		ZWrite Off

		CGINCLUDE

		sampler2D _MainTex;
		float4 _MainTex_TexelSize;
		float _BlurSize;
		sampler2D _YuantuTex;
		float _XX;
		#include "UnityCG.cginc"

		struct v2f{
			float4 pos:SV_POSITION;
			float2 uv[5]:TEXCOORD0;
		};


		v2f vertV(appdata_base v){
			v2f o;
			o.pos = UnityObjectToClipPos(v.vertex);
			float2 posuv = v.texcoord;
			o.uv[0] = posuv;
			o.uv[1] = posuv + float2(0,_MainTex_TexelSize.y);
			o.uv[2] = posuv + float2(0,_MainTex_TexelSize.y*2);
			o.uv[3] = posuv - float2(0,_MainTex_TexelSize.y);
			o.uv[4] = posuv - float2(0,_MainTex_TexelSize.y*2);

			return o;
		}

		v2f vertH(appdata_base v){
			v2f o;
			o.pos = UnityObjectToClipPos(v.vertex);
			float2 posuv = v.texcoord;
			o.uv[0] = posuv;
			o.uv[1] = posuv + float2(_MainTex_TexelSize.x,0);
			o.uv[2] = posuv + float2(_MainTex_TexelSize.x*2,0);
			o.uv[3] = posuv - float2(_MainTex_TexelSize.x,0);
			o.uv[4] = posuv - float2(_MainTex_TexelSize.x*2,0);

			return o;
		}

		fixed4 frag(v2f i):SV_Target{
			float GS[3] = {0.4026,0.2442,0.0545};//Gaussian kernel: one-sided weights for offsets 0, ±1, ±2

			float3 sum = GS[0]*tex2D(_MainTex,i.uv[0]).rgb;
			for(int t =1;t<3;t++){
				sum += GS[t]*tex2D(_MainTex,i.uv[t]).rgb;
				sum += GS[t]*tex2D(_MainTex,i.uv[t+2]).rgb;
			}

			return fixed4(sum,1);
		}


		ENDCG

		Pass{
			CGPROGRAM 

			#pragma vertex vertH
			#pragma fragment frag


			ENDCG

		}
		Pass{
			CGPROGRAM 

			#pragma vertex vertV
			#pragma fragment frag


			ENDCG
		}

		//Bright-pass: keep only the bright pixels
		Pass{
			CGPROGRAM

			#pragma vertex vert
			#pragma fragment fragLight

			v2f vert(appdata_base v){
				v2f o;
				o.pos = UnityObjectToClipPos(v.vertex);
				o.uv[0] = v.texcoord;
				return o;
			}

			//This is the luminance test from the book. Why these exact numbers? They are the standard luma weights: the eye is most sensitive to green, then red, then blue (the 0.2125/0.7154/0.0721 coefficients come from the Rec. 709 primaries).
			fixed luminance(fixed4 color){
				return 0.2125 * color.r + 0.7154* color.g + 0.0721 * color.b;
			}

			fixed4 fragLight(v2f i):SV_Target{
				//Here I simply treat a pixel as bright when r+g+b is greater than 2
				float lightColor = 2;
				fixed3 color = tex2D(_MainTex,i.uv[0]).rgb;
				if(lightColor > color.r+color.g+color.b){
					discard;
				}

				//The book instead clamps (luminance - threshold) to [0,1]:
				//return clamp(luminance(color)-_valuecontrol,0,1);

				return fixed4(color,1);
			}



			ENDCG
		}

		Pass{
			CGPROGRAM

			#pragma vertex vert
			#pragma fragment fragYT

			v2f vert(appdata_base v){
				v2f o;
				o.pos = UnityObjectToClipPos(v.vertex);
				o.uv[0] = v.texcoord;
				return o;
			}
			fixed4 fragYT(v2f i):SV_Target{

				fixed3 yuan = tex2D(_YuantuTex,i.uv[0]);
				fixed3 light = tex2D(_MainTex,i.uv[0]);

				fixed3 uscolor = lerp(yuan,light,_XX);
				return fixed4(uscolor,1);
			}


			ENDCG
		}

	}
	FallBack Off
}

Edge Detection

Effect: outlining / edge highlighting.

Background:
[Convolution]: the magic of the convolution operation lies in the kernel (the operator).
Idea:
Find the edges in the image: wherever two neighboring pixels differ strongly there is probably an edge. Express the difference between pixels as a gradient; the larger the gradient, the more likely the pixel lies on an edge.

Edge detection based on pixel colors

Code:

Shader "Mytest/EdgeShader"{
	Properties{
		_MainTex("ScreenTex",2D)="white"{}
		_EdgeOnly("EdgeOnly",Range(0,1))=1
		_BackGroundColor("BackGroundColor",Color)=(1,1,1,1)
		_EdgeColor("EdgeColor",Color)=(0,0,0,0)
	}
	SubShader{
		Pass{
			ZWrite Off
			ZTest Always
			Cull Off

			CGPROGRAM
			#pragma vertex vert
			#pragma fragment frag
			#include "UnityCG.cginc"

			sampler2D _MainTex;
			float4 _MainTex_TexelSize;
			fixed4 _BackGroundColor;
			fixed4 _EdgeColor;
			float _EdgeOnly;

			struct v2f{
				float4 pos:SV_POSITION;
				float2 uv[9]:TEXCOORD0;//an array of 9 uv coordinates, one per pixel of the 3x3 neighborhood; it occupies the interpolators TEXCOORD0 through TEXCOORD8
			};

			v2f vert(appdata_base v){
				v2f o;
				o.pos = UnityObjectToClipPos(v.vertex);

				float2 posuv = v.texcoord;

				//uv of the 9 samples in the 3x3 neighborhood; index 4 is the current pixel
				o.uv[0] = posuv - _MainTex_TexelSize;
				o.uv[1] = posuv - float2(0,_MainTex_TexelSize.y);
				o.uv[2] = posuv + float2(_MainTex_TexelSize.x,-1 *_MainTex_TexelSize.y);

				o.uv[3] = posuv - float2(_MainTex_TexelSize.x,0);
				o.uv[4] = posuv;
				o.uv[5] = posuv + float2(_MainTex_TexelSize.x,0);

				o.uv[6] = posuv - float2(_MainTex_TexelSize.x,-1 * _MainTex_TexelSize.y);
				o.uv[7] = posuv + float2(0,_MainTex_TexelSize.y);
				o.uv[8] = posuv + _MainTex_TexelSize;

				return o;
			}
			//Returns a brightness value for a color.
			//The first commented line is the standard luma formula; returning the fixed4
			//directly implicitly truncates it to its first component (the red channel).
			fixed luminance(fixed4 color) {
				//return  0.2125 * color.r + 0.7154 * color.g + 0.0721 * color.b; 
				//return  2 * color.r + 2 * color.g +  2 * color.b; 
				return color;
			}
			float Sobel(v2f i){
				//Sobel kernels for the x and y directions
				const float Gx[9] = {-1,0,1,
									-2,0,2,
									-1,0,1};
				const float Gy[9] = {-1,-2,-1,
									0,0,0,
									1,2,1};
				float texColor;
				float edgeX = 0;
				float edgeY = 0;
				for (int it = 0; it < 9; it++) {
					texColor = luminance(tex2D(_MainTex, i.uv[it]));
					edgeX += texColor * Gx[it];
					edgeY += texColor * Gy[it];
				}

				//|edgeX| + |edgeY| approximates the gradient magnitude: the larger it is, the more
				//likely this is an edge, so the smaller `edge` is, the more likely we are on an edge.
				float edge = 1 - abs(edgeX) - abs(edgeY);
				
				return edge;
			}


			fixed4 frag(v2f i):SV_Target{

				half edge = Sobel(i);
				
				//edge color vs. the original image
				fixed4 withEdgeColor = lerp(_EdgeColor, tex2D(_MainTex, i.uv[4]), edge);
				//edge color vs. the background color
				fixed4 onlyEdgeColor = lerp(_EdgeColor, _BackGroundColor, edge);
				//blend the two results by _EdgeOnly (0 = edges over the original image, 1 = edges over the background)
				return lerp(withEdgeColor, onlyEdgeColor, _EdgeOnly);


			}


			ENDCG
		}
	}
	FallBack Off
}
Edge detection based on depth and normals

C#: passing the parameters:

[ImageEffectOpaque]//run right after the opaque objects have been rendered, before the transparent ones
    private void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        mat.SetFloat("_EdgeOnly", edgeOnly);
        mat.SetColor("_EdgeColor", edgeColor);
        mat.SetColor("_BackGroundColor", backgroundColor);
        mat.SetFloat("_SampleDistance", sampleDistance);
        mat.SetVector("_Sensitivity", new Vector4(sensitivityNormal, sensitivityDepth, 0, 0));
        Graphics.Blit(source, destination, mat);
    }
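Only OnRenderImage is quoted above; the rest of the script is a straightforward sketch (the field names match the parameters being set, the class name is made up). The important part is asking the camera for the depth+normals texture:

using UnityEngine;

[ExecuteInEditMode]
[RequireComponent(typeof(Camera))]
public class EdgeDetectDepthNormals : MonoBehaviour   //hypothetical name
{
    [Range(0, 1)]
    public float edgeOnly = 0.5f;
    public Color edgeColor = Color.black;
    public Color backgroundColor = Color.white;
    public float sampleDistance = 1.0f;
    public float sensitivityDepth = 1.0f;
    public float sensitivityNormal = 1.0f;

    public Shader sha;
    Material mat;

    void Start()
    {
        //_CameraDepthNormalsTexture is only generated when the camera is asked for it
        GetComponent<Camera>().depthTextureMode |= DepthTextureMode.DepthNormals;
        mat = new Material(sha);
    }

    //...the [ImageEffectOpaque] OnRenderImage shown above goes here...
}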

shader:

Shader "Mytest/EdgeByDepthShader"{
	Properties{
		_MainTex("ScreenTexture",2D)="white"{}
		_EdgeOnly("Only Edge",float)=0.5
		_EdgeColor("Edge Color",Color)=(1,1,1,1)
		_BackGroundColor("BackGround Color",Color)=(0,0,0,0)
		_SampleDistance("Sample Distance",float)=1
		_Sensitivity("Sensitivity",Vector)=(0,0,0,0)
	}
	SubShader{
		
		CGINCLUDE

		#include "UnityCG.cginc"

		sampler2D _MainTex;
		float4 _MainTex_TexelSize;
		sampler2D _CameraDepthNormalsTexture;


		float _EdgeOnly;
		fixed4 _EdgeColor;
		fixed4 _BackGroundColor;
		float _SampleDistance;
		float4 _Sensitivity;

		struct v2f{
			float4 pos:SV_POSITION;
			float2 uv[5]:TEXCOORD0;
		};


		float NormalDepthCheck(float4 t1,float4 t2){
			float2 normal1 = t1.xy;
			float d1 = DecodeFloatRG(t1.zw);
			float2 normal2 = t2.xy;
			float d2 = DecodeFloatRG(t2.zw);

			float2 diffNormal = abs(normal1 - normal2) * _Sensitivity.x;
			int isSame = (diffNormal.x + diffNormal.y) < 0.1;

			float depth = abs(d1 - d2) * _Sensitivity.y;
			int isSameDepth = depth <0.1 * d1;

			return isSame * isSameDepth? 1:0;
		}


		v2f vert(appdata_img v){
			v2f o;
			o.pos = UnityObjectToClipPos(v.vertex);

			o.uv[0] = v.texcoord;

			#if UNITY_UV_STARTS_AT_TOP
			if(_MainTex_TexelSize.y<0){
				o.uv[0].y = 1 - o.uv[0].y;
			}
			#endif

			//Roberts operator: sample the two diagonal pairs
			o.uv[1] = o.uv[0] + _MainTex_TexelSize.xy * fixed2(1,1) * _SampleDistance;
			o.uv[2] = o.uv[0] + _MainTex_TexelSize.xy * fixed2(-1,-1) * _SampleDistance;
			o.uv[3] = o.uv[0] + _MainTex_TexelSize.xy * fixed2(-1,1) * _SampleDistance;
			o.uv[4] = o.uv[0] + _MainTex_TexelSize.xy * fixed2(1,-1) * _SampleDistance;
			

			return o;
		}


		fixed4 frag(v2f i):SV_Target{
			//Sample the packed depth + normal at the four corners
			float4 sample1 = tex2D(_CameraDepthNormalsTexture,i.uv[1]);
			float4 sample2 = tex2D(_CameraDepthNormalsTexture,i.uv[2]);
			float4 sample3 = tex2D(_CameraDepthNormalsTexture,i.uv[3]);
			float4 sample4 = tex2D(_CameraDepthNormalsTexture,i.uv[4]);

			half edge = 1;

			//Roberts cross: compare the two diagonal pairs; edge drops to 0 as soon as one pair differs
			edge *= NormalDepthCheck(sample1,sample2);//diagonal pair
			edge *= NormalDepthCheck(sample3,sample4);

			fixed4 withEdgeColor = lerp(_EdgeColor,tex2D(_MainTex,i.uv[0]),edge);
			fixed4 withBackgroundColor = lerp(_EdgeColor,_BackGroundColor,edge);
			
			return lerp(withEdgeColor,withBackgroundColor,_EdgeOnly);
		}
		ENDCG

		Pass{
			ZWrite Off
			ZTest Always
			Cull Off

			CGPROGRAM
			#pragma vertex vert
			#pragma fragment frag

			ENDCG
		}
	}
	FallBack Off
}
Edge detection for a single object


A Basic Framework for Post-Effects

Grabbing the screen

  • OnRenderImage(RenderTexture src, RenderTexture dest)
    [source texture / destination texture / do the processing in the method body]
    By default it is called after all opaque and transparent objects in the scene have been rendered (the opaque render queues are <= 2500).
    Add the [ImageEffectOpaque] attribute to the method to apply the effect right after the opaque objects only.
  • Graphics.Blit(Texture src, RenderTexture dest, Material mat, int pass = -1)
    [source / destination / material whose shader processes the source / pass index, -1 by default (run all of the shader's passes in order), otherwise run only the given pass]
    When dest is null the result is rendered directly to the screen. A minimal skeleton combining the two functions is sketched below.

When writing a post-effect shader I once hit a bug after leaving a variable out of the Properties block; better to declare it there even if it is never set from the material.
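Putting the two functions together, the smallest possible post-effect script looks roughly like this (the book wraps this pattern in a reusable base class; this is just a bare sketch with made-up names):

using UnityEngine;

[ExecuteInEditMode]
[RequireComponent(typeof(Camera))]
public class SimplePostEffect : MonoBehaviour   //hypothetical minimal example
{
    public Shader effectShader;
    Material effectMaterial;

    void Start()
    {
        effectMaterial = new Material(effectShader);
    }

    void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        if (effectMaterial != null)
        {
            //set material properties here, then let Blit run the shader over the screen
            Graphics.Blit(source, destination, effectMaterial);
        }
        else
        {
            Graphics.Blit(source, destination);   //fall through unchanged
        }
    }
}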

Using the Depth and Normal Textures

With deferred rendering these are available directly (they are already part of the G-buffer).
For an object to show up in the depth and normal textures, its shader must set RenderType correctly and its render queue must be <= 2500 (the opaque queues).

What Unity does behind the scenes:

Depth: the depth value written to _CameraDepthTexture (derived from the depth buffer / NDC z).
Normals: view-space normals, packed together with the depth into _CameraDepthNormalsTexture.
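Both textures are only generated after the camera is asked for them; a minimal sketch (hypothetical helper):

using UnityEngine;

[RequireComponent(typeof(Camera))]
public class RequestDepthTextures : MonoBehaviour
{
    void Start()
    {
        Camera cam = GetComponent<Camera>();
        cam.depthTextureMode |= DepthTextureMode.Depth;        //generates _CameraDepthTexture
        cam.depthTextureMode |= DepthTextureMode.DepthNormals; //generates _CameraDepthNormalsTexture
    }
}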

Reconstructing the world position from the depth texture

Method 1: through the NDC coordinates and a matrix multiplication

Multiply the NDC coordinates by the inverse of the view-projection matrix to get the world position in homogeneous form, then divide by w to get the world position itself.
This costs a full matrix multiplication per fragment, so it is relatively expensive.
C# side:

 //Current view-projection matrix of the camera
        Matrix4x4 nowMatrix = nowCamera.projectionMatrix * nowCamera.worldToCameraMatrix;
        //Its inverse
        Matrix4x4 inversenowMatrix = nowMatrix.inverse;
        mat.SetMatrix("_NowMatrixInverse", inversenowMatrix);

shader:

//Sample the depth from the depth texture
			float d = SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture, i.uv_depth);
			//Map the uv and the depth to [-1,1]: the pixel's NDC coordinates
			float4 H = float4(i.uv.x * 2 - 1, i.uv.y * 2 - 1, d * 2 - 1, 1);
			// Multiply by the inverse view-projection matrix
			float4 D = mul(_NowMatrixInverse, H);
			//Divide by w to get the pixel's world position
			float4 worldPos = D / D.w;

Method 2: through the camera's world position plus a per-pixel offset

The four corners of the camera's near clip plane can be written out explicitly; by similar triangles, the rays through those corners give the offset of any pixel from the camera once its linear depth is known.
C# parameters:

//Camera parameters
        float fov = nowCamera.fieldOfView;
        float near = nowCamera.nearClipPlane;
        float far = nowCamera.farClipPlane;
        float aspect = nowCamera.aspect;

        //Near-plane half height and the offsets from its center to the right and to the top
        float halfHeight = near * Mathf.Tan(fov * 0.5f * Mathf.Deg2Rad);
        Vector3 toRight = nowCamera.transform.right * halfHeight * aspect;
        Vector3 toTop = nowCamera.transform.up * halfHeight;


        //Vector from the camera to the top-left corner of the near plane
        Vector3 TopLeft = nowCamera.transform.forward * near + toTop - toRight;
        float scale = TopLeft.magnitude / near;//ratio: for any point, (length of its corner ray) / (its depth) = TopLeft.magnitude / near
        TopLeft.Normalize();
        TopLeft *= scale;

        //Camera to the top-right corner
        Vector3 TopRight = nowCamera.transform.forward * near + toTop + toRight;
        TopRight.Normalize();
        TopRight *= scale;

        //Bottom-left
        Vector3 BottomLeft = nowCamera.transform.forward * near - toTop - toRight;
        BottomLeft.Normalize();
        BottomLeft *= scale;

        //Bottom-right
        Vector3 BottomRight = nowCamera.transform.forward * near - toTop + toRight;
        BottomRight.Normalize();
        BottomRight *= scale;

        //Pack the data into a matrix: each row holds one corner direction scaled by the ratio
        Matrix4x4 mar = Matrix4x4.identity;
        mar.SetRow(0, BottomLeft);
        mar.SetRow(1, BottomRight);
        mar.SetRow(2, TopRight);
        mar.SetRow(3, TopLeft);
        mat.SetMatrix("_ViewRectInf", mar);//传递给shader

        //Inverse of the view-projection matrix (projection * worldToCamera)
        Matrix4x4 InverseMatrix = (nowCamera.projectionMatrix * nowCamera.worldToCameraMatrix).inverse;
        mat.SetMatrix("_NDCToWorld", InverseMatrix);

        mat.SetFloat("_FogDensity", fogDensity);
        mat.SetColor("_FogColor", fogColor);
        mat.SetFloat("_FogStart", fogStart);
        mat.SetFloat("_FogEnd", fogEnd);

Shader side:

v2f vert(appdata_img v){
				v2f o;
				o.pos = UnityObjectToClipPos(v.vertex);
				o.uv = v.texcoord;
				o.uv_depth = v.texcoord;

				int index = 0;
				//bottom-left corner
				if(v.texcoord.x<0.5 && v.texcoord.y<0.5){
					index = 0;
				}else if(v.texcoord.x >0.5 && v.texcoord.y<0.5){//bottom-right
					index = 1;
				}else if(v.texcoord.x >0.5 && v.texcoord.y > 0.5){//top-right
					index = 2;
				}else{//top-left
					index = 3;
				}

				#if (UNITY_UV_STARTS_AT_TOP)
				if(_MainTex_TexelSize.y<0){
					o.uv_depth.y = 1 - o.uv_depth.y;
					index = 3-index;
				}
				#endif

				o.inf = _ViewRectInf[index];

				return o;
			}

			fixed4 frag(v2f i):SV_Target{
				//Sample the depth texture and convert to linear eye-space depth
				float lineard = LinearEyeDepth(SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture,i.uv_depth));
				//world position = camera world position + (corner direction * linear depth)
				float3 worldPos = _WorldSpaceCameraPos + lineard * i.inf.xyz;

				float fogDensity = (_FogEnd - worldPos.y)/(_FogEnd - _FogStart);
				fogDensity = saturate(fogDensity * _FogDensity);

				fixed4 finalColor = tex2D(_MainTex,i.uv);
				finalColor.rgb = lerp(finalColor.rgb,_FogColor.rgb,fogDensity);

				return finalColor;	
			}

Non-Photorealistic Rendering

Use special rendering techniques to give the image a stylized look: cartoon, watercolor, ink wash, outlines, flat color blocks, and so on.

Outline (toon shading)
shader:

Shader "Mytest/UnRealShader"{
	Properties{
		_Color("DiffuseColor",Color)=(1,1,1,1)
		_MainTex("Main Texture",2D)="white"{}
		_Ramp("Ramp Texture",2D)="white"{}
		_OutLine("Line",Range(0,1))=1
		_OutLineColor("OutLine Color",Color)=(1,1,1,1)
		_Specular("Specular",Color)=(1,1,1,1)
		_SpecularSize("Specular Size",float) = 0.01

	}
	SubShader{
		CGINCLUDE

		fixed4 _Color;
		sampler2D _MainTex;
		float4 _MainTex_ST;
		sampler2D _Ramp;
		float4 _Ramp_ST;
		float _OutLine;
		fixed4 _OutLineColor;
		fixed4 _Specular;
		float _SpecularSize;

		ENDCG

		Pass{
			NAME "OUTLINE"
			Cull Front

			CGPROGRAM

			#pragma vertex vert
			#pragma fragment frag

			#include "UnityCG.cginc"


			struct v2f{
				float4 pos:SV_POSITION;
				
				float2 uv:TEXCOORD0;
			};


			v2f vert(appdata_base v){
				v2f o;
				float4 viewPos = mul(UNITY_MATRIX_MV,v.vertex);
				float3 viewNormal = mul((float3x3)UNITY_MATRIX_IT_MV,v.normal);
				viewNormal.z = -0.5;
				viewPos = viewPos + float4(normalize(viewNormal),0) * _OutLine;
				o.pos = mul(UNITY_MATRIX_P,viewPos);
				//o.normal = UnityObjectToWorldNormal(v.normal);
				return o;
			}

			fixed4 frag(v2f i):SV_Target{
				return float4(_OutLineColor.rgb,1);
			}


			ENDCG
		}



		Pass{
			Tags{
				"LightMode"="ForwardBase"
			}
			Cull Back

			CGPROGRAM

			#pragma vertex vert
			#pragma fragment frag
			#pragma multi_compile_fwdbase

			#include "UnityCG.cginc"
			#include "Lighting.cginc"
			#include "AutoLight.cginc"

			struct v2f{
				float4 pos:SV_POSITION;
				float2 uv:TEXCOORD0;
				float3 worldNormal:TEXCOORD1;
				float3 worldPos:TEXCOORD2;
				SHADOW_COORDS(3)
			};

			v2f vert(appdata_base v){
				v2f o;
				o.pos = UnityObjectToClipPos(v.vertex);
				o.uv = TRANSFORM_TEX(v.texcoord,_MainTex);
				o.worldNormal = UnityObjectToWorldNormal(v.normal);
				o.worldPos = mul(unity_ObjectToWorld, v.vertex).xyz;
				TRANSFER_SHADOW(o);
				return o;
			}

			fixed4 frag(v2f i):SV_Target{
				
				fixed3 c = tex2D(_MainTex,i.uv);
				
				//albedo
				fixed3 albedo = c.rgb * _Color.rgb;
				
				//ambient
				fixed3 ambientColor = UNITY_LIGHTMODEL_AMBIENT.xyz * albedo;
				
				//light attenuation (shadows)
				UNITY_LIGHT_ATTENUATION(atten,i,i.worldPos);

			

				fixed3 worldNormal = normalize(i.worldNormal);
				fixed3 worldLightDir = normalize( UnityWorldSpaceLightDir(i.worldPos) );
				fixed3 worldViewDir = normalize( UnityWorldSpaceViewDir(i.worldPos) );
				//Blinn-Phong half vector: the normalized sum of the light direction and the view direction
				fixed3 newDir = normalize( worldLightDir + worldViewDir);

				//half-Lambert diffuse
				fixed diff = dot(worldNormal,worldLightDir);
				diff =( diff*0.5+0.5 ) * atten;
				fixed3 diffuseColor = _LightColor0.rgb * albedo * tex2D(_Ramp,float2(diff,diff)).rgb;

				//specular
				fixed spec = dot(worldNormal,newDir);
				fixed w = fwidth(spec) * 2;
				fixed3 specularColor = _Specular.rgb * lerp(0,1,smoothstep(-w,w,spec + _SpecularSize -1)) * step(0.0001,_SpecularSize);
				
				return fixed4(ambientColor + diffuseColor + specularColor,1);
			
			}
		
			ENDCG
		}

	}
	FallBack "Diffuse"
}

Hatching (sketch) style

Main idea:
Compute the lighting per vertex and map the light intensity onto a set of hatching textures (the brighter the area, the fewer hatch strokes; the darker, the more).

shader:

Shader "Mytest/HatchShader"{
	Properties{
		_MainColor("Main Color",Color)=(1,1,1,1)
		_SampleScale("Sample Scale",Range(0,8))=1//tiling of the hatch textures

		//hatching textures, from the lightest (_Map0) to the darkest (_Map5)
		_Map0("Map 0",2D)="white"{}
		_Map1("Map 1",2D)="white"{}
		_Map2("Map 2",2D)="white"{}
		_Map3("Map 3",2D)="white"{}
		_Map4("Map 4",2D)="white"{}
		_Map5("Map 5",2D)="white"{}
	
	}
	SubShader{
		Tags{				
			"RenderType"="Opaque"
			"Queue"="Geometry"
		}
		Pass{
			Tags{

				"LightMode"="ForwardBase"
			}
			CGPROGRAM

			#pragma vertex vert
			#pragma fragment frag

			#include "UnityCG.cginc"
			#include "AutoLight.cginc"

			fixed4 _MainColor;
			float _SampleScale;
			sampler2D _Map0;
			sampler2D _Map1;
			sampler2D _Map2;
			sampler2D _Map3;
			sampler2D _Map4;
			sampler2D _Map5;

			struct v2f{
				float4 pos:SV_POSITION;
				float2 uv:TEXCOORD0;
				
				float3 sample_uv1:TEXCOORD1;
				float3 sample_uv2:TEXCOORD2;
				float3 worldPos:TEXCOORD3;
				SHADOW_COORDS(4)
			};

			//compute the blend weights of the hatch textures from the diffuse intensity
			v2f vert(appdata_base i){
				v2f o;
				o.pos = UnityObjectToClipPos(i.vertex);
				//uv * tiling factor, range [0-8]
				o.uv = i.texcoord.xy * _SampleScale;
				
				o.worldPos = mul(unity_ObjectToWorld,i.vertex).xyz;

				//world-space normal
				fixed3 worldNormal = normalize(UnityObjectToWorldNormal(i.normal)).xyz;
				//world-space light direction
				fixed3 worldLightDir = normalize( UnityWorldSpaceLightDir(o.worldPos) ).xyz;
				//diff in [0,1]
				float diff = max(0, dot(worldNormal,worldLightDir) );
				
				diff *= 7;

				o.sample_uv1 = float3(0,0,0);
				o.sample_uv2 = float3(0,0,0);

				//split the [0,7] range into bands: the brighter, the fewer strokes (the brightest band stays pure white); adjacent textures are cross-faded 0-1 / 1-0
				if(diff>6){
					
				}else if(diff>5){
					o.sample_uv1.x = diff - 5;
				}else if(diff>4){
					o.sample_uv1.x = diff - 4;
					o.sample_uv1.y = 1-o.sample_uv1.x;
				}else if(diff>3){
					o.sample_uv1.y = diff - 3;
					o.sample_uv1.z = 1-o.sample_uv1.y;
				}else if(diff>2){
					o.sample_uv1.z = diff - 2;
					o.sample_uv2.x = 1-o.sample_uv1.z;
				}else if(diff>1){
					o.sample_uv2.x = diff - 1;
					o.sample_uv2.y = 1-o.sample_uv2.x;
				}else{
					o.sample_uv2.y = diff;
					o.sample_uv2.z = 1-o.sample_uv2.y;
				}

				//shadow
				TRANSFER_SHADOW(o);

				return o;
			}


			fixed4 frag(v2f i):SV_Target{
				
				fixed4 colorMap0 = tex2D(_Map0,i.uv) * i.sample_uv1.x;
				fixed4 colorMap1 = tex2D(_Map1,i.uv) * i.sample_uv1.y;
				fixed4 colorMap2 = tex2D(_Map2,i.uv) * i.sample_uv1.z;
				fixed4 colorMap3 = tex2D(_Map3,i.uv) * i.sample_uv2.x;
				fixed4 colorMap4 = tex2D(_Map4,i.uv) * i.sample_uv2.y;
				fixed4 colorMap5 = tex2D(_Map5,i.uv) * i.sample_uv2.z;

				fixed4 colorWhite = fixed4(1,1,1,1) * (1 - i.sample_uv1.x - i.sample_uv1.y - i.sample_uv1.z - i.sample_uv2.x - i.sample_uv2.y - i.sample_uv2.z);
			
				fixed4 finalColor = colorMap0 + colorMap1 +colorMap2 +colorMap3 +colorMap4 +colorMap5 +colorWhite;
			
				UNITY_LIGHT_ATTENUATION(atten,i,i.worldPos);

				return fixed4(finalColor.rgb * _MainColor.rgb * atten,1);
			}
			ENDCG
		}
	}
	FallBack "Diffuse"
}

Using Noise

Noise is used all over the place: in particle systems, in shaders, and so on.

Dissolve

shader:

Shader "Mytest/DissolveShader"{
	Properties{
		_MainTex("Main Texture",2D)="white"{}
		//dissolve parameters
		_BurnAmount("Burn Amount",Range(0,1))=0//how far the dissolve has progressed
		_BurnLine("Burn Line Width",Range(0,0.3))=0.1//width of the burning edge
		_BurnColorFirst("FireFirst Color",Color)=(1,1,1,1)//edge colors
		_BurnColorSecond("FireSecond Color",Color)=(0,0,0,0)
		_BurnMap("Burn Map",2D)="white"{}//noise texture
		_BumpMap("Normal Map",2D)="bump"{}//normal map
	}
	SubShader{
		CGINCLUDE

		sampler2D _MainTex;
			float4 _MainTex_ST;

			float _BurnAmount;
			float _BurnLine;
			fixed4 _BurnColorFirst;
			fixed4 _BurnColorSecond;
			sampler2D _BurnMap;
			float4 _BurnMap_ST;

			sampler2D _BumpMap;
			float4 _BumpMap_ST;


		ENDCG
		Pass{
			Tags{
				"LightMode"="ForwardBase"
			}
			Cull Off

			CGPROGRAM

			#pragma vertex vert
			#pragma fragment frag
			//#pragma multi_compile_fwdbase
			#include "UnityCG.cginc"
			#include "AutoLight.cginc"
			#include "Lighting.cginc"

			

			struct v2f{
				float4 pos:SV_POSITION;
				float2 uv:TEXCOORD0;
				float2 uvBurn:TEXCOORD1;
				float2 uvBump:TEXCOORD2;
				float3 worldPos:TEXCOORD3;
				float3 lightDir:TEXCOORD4;
				SHADOW_COORDS(5)
			};

			v2f vert(appdata_tan v){
				v2f o;
				o.pos = UnityObjectToClipPos(v.vertex);
				o.uv = TRANSFORM_TEX(v.texcoord,_MainTex);
				o.uvBump = TRANSFORM_TEX(v.texcoord,_BumpMap);
				o.uvBurn = TRANSFORM_TEX(v.texcoord,_BurnMap);

				o.worldPos = mul(unity_ObjectToWorld,v.vertex).xyz;

				//light direction in tangent space
				TANGENT_SPACE_ROTATION;
				o.lightDir = mul(rotation,ObjSpaceLightDir(v.vertex)).xyz;

				TRANSFER_SHADOW(o);
				
				return o;
			}

			fixed4 frag(v2f i):SV_Target{
				//sample the noise (burn) map
				fixed3 burn = tex2D(_BurnMap,i.uvBurn).rgb;
				//albedo
				fixed3 albedo = tex2D(_MainTex,i.uv).rgb;
				//discard fragments whose noise value is below the burn amount
				clip(burn.r - _BurnAmount);
				
				
				

				//ambient
				fixed3 ambientColor = UNITY_LIGHTMODEL_AMBIENT.rgb * albedo;
				//tangent-space light direction and normal
				float3 tangentLightDir = normalize(i.lightDir);
				fixed3 tangentNormal = UnpackNormal(tex2D(_BumpMap,i.uvBump));
				//diffuse
				fixed3 diffuseColor = _LightColor0.rgb * albedo * max(0,dot(tangentNormal,tangentLightDir));
				
				//color transition along the burning edge
				//burn.r - _BurnAmount: distance of this fragment from the dissolve front
				//smoothstep(0, _BurnLine, ...) grows from 0 to 1 across the burn-line width
				//so t in [0,1] is 1 right at the dissolve front and falls to 0 away from it
				fixed t = 1 - smoothstep(0,_BurnLine, burn.r - _BurnAmount);
				//the closer to the dissolve front, the closer the color gets to the second burn color
				fixed3 burnColor = lerp(_BurnColorFirst,_BurnColorSecond,t);
				//burnColor = pow(burnColor,5);

				UNITY_LIGHT_ATTENUATION(atten,i,i.worldPos);
				fixed3 finalColor = lerp( ambientColor + diffuseColor *atten,burnColor,t*step(0.0001,_BurnAmount)   );

				return fixed4(finalColor.rgb,1);
			}
			ENDCG
		}

		//shadow caster pass, so the dissolved parts stop casting shadows
		Pass{
			Tags{
				"LightMode"="ShadowCaster"
			}
			
			CGPROGRAM
			
			#pragma vertex vert
			#pragma fragment frag

			#pragma multi_compile_shadowcaster
			#include "UnityCG.cginc"

			struct v2f{
				V2F_SHADOW_CASTER;//1
				float2 uvBurn:TEXCOORD1;
			};

			v2f vert(appdata_base v){
				v2f o;
				TRANSFER_SHADOW_CASTER_NORMALOFFSET(o);//2
				o.uvBurn = TRANSFORM_TEX(v.texcoord,_BurnMap);
				return o;
			}

			fixed4 frag(v2f i):SV_Target{
				fixed3 burn = tex2D(_BurnMap,i.uvBurn).rgb;
				clip(burn.r - _BurnAmount);
				
				SHADOW_CASTER_FRAGMENT(i);//3
			}


			ENDCG
		}



	}
	FallBack Off
}

Water Waves

Working through this one helped me review matrix transforms, tangent space, reflection, clip space, NDC, and so on.
shader:

Shader "Mytest/WaterWaveShader"{
	Properties{
		_Color("Color",Color)=(1,1,1,1)
		_MainTex("Base Texture",2D)="white"{}
		_WaveMap("Wave Map",2D)="bump"{}
		_CubeMap("Cube Map",Cube)="_Skybox"{}

		_WaveXSpeed("Wave X Speed",Range(-0.1,0.1))=0.01
		_WaveYSpeed("Wave Y Speed",Range(-0.1,0.1))=0.01
		_Distortion("Distortion",Range(0,10000))=10
	}
	SubShader{
		Tags{
			"RenderType"="Opaque"
			"Queue"="Transparent"
		}
		//grab the screen into _RefractionTex
		GrabPass{"_RefractionTex"}
		CGINCLUDE

			#include "UnityCG.cginc"

			fixed4 _Color;
			sampler2D _MainTex;
			float4 _MainTex_ST;
			sampler2D _WaveMap;
			float4 _WaveMap_ST;
			samplerCUBE _CubeMap;
			float _WaveXSpeed;
			float _WaveYSpeed;
			float _Distortion;

			sampler2D _RefractionTex;
			float4 _RefractionTex_TexelSize;

			struct v2f{
				float4 pos:SV_POSITION;
				float4 scrPos:TEXCOORD0;
				float4 uv:TEXCOORD1;
				float4 TtoW0:TEXCOORD2;
				float4 TtoW1:TEXCOORD3;
				float4 TtoW2:TEXCOORD4;
			};


		ENDCG
		
		Pass{
			
			CGPROGRAM
			
			#pragma vertex vert
			#pragma fragment frag
			
			v2f vert(appdata_tan v){
				v2f o;
				o.pos = UnityObjectToClipPos(v.vertex);
				//homogeneous screen position; ComputeGrabScreenPos maps [-1,1] to [0,1] (the divide by w happens later)
				o.scrPos = ComputeGrabScreenPos(o.pos);
			
				o.uv.xy = TRANSFORM_TEX(v.texcoord,_MainTex);
				o.uv.zw = TRANSFORM_TEX(v.texcoord,_WaveMap);
			
				float3 worldPos = mul(unity_ObjectToWorld,v.vertex).xyz;

				/* 
				Tangent-space basis: z axis = normal, x axis = tangent, y axis = binormal (built from the normal and the tangent).
				Transform all three axes into world space;
				the matrix they form is the tangent-to-world transform.
				*/
				fixed3 worldNormal = UnityObjectToWorldNormal(v.normal);
				//tangent converted to world space
				fixed3 worldTangent = UnityObjectToWorldDir(v.tangent.xyz);
				//binormal; multiplying by tangent.w picks the correct handedness
				fixed3 worldBinormal = cross(worldNormal,worldTangent) * v.tangent.w;

				//rows of the tangent-to-world matrix (the w components store the world position)
				o.TtoW0 = float4(worldTangent.x,worldBinormal.x,worldNormal.x,worldPos.x);
				o.TtoW1 = float4(worldTangent.y,worldBinormal.y,worldNormal.y,worldPos.y);
				o.TtoW2 = float4(worldTangent.z,worldBinormal.z,worldNormal.z,worldPos.z);

				return o;
			}

			fixed4 frag(v2f i):SV_Target{

				float2 speed = _Time.y * float2(_WaveXSpeed,_WaveYSpeed);
				
				//unpack the tangent-space normals from the wave normal map
				fixed3 bump1 = UnpackNormal( tex2D(_WaveMap,i.uv.zw + speed) ).rgb;
				fixed3 bump2 = UnpackNormal( tex2D(_WaveMap,i.uv.zw - speed) ).rgb;
				//fixed3 bump = normalize(bump2);//using only one direction makes the water flow one way, looping along the texture
				fixed3 bump = normalize(bump1 + bump2);//

				//normal-map perturbation * distortion strength * screen texel size
				float2 Waveoffset = bump.xy * _Distortion * _RefractionTex_TexelSize.xy;
				i.scrPos.xy = Waveoffset * i.scrPos.z + i.scrPos.xy;//screen position (homogeneous) + an offset scaled by depth, so deeper water is distorted more
				fixed3 refrCol = tex2D(_RefractionTex,i.scrPos.xy/i.scrPos.w).rgb;//sample the grabbed screen texture (the refraction color)

				//transform the normal from tangent space to world space
				bump = normalize( half3(dot(i.TtoW0.xyz,bump),dot(i.TtoW1.xyz,bump),dot(i.TtoW2.xyz,bump)));
				fixed4 texColor = tex2D(_MainTex,i.uv.xy + speed);
				//reflection direction from the view direction and the normal
				float3 worldPos = float3(i.TtoW0.w,i.TtoW1.w,i.TtoW2.w);
				fixed3 worldViewDir = normalize( UnityWorldSpaceViewDir(worldPos) );
				fixed3 refDir = reflect(-worldViewDir,bump);
				//sample the cubemap along the reflection direction
				fixed3 refColor = texCUBE(_CubeMap,refDir).rgb * texColor.rgb * _Color.rgb;
				//Fresnel factor (approximated as pow(1 - saturate(dot(view, normal)), 4)); it controls the reflection/refraction mix
				fixed fresnel = pow(1 - saturate(dot(worldViewDir,bump)),4);
				//fixed3 finalColor = refColor * fresnel + refrCol * (1 - fresnel);
				fixed3 finalColor = lerp(refrCol,refColor,fresnel);
				return fixed4(finalColor,1);
			}
			ENDCG
		}
	}
	FallBack Off
}

Fog with Noise

Take the earlier fog effect, sample a noise texture with a time-based offset, and let the sampled value modulate the fog factor.

Noise texture

Rendering Optimization Techniques

Batching, LOD, occlusion culling
Texture sizes, atlases
Shader computation
Too many mesh vertices
Screen buffer resolution

CPU

Draw call: gather the render data, change the render state, issue the render command.

Render more data per render-state change:
Dynamic batching: objects sharing the same material (plus other restrictions) have their meshes merged every frame.
Static batching: objects that never move are combined into one mesh before the game runs; this costs extra memory (shared meshes get duplicated).

Sharing materials:

  1. Objects with different textures can pack them into one large atlas and adjust their sampling coordinates, as sketched below.
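A rough sketch of the atlas idea (not from the book): Texture2D.PackTextures builds the atlas and returns each source texture's sub-rectangle, the mesh UVs are remapped into that sub-rectangle, and every renderer is then pointed at one shared material so the objects can be batched. All names are made up:

using UnityEngine;

public class AtlasExample : MonoBehaviour   //hypothetical helper
{
    public Texture2D[] sourceTextures;      //must be readable for PackTextures
    public Material sharedAtlasMaterial;    //one material used by all renderers
    public MeshFilter[] meshFilters;        //meshFilters[i] originally used sourceTextures[i]

    void Start()
    {
        var atlas = new Texture2D(2048, 2048);
        Rect[] rects = atlas.PackTextures(sourceTextures, 2, 2048);   //normalized sub-rects
        sharedAtlasMaterial.mainTexture = atlas;

        for (int i = 0; i < meshFilters.Length; i++)
        {
            Mesh mesh = meshFilters[i].mesh;   //note: .mesh instantiates a copy of the mesh
            Vector2[] uv = mesh.uv;
            Rect r = rects[i];
            for (int j = 0; j < uv.Length; j++)   //remap [0,1] uv into the atlas sub-rect
                uv[j] = new Vector2(r.x + uv[j].x * r.width, r.y + uv[j].y * r.height);
            mesh.uv = uv;
            meshFilters[i].GetComponent<Renderer>().sharedMaterial = sharedAtlasMaterial;
        }
    }
}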

Game-logic computation, physics simulation

GPU

Vertex count, fragment count
Vertex optimization:

  1. Optimize the models: remove unnecessary hard edges and texture seams (smoothing splits and UV splits duplicate vertices)
  2. LOD
  3. Occlusion culling
    Fragment optimization:
    reduce overdraw (pixels shaded more than once)
  4. Control the render order; fragments rejected by the depth test are never shaded
  5. Watch out for transparent objects
  6. Real-time lighting and shadows
  7. Bandwidth: texture sizes, mipmaps to reduce the texels sampled, texture compression
  8. Shader LOD (controllable from script, see the sketch after this list)

Number of shader computations
Texture / render resolution
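For point 8, the Shader LOD level can be capped from script so that only SubShaders at or below that LOD are used; a minimal sketch (the values are only illustrative):

using UnityEngine;

public class ShaderLodExample : MonoBehaviour   //hypothetical helper
{
    public Shader shader;

    void Start()
    {
        //Only SubShaders whose LOD value is <= the limit will be used.
        Shader.globalMaximumLOD = 300;   //global cap for every shader
        if (shader != null)
            shader.maximumLOD = 200;     //cap for one particular shader
    }
}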
