参考文献https://www.cnblogs.com/evennl/p/3894438.html
传统的高斯模糊实现通常需要三个pass(降采样pass、横向模糊pass和纵向模糊pass):降采样pass决定采样的步长;然后以当前uv为中心、当前步长为基准,横向模糊pass采样左三、右三及自身共七个点,纵向模糊pass采样上三、下三及自身共七个点,每个采样值乘以高斯分布曲线对应位置的取值(系数)后加权平均。
本文采用九宫格uv采样,取高斯分布曲线近似值得到3x3卷积核,加权平均计算最终颜色,一个pass搞定。
下面是shader代码:参考《Shader入门精要》
// One-pass approximate Gaussian blur: samples a 3x3 UV neighborhood in the
// vertex shader and blends the taps with a normalized 3x3 kernel (weights sum to 1).
Shader "Custom/Gaussian"
{
    Properties
    {
        _Color("MainColor",COLOR) = (1,1,1,1)
        _MainTex ("Texture", 2D) = "white" {}
        _BlurLevel("BlurLevel",float) = 3
    }
    SubShader
    {
        Tags { "RenderType"="Opaque" }
        LOD 100
        Pass
        {
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag
            #include "UnityCG.cginc"

            sampler2D _MainTex;
            float4 _MainTex_ST;
            float4 _MainTex_TexelSize;  // xy = (1/width, 1/height), set by Unity
            float4 _Color;              // declared in Properties; currently unused by the passes below
            float _BlurLevel;           // sample-offset scale, in texels

            struct a2v
            {
                float4 vertex : POSITION;
                float2 uv : TEXCOORD0;
            };

            struct v2f
            {
                float4 vertex : SV_POSITION;
                // 3x3 neighborhood UVs; uvs[4] is the center (the original uv).
                // NOTE: packing the center into uvs[4] instead of a separate
                // interpolator keeps us at 9 TEXCOORDs instead of 10.
                float2 uvs[9] : TEXCOORD0;
            };

            v2f vert (a2v v)
            {
                v2f o;
                o.vertex = UnityObjectToClipPos(v.vertex);
                float2 uv = TRANSFORM_TEX(v.uv, _MainTex);
                float2 texel = _MainTex_TexelSize.xy * _BlurLevel;
                // Row-major 3x3 offsets: (-1,-1)..(1,1); t==4 gives offset (0,0).
                for (int t = 0; t < 9; t++)
                {
                    o.uvs[t] = uv + texel * float2(t % 3 - 1, t / 3 - 1);
                }
                return o;
            }

            // Box blur: unweighted average of the 9 taps.
            // (Kept for reference; frag() uses gaussian() instead.)
            fixed4 BlurAverage(v2f i)
            {
                // FIX: the accumulator must be zero-initialized; the original
                // relied on an uninitialized local, which is undefined on some
                // shader compilers.
                fixed4 c = fixed4(0, 0, 0, 0);
                for (int t = 0; t < 9; t++)
                {
                    c += tex2D(_MainTex, i.uvs[t]);
                }
                return c / 9;
            }

            // Approximate Gaussian: 3x3 kernel (a real Gaussian kernel is
            // typically 5x5). The weights must sum to 1 to preserve brightness:
            // 4*0.05 + 4*0.15 + 0.2 = 1.
            fixed4 gaussian(v2f i)
            {
                const half weights[9] =
                {
                    0.05, 0.15, 0.05,
                    0.15, 0.20, 0.15,
                    0.05, 0.15, 0.05
                };
                fixed4 c = fixed4(0, 0, 0, 0);
                for (int t = 0; t < 9; t++)
                {
                    c += weights[t] * tex2D(_MainTex, i.uvs[t]);
                }
                return c;
            }

            fixed4 frag (v2f i) : SV_Target
            {
                return gaussian(i);
            }
            ENDCG
        }
    }
}
// Sobel edge detection: convolves the luminance of a 3x3 neighborhood with the
// horizontal/vertical Sobel kernels and paints pixels below the edge threshold
// with _EdgeColor.
Shader "Custom/EdgeCheck" {
    Properties{
        _MainTex("Albedo (RGB)", 2D) = "white" {}
        _EdgeOnly("Edge Only",Range(-2,0.9)) = 0.5
        [HDR]_EdgeColor("Edge Color",Color) = (1,1,1,1)
    }
    CGINCLUDE
    #include "UnityCG.cginc"

    sampler2D _MainTex;
    float4 _MainTex_TexelSize;  // xy = (1/width, 1/height), set by Unity
    fixed _EdgeOnly;            // threshold: smaller G (stronger gradient) than this => edge
    fixed4 _EdgeColor;

    struct a2v
    {
        float4 vertex:POSITION;
        float2 texcoord:TEXCOORD0;
    };

    struct v2f
    {
        float4 pos:SV_POSITION;
        // 3x3 neighborhood UVs centered on the current texel; uv[4] is the center.
        half2 uv[9]:TEXCOORD0;
    };

    v2f vert(a2v v)
    {
        v2f o;
        o.pos = UnityObjectToClipPos(v.vertex);
        half2 uv = v.texcoord;
        // Row-major 3x3 offsets: (-1,-1)..(1,1); t==4 gives offset (0,0).
        // FIX: use _MainTex_TexelSize.xy — the original multiplied a half2 by the
        // full float4, relying on implicit truncation that fails on some compilers.
        for (int t = 0; t < 9; t++)
        {
            o.uv[t] = uv + half2(t % 3 - 1, t / 3 - 1) * _MainTex_TexelSize.xy;
        }
        return o;
    }

    // Returns 1 - |Gx| - |Gy|: larger values mean a weaker gradient (less edge-like).
    half sobel(v2f i)
    {
        // Sobel kernels (horizontal and vertical gradients).
        const half Gx[9] =
        {
            -1,-2,-1,
             0, 0, 0,
             1, 2, 1
        };
        const half Gy[9] =
        {
            -1, 0, 1,
            -2, 0, 2,
            -1, 0, 1
        };
        // FIX: accumulators must be zero-initialized; the original relied on
        // uninitialized locals, which is undefined on some shader compilers.
        half edgeX = 0;
        half edgeY = 0;
        for (int it = 0; it < 9; it++)
        {
            // Luminance of each tap weights its contribution to the gradient.
            half lum = Luminance(tex2D(_MainTex, i.uv[it]).rgb);
            edgeX += lum * Gx[it];
            edgeY += lum * Gy[it];
        }
        return 1 - abs(edgeX) - abs(edgeY);
    }

    fixed4 frag(v2f i) :SV_Target
    {
        // Larger G => smaller gradient => less likely to be an edge.
        half G = sobel(i);
        if (G > _EdgeOnly)
        {
            return tex2D(_MainTex, i.uv[4]);  // not an edge: keep the source color
        }
        return _EdgeColor;                    // edge: paint with the edge color
    }
    ENDCG

    SubShader {
        Tags{ "RenderType" = "Opaque" }
        LOD 200
        Pass
        {
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag
            ENDCG
        }
    }
    FallBack "Diffuse"
}
将其作为屏幕后处理特效应用后,效果如下。注意:边缘检测一定要在光照计算完成之前进行,否则光照会影响检测结果。
这两者shader原理都是通过对周围的uv采样再乘以一个卷积核来实现的。