想要做一种能见度很低、不会飘动的浓雾效果,Unity 自带的体积雾达不到要求,于是想到通过对比深度值来进行渲染。
实现想要的浓雾效果之后,再加上高斯模糊就变成了相机的景深(聚焦)效果。
效果由 shader 中 fixed4 blur = /*tex2D(_BlurTex, i.uv)*/_fog_color; 这一行决定:若改为 blur = tex2D(_BlurTex, i.uv)(同时放开 C# 中 /*-----*/ 注释部分),得到的是相机景深效果;若保持 blur = _fog_color,得到的是浓雾效果。
shader代码如下
// Screen-space depth-of-field / dense-fog post effect.
// Pass 0: one direction of a separable 7-tap blur (direction comes from _offsets).
// Pass 1: blends the sharp scene against either the blurred RT (_BlurTex) or a
// solid fog color (_fog_color), weighted by scene depth vs. a focal distance.
Shader "Custom/DepthOfField" {
Properties{
_MainTex("Base (RGB)", 2D) = "white" {}
_BlurTex("Blur", 2D) = "white"{}
}
CGINCLUDE
#include "UnityCG.cginc"
// Vertex-to-fragment data for the blur pass: center UV plus three symmetric
// tap pairs, each pair packed as xy (positive offset) / zw (negative offset).
struct v2f_blur
{
float4 pos : SV_POSITION;
float2 uv : TEXCOORD0;
float4 uv01 : TEXCOORD1;
float4 uv23 : TEXCOORD2;
float4 uv45 : TEXCOORD3;
};
// Vertex-to-fragment data for the DOF pass: uv may be V-flipped for sampling
// the blur render texture on DX-style platforms; uv1 always samples _MainTex.
struct v2f_dof
{
float4 pos : SV_POSITION;
float2 uv : TEXCOORD0;
float2 uv1 : TEXCOORD1;
};
sampler2D _MainTex;
float4 _MainTex_TexelSize;   // x,y = 1/width, 1/height; y < 0 when the RT is V-flipped
sampler2D _BlurTex;          // blurred scene copy (only set from C# when the blur path is enabled)
sampler2D_float _CameraDepthTexture;
float4 _offsets;             // blur direction/spacing in texels, set per pass from C#
fixed4 _fog_color;           // solid color used instead of _BlurTex for the fog variant
float _focalDistance;        // focal point, already mapped to 0-1 depth space by C#
float _nearBlurScale;
float _farBlurScale;
// Gaussian blur vertex shader: precomputes the six offset tap UVs.
v2f_blur vert_blur(appdata_img v)
{
v2f_blur o;
// Convert the texel-space offset to UV space.
// NOTE(review): this writes to the uniform _offsets; Unity's CG compilers
// treat it as a per-invocation copy, but it is an unusual pattern — confirm
// on all target platforms.
_offsets *= _MainTex_TexelSize.xyxy;
o.pos = UnityObjectToClipPos(v.vertex);
o.uv = v.texcoord.xy;
o.uv01 = v.texcoord.xyxy + _offsets.xyxy * float4(1, 1, -1, -1);
o.uv23 = v.texcoord.xyxy + _offsets.xyxy * float4(1, 1, -1, -1) * 2.0;
o.uv45 = v.texcoord.xyxy + _offsets.xyxy * float4(1, 1, -1, -1) * 3.0;
return o;
}
// Gaussian blur fragment shader: 7-tap weighted sum (weights total 1.0).
fixed4 frag_blur(v2f_blur i) : SV_Target
{
fixed4 color = fixed4(0,0,0,0);
color += 0.40 * tex2D(_MainTex, i.uv);
color += 0.15 * tex2D(_MainTex, i.uv01.xy);
color += 0.15 * tex2D(_MainTex, i.uv01.zw);
color += 0.10 * tex2D(_MainTex, i.uv23.xy);
color += 0.10 * tex2D(_MainTex, i.uv23.zw);
color += 0.05 * tex2D(_MainTex, i.uv45.xy);
color += 0.05 * tex2D(_MainTex, i.uv45.zw);
return color;
}
// Depth-of-field vertex shader.
v2f_dof vert_dof(appdata_img v)
{
v2f_dof o;
// MVP transform.
o.pos = UnityObjectToClipPos(v.vertex);
// Pass the UVs through; uv1 stays unflipped for _MainTex.
o.uv.xy = v.texcoord.xy;
o.uv1.xy = o.uv.xy;
// On DX-style platforms the texture origin is the top-left, so the UV used
// for the (render-texture) blur sample must be flipped vertically.
#if UNITY_UV_STARTS_AT_TOP
if (_MainTex_TexelSize.y < 0)
o.uv.y = 1 - o.uv.y;
#endif
return o;
}
fixed4 frag_dof(v2f_dof i) : SV_Target
{
// Sample the original sharp scene.
fixed4 ori = tex2D(_MainTex, i.uv1);
// Blur sample: swap the comment below to use the blurred RT (depth of
// field) instead of the solid fog color.
fixed4 blur = /*tex2D(_BlurTex, i.uv)*/_fog_color;
// Read the depth at this pixel...
float depth = SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture, i.uv);
// ...and convert it to linear 0-1 space.
depth = Linear01Depth(depth);
// Closer than the focal point: keep the sharp image. Farther: lerp toward
// the blurred/fog image; the lerp avoids a hard seam. This gives the
// far-blur half of the effect.
fixed4 final = (depth <= _focalDistance) ? ori : lerp(ori, blur, clamp((depth - _focalDistance) * _farBlurScale, 0, 1));
// Second step: pixels nearer than the focal point lerp toward the blur as
// well, giving the near-blur half of the effect.
final = (depth > _focalDistance) ? final : lerp(ori, blur, clamp((_focalDistance - depth) * _nearBlurScale, 0, 1));
// The focal plane stays sharp; both sides blend by their depth distance
// from it, so only the focal region appears in focus.
// The ?: above compiles to branches; GPUs dislike divergent branches and
// may execute both sides. The commented lines below are a branch-free
// alternative (more obscure to read):
//float focalTest = clamp(sign(depth - _focalDistance),0,1);
//fixed4 final = (1 - focalTest) * ori + focalTest * lerp(ori, blur, clamp((depth - _focalDistance) * _farBlurScale, 0, 1));
//final = (focalTest)* final + (1 - focalTest) * lerp(ori, blur, clamp((_focalDistance - depth) * _nearBlurScale, 0, 1));
return final;
}
ENDCG
SubShader
{
//pass 0: separable Gaussian blur
Pass
{
ZTest Off
Cull Off
ZWrite Off
Fog{ Mode Off }
CGPROGRAM
#pragma vertex vert_blur
#pragma fragment frag_blur
ENDCG
}
//pass 1: depth-of-field / fog composite
Pass
{
ZTest Off
Cull Off
ZWrite Off
Fog{ Mode Off }
ColorMask RGBA
CGPROGRAM
#pragma vertex vert_dof
#pragma fragment frag_dof
ENDCG
}
}
}
C# 代码:脚本挂载到相机上;其中 /**/ 注释部分是进行相应的高斯模糊处理的代码,放开注释即可启用。
// Runs in edit mode too, so the effect previews without entering play mode.
[ExecuteInEditMode]
// Screen post-processing must live on a Camera.
[RequireComponent(typeof(Camera))]
// NOTE: the (misspelled) class name is kept as-is for compatibility with
// existing scenes/prefabs referencing this script.
public class DepthOfFiled : MonoBehaviour
{
    // Shader asset (Custom/DepthOfField), assigned in the Inspector.
    public Shader shader = null;
    private Material _material = null;

    // Lazily-created material wrapping the shader; null when the shader is
    // missing or unsupported on this platform.
    public Material _Material
    {
        get
        {
            if (_material == null)
                _material = GenerateMaterial(shader);
            return _material;
        }
    }

    // Creates a non-serialized material for the screen effect.
    // Returns null when the shader is absent or unsupported.
    protected Material GenerateMaterial(Shader shader)
    {
        if (shader == null || !shader.isSupported)
            return null;
        Material material = new Material(shader);
        // Don't save the runtime-created material into the scene/assets.
        material.hideFlags = HideFlags.DontSave;
        return material;
    }

    // Solid fog color used by the shader's _fog_color branch
    // (unused when the Gaussian-blur path is enabled).
    public Color fogColor = Color.white;
    [Range(0.0f, 100.0f)]
    public float focalDistance = 10.0f;
    [Range(0.0f, 100.0f)]
    public float nearBlurScale = 0.0f;
    [Range(0.0f, 1000.0f)]
    public float farBlurScale = 50.0f;

    // Down-sample shift: temporary RTs are source >> downSample in each
    // dimension. Only relevant to the Gaussian-blur path.
    public int downSample = 1;
    // Blur tap spacing in texels. Only relevant to the Gaussian-blur path.
    public int samplerScale = 1;

    private Camera _mainCam = null;
    public Camera MainCam
    {
        get
        {
            if (_mainCam == null)
                _mainCam = GetComponent<Camera>();
            return _mainCam;
        }
    }

    void OnEnable()
    {
        // depthTextureMode is a bit mask; OR the Depth flag in so the camera
        // renders _CameraDepthTexture for the shader.
        MainCam.depthTextureMode |= DepthTextureMode.Depth;
    }

    void OnDisable()
    {
        MainCam.depthTextureMode &= ~DepthTextureMode.Depth;
    }

    void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        if (_Material == null)
        {
            // No usable material: pass the image through untouched instead of
            // leaving the destination undefined.
            Graphics.Blit(source, destination);
            return;
        }

        // Keep the focal point between the near and far clip planes.
        // BUG FIX: the original called Mathf.Clamp but discarded its return
        // value, so the focal distance was never actually clamped.
        float clampedFocalDistance = Mathf.Clamp(focalDistance, MainCam.nearClipPlane, MainCam.farClipPlane);

        // Two temporary RTs at reduced resolution, used by the (optional)
        // blur path below; GetTemporary pools them, so this is cheap even
        // while the blur is commented out.
        RenderTexture temp1 = RenderTexture.GetTemporary(source.width >> downSample, source.height >> downSample, 0, source.format);
        RenderTexture temp2 = RenderTexture.GetTemporary(source.width >> downSample, source.height >> downSample, 0, source.format);
        /*// Copy the scene into the low-res RT (down-sampling).
        Graphics.Blit(source, temp1);
        // Separable Gaussian blur: vertical then horizontal, both via pass 0.
        // Not needed for the solid-color fog variant.
        _Material.SetVector("_offsets", new Vector4(0, samplerScale, 0, 0));
        Graphics.Blit(temp1, temp2, _Material, 0);
        _Material.SetVector("_offsets", new Vector4(samplerScale, 0, 0, 0));
        Graphics.Blit(temp2, temp1, _Material, 0);
        // Hand the blurred image to the depth-of-field pass.
        _Material.SetTexture("_BlurTex", temp1);*/

        // Fog color used in place of the blurred texture.
        _Material.SetColor("_fog_color", fogColor);
        // Focal distance mapped into the shader's 0-1 depth space, plus the
        // near/far lerp weights that control how quickly blur ramps up.
        _Material.SetFloat("_focalDistance", FocalDistance01(clampedFocalDistance));
        _Material.SetFloat("_nearBlurScale", nearBlurScale);
        _Material.SetFloat("_farBlurScale", farBlurScale);

        // Pass 1 performs the depth comparison; the sharp scene goes in as _MainTex.
        Graphics.Blit(source, destination, _Material, 1);

        // Return the pooled RTs.
        RenderTexture.ReleaseTemporary(temp1);
        RenderTexture.ReleaseTemporary(temp2);
    }

    // Maps a camera-space distance to the 0-1 space the shader compares
    // against Linear01Depth.
    // NOTE(review): dividing by (far - near) rather than far only matches
    // Linear01Depth exactly when nearClipPlane is ~0 — confirm intended.
    private float FocalDistance01(float distance)
    {
        return MainCam.WorldToViewportPoint((distance - MainCam.nearClipPlane) * MainCam.transform.forward + MainCam.transform.position).z / (MainCam.farClipPlane - MainCam.nearClipPlane);
    }
}