其实简单理解即是拿这一帧这个像素,和它上一帧做比较,知道了它的速度是多少,再在这一帧这里通过它的速度对附近像素进行采样,得到新的像素,产生动态模糊的效果。
方法一:
使用速度映射技术。速度映射图中存了每个像素的速度,然后使用这个值决定模糊的方向和大小。可以把场景中物体的速度渲染到一张纹理中。但这需要修改场景中所有物体的Shader,比较麻烦。
方法二:
利用深度纹理在片元着色器中为每个像素计算其在世界空间的位置。这是通过使用当前帧的视角*投影矩阵的逆矩阵,对NDC下的顶点坐标进行变换得到的。然后用前一帧的视角*投影矩阵重新投影该位置,计算两帧之间的位置差生成速度,用一个屏幕后处理即可完成效果。但每个像素都进行了两次矩阵乘法,对性能有影响。
标准化设备坐标(Normalized Device Coordinates, NDC)
屏幕后处理实现
MotionBlurWithDepthTexture.cs脚本(挂在摄像机上)
说明都加在注释里了
using UnityEngine;
using System.Collections;
// Screen-space motion blur using the camera's depth texture.
// Attach to a camera; reconstructs each pixel's world position from depth,
// reprojects it with last frame's view-projection matrix, and blurs along
// the resulting per-pixel velocity in the companion shader.
public class MotionBlurWithDepthTexture : PostEffectsBase {
// Shader implementing the effect (assign in the Inspector).
public Shader motionBlurShader;
private Material motionBlurMaterial = null;
// Lazily (re)creates the material from the shader via the base-class helper.
public Material material {
get {
motionBlurMaterial = CheckShaderAndCreateMaterial(motionBlurShader, motionBlurMaterial);
return motionBlurMaterial;
}
}
private Camera myCamera;
// Cached reference to the Camera component on this GameObject.
public Camera camera {
get {
if (myCamera == null) {
myCamera = GetComponent<Camera>();
}
return myCamera;
}
}
// Blur strength; scales the per-pixel velocity in the shader.
[Range(0.0f, 1.0f)]
public float blurSize = 0.5f;
// Previous frame's view * projection matrix of this camera.
private Matrix4x4 previousViewProjectionMatrix;
void OnEnable() {
// Ask Unity to render a depth texture (_CameraDepthTexture) for this camera.
camera.depthTextureMode |= DepthTextureMode.Depth;
previousViewProjectionMatrix = camera.projectionMatrix * camera.worldToCameraMatrix;
}
void OnRenderImage (RenderTexture src, RenderTexture dest) {
if (material != null) {
material.SetFloat("_BlurSize", blurSize);
// Pass last frame's matrix to the shader; matrix uniforms set from script
// do not need an entry in the shader's Properties block.
material.SetMatrix("_PreviousViewProjectionMatrix", previousViewProjectionMatrix);
// camera.projectionMatrix is the projection matrix;
// camera.worldToCameraMatrix is the view matrix.
Matrix4x4 currentViewProjectionMatrix = camera.projectionMatrix * camera.worldToCameraMatrix;
// Inverse lets the shader map NDC coordinates back to world space.
Matrix4x4 currentViewProjectionInverseMatrix = currentViewProjectionMatrix.inverse;
// Pass the inverse to the shader.
material.SetMatrix("_CurrentViewProjectionInverseMatrix", currentViewProjectionInverseMatrix);
// Save this frame's matrix for use on the next frame.
previousViewProjectionMatrix = currentViewProjectionMatrix;
// Apply the effect.
Graphics.Blit (src, dest, material);
} else {
Graphics.Blit(src, dest);
}
}
}
基类
using UnityEngine;
using System.Collections;
[ExecuteInEditMode]
[RequireComponent (typeof(Camera))]
public class PostEffectsBase : MonoBehaviour {
// Verify platform capabilities; disable the effect when unsupported.
protected void CheckResources()
{
if (!CheckSupport()) {
NotSupported();
}
}
// Returns true when the platform supports image effects and render textures.
protected bool CheckSupport() {
bool supported = SystemInfo.supportsImageEffects && SystemInfo.supportsRenderTextures;
if (!supported) {
Debug.LogWarning("This platform does not support image effects or render textures.");
}
return supported;
}
// Called when the platform cannot run the effect: switch the component off.
protected void NotSupported() {
enabled = false;
}
protected void Start() {
CheckResources();
}
/// <summary>
/// Validate the shader and (re)create the material used for post-processing.
/// </summary>
/// <param name="shader">Shader used by this effect</param>
/// <param name="material">Existing material, reused when it still matches</param>
/// <returns>A usable material, or null when the shader is missing or unsupported</returns>
protected Material CheckShaderAndCreateMaterial(Shader shader, Material material) {
if (shader == null || !shader.isSupported) {
return null;
}
// Reuse the cached material when it already wraps this shader.
if (material && material.shader == shader) {
return material;
}
Material fresh = new Material(shader) { hideFlags = HideFlags.DontSave };
return fresh ? fresh : null;
}
}
Shader部分代码,拖给摄像机脚本:
Shader "Unity Shaders Book/Chapter 13/Motion Blur With Depth Texture" {
Properties {
_MainTex ("Base (RGB)", 2D) = "white" {}
_BlurSize ("Blur Size", Float) = 1.0
}
SubShader {
//CGINCLUDE avoids writing two identical frag functions:
//Unity inserts the code between CGINCLUDE and ENDCG into every pass,
//so it is declared once and reused by each pass.
CGINCLUDE
#include "UnityCG.cginc"
sampler2D _MainTex;
half4 _MainTex_TexelSize;
//Depth texture provided by Unity (camera must have DepthTextureMode.Depth set)
sampler2D _CameraDepthTexture;
//Matrices declared here can be set directly from the C# script;
//they need no entry in the Properties block.
float4x4 _CurrentViewProjectionInverseMatrix;
float4x4 _PreviousViewProjectionMatrix;
half _BlurSize;
struct v2f {
float4 pos : SV_POSITION;
half2 uv : TEXCOORD0;
half2 uv_depth : TEXCOORD1;
};
v2f vert(appdata_img v) {
v2f o;
o.pos = UnityObjectToClipPos(v.vertex);
o.uv = v.texcoord;
o.uv_depth = v.texcoord;
//On Direct3D-like platforms the depth texture may be vertically flipped,
//so flip the depth UV when the main texture's texel size indicates it.
#if UNITY_UV_STARTS_AT_TOP
if (_MainTex_TexelSize.y < 0)
o.uv_depth.y = 1 - o.uv_depth.y;
#endif
return o;
}
fixed4 frag(v2f i) : SV_Target {
//Sample the depth texture to get this pixel's depth value
float d = SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture, i.uv_depth);
//Build the NDC position: map uv and depth from [0,1] to [-1,1] via x*2-1
float4 H = float4(i.uv.x * 2 - 1, i.uv.y * 2 - 1, d * 2 - 1, 1);
//Transform NDC by the inverse of the current view-projection matrix
float4 D = mul(_CurrentViewProjectionInverseMatrix, H);
//Divide by w to get the position in world space
float4 worldPos = D / D.w;
//Current NDC position of this pixel
float4 currentPos = H;
//Reproject the world position with last frame's view-projection matrix
float4 previousPos = mul(_PreviousViewProjectionMatrix, worldPos);
// Convert to nonhomogeneous points [-1,1] by dividing by w.
previousPos /= previousPos.w;
//Position difference between the two frames gives the per-pixel velocity
//(halved to map the NDC-space difference into a usable UV offset)
float2 velocity = (currentPos.xy - previousPos.xy)/2.0f;
//Use the velocity to sample neighboring pixels and blend them into a new color
float2 uv = i.uv;
float4 c = tex2D(_MainTex, uv);
uv += velocity * _BlurSize;
//Two more samples stepped along the velocity direction, scaled by _BlurSize
for (int it = 1; it < 3; it++, uv += velocity * _BlurSize) {
float4 currentColor = tex2D(_MainTex, uv);
c += currentColor;
}
//Average the 3 samples
c /= 3;
return fixed4(c.rgb, 1.0);
}
ENDCG
//The single pass needed for the motion blur effect
Pass {
ZTest Always Cull Off ZWrite Off
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
ENDCG
}
}
FallBack Off
}