一、运动模糊
之前说过运动模糊的两种实现方式:
一种是累积缓存,来混合多张连续的图像,消耗性能。
还有一种是采用速度缓存,这个缓存存储了各个像素当前的运动速度,然后使用该值决定模糊的方向和大小。
速度缓存就是通过深度纹理逆运算得出NDC坐标再乘以逆矩阵从而还原出世界空间中的坐标,再使用上一帧的变换矩阵乘以坐标来得到上一帧的NDC坐标,然后根据两个NDC坐标计算像素速度,从而使用速度对主纹理进行相邻像素采样,然后混合得到运动模糊效果。
二、实现
摄像机屏幕后处理脚本代码:(详细解释都在注释)
using UnityEngine;
using System.Collections;
// Image-effect that reconstructs world positions from the camera depth texture,
// reprojects them with the previous frame's view-projection matrix, and lets the
// shader blur along the resulting per-pixel velocity (Unity Shaders Book, ch. 13).
public class MotionBlurWithDepthTexture : PostEffectsBase {
	// Shader implementing the depth-based motion blur effect.
	public Shader motionBlurShader;

	private Material motionBlurMaterial = null;

	// Material created (and re-validated) from motionBlurShader by the base class.
	// NOTE: every access runs CheckShaderAndCreateMaterial, so callers should
	// cache the result locally instead of reading this property repeatedly.
	public Material material {
		get {
			motionBlurMaterial = CheckShaderAndCreateMaterial(motionBlurShader, motionBlurMaterial);
			return motionBlurMaterial;
		}
	}

	private Camera myCamera; // lazily cached Camera component on this GameObject

	public Camera camera {
		get {
			if (myCamera == null) {
				myCamera = GetComponent<Camera>();
			}
			return myCamera;
		}
	}

	// Blur strength passed to the shader as _BlurSize.
	[Range(0.0f, 1.0f)]
	public float blurSize = 0.5f;

	// View-projection matrix of the previous frame, used by the shader to
	// compute per-pixel screen-space velocity.
	private Matrix4x4 previousViewProjectionMatrix;

	void OnEnable() {
		// Ask Unity to render the depth texture the shader samples as _CameraDepthTexture.
		camera.depthTextureMode |= DepthTextureMode.Depth;
		// Seed the "previous" matrix so the very first frame has no bogus velocity.
		previousViewProjectionMatrix = camera.projectionMatrix * camera.worldToCameraMatrix;
	}

	void OnRenderImage (RenderTexture src, RenderTexture dest) {
		// Cache the property once per frame: each access re-runs the
		// shader check / material creation in the getter.
		Material mat = material;
		if (mat != null) {
			mat.SetFloat("_BlurSize", blurSize);
			// Previous frame's view-projection matrix: world -> last frame's NDC.
			mat.SetMatrix("_PreviousViewProjectionMatrix", previousViewProjectionMatrix);
			// Current frame's view-projection matrix and its inverse (NDC -> world).
			Matrix4x4 currentViewProjectionMatrix = camera.projectionMatrix * camera.worldToCameraMatrix;
			mat.SetMatrix("_CurrentViewProjectionInverseMatrix", currentViewProjectionMatrix.inverse);
			// Remember this frame's matrix for the next frame.
			previousViewProjectionMatrix = currentViewProjectionMatrix;
			Graphics.Blit (src, dest, mat);
		} else {
			// Shader unavailable on this platform: pass the image through untouched.
			Graphics.Blit(src, dest);
		}
	}
}
Shader:
// Upgrade NOTE: replaced 'mul(UNITY_MATRIX_MVP,*)' with 'UnityObjectToClipPos(*)'
Shader "Unity Shaders Book/Chapter 13/Motion Blur With Depth Texture" {
Properties {
_MainTex ("Base (RGB)", 2D) = "white" {} // source render texture, i.e. the src argument of OnRenderImage
_BlurSize ("Blur Size", Float) = 1.0 // blur strength factor
}
SubShader {
CGINCLUDE
#include "UnityCG.cginc"
sampler2D _MainTex;
half4 _MainTex_TexelSize; // xxx_TexelSize: Unity fills in the per-texel size of the matching texture
sampler2D _CameraDepthTexture; // camera depth texture provided by Unity (DepthTextureMode.Depth)
float4x4 _CurrentViewProjectionInverseMatrix; // inverse of the current frame's view-projection matrix (NDC -> world)
float4x4 _PreviousViewProjectionMatrix; // previous frame's view-projection matrix (world -> last frame's clip space)
half _BlurSize; // blur strength factor
struct v2f {
float4 pos : SV_POSITION;
half2 uv : TEXCOORD0;
half2 uv_depth : TEXCOORD1; // separate UV used only for sampling the depth texture
};
v2f vert(appdata_img v) {
v2f o;
o.pos = UnityObjectToClipPos(v.vertex);
o.uv = v.texcoord;
o.uv_depth = v.texcoord;
#if UNITY_UV_STARTS_AT_TOP // platform difference: on DirectX-like platforms with AA enabled the texel size y is negative,
if (_MainTex_TexelSize.y < 0)
o.uv_depth.y = 1 - o.uv_depth.y; // so flip the depth UV's y coordinate to sample right side up
#endif
return o;
}
fixed4 frag(v2f i) : SV_Target {
float d = SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture, i.uv_depth); // sample the depth texture to get depth d in [0,1]
float4 H = float4(i.uv.x * 2 - 1, i.uv.y * 2 - 1, d * 2 - 1, 1); // map uv and depth from [0,1] to [-1,1]: this pixel's NDC position
float4 D = mul(_CurrentViewProjectionInverseMatrix, H); // transform NDC back through the inverse view-projection
float4 worldPos = D / D.w; // divide by w to undo the perspective projection baked into the
// inverse transform; this yields the world-space position of the pixel
// (the same trick as the homogeneous divide, applied in reverse order)
float4 currentPos = H; // this frame's NDC position
float4 previousPos = mul(_PreviousViewProjectionMatrix, worldPos); // reproject the world position with last frame's view-projection matrix
previousPos /= previousPos.w; // homogeneous divide: last frame's NDC position
float2 velocity = (currentPos.xy - previousPos.xy)/2.0f; // screen-space velocity of this pixel between the two frames
float2 uv = i.uv;
float4 c = tex2D(_MainTex, uv); // sample the unblurred color
uv += velocity * _BlurSize; // step along the velocity, scaled by the blur factor
for (int it = 1; it < 3; it++, uv += velocity * _BlurSize) { // take two more samples along the velocity direction
float4 currentColor = tex2D(_MainTex, uv); // neighbour sample
c += currentColor; // accumulate
}
c /= 3; // average the three samples for the final blurred color
return fixed4(c.rgb, 1.0); // result, with alpha forced to 1
}
ENDCG
Pass {
ZTest Always Cull Off ZWrite Off
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
ENDCG
}
}
FallBack Off
}