效果
思路
在相机上挂脚本,获得参数: 模糊程度、前一个视角投影矩阵、当前视角投影矩阵的逆矩阵,传递到shader中
shader中,先对深度纹理采样,得到深度值。再通过之前传递的矩阵,得到世界空间中,当前帧该点所在位置与前一帧该点所在位置。 再根据这两个点,求得速度值(也就是这两个点位置差的一半)。使用传入的模糊程度值*速度值,得到新的采样点。
将这些采样点的颜色求和再求平均值,得到最终颜色值,进行返回。
实现
c#代码
using System;
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
/// <summary>
/// Camera-space motion blur post effect. Stores last frame's view-projection
/// matrix and passes it, together with the inverse of the current one, to the
/// shader, which reconstructs per-pixel velocity from the depth texture.
/// </summary>
[RequireComponent(typeof(Camera))]
public class MotionBlurWithDepthTexture1 : PostEffectBaseTest
{
    // Shader asset assigned in the inspector.
    public Shader motionBlurShader;
    private Material motionBlurMaterial = null;

    // Lazily created material wrapping motionBlurShader.
    public Material material
    {
        get
        {
            motionBlurMaterial = CheckShaderAndCreateMaterial(motionBlurShader, motionBlurMaterial);
            return motionBlurMaterial;
        }
    }

    // Blur strength multiplied with the per-pixel velocity in the shader.
    [Range(0.1f, 1f)]
    public float blurSize = 0.5f;

    private Camera myCamera;

    // Cached reference to the camera this script is attached to.
    public Camera camera
    {
        get
        {
            if (myCamera == null)
            {
                myCamera = GetComponent<Camera>();
            }
            return myCamera;
        }
    }

    // View-projection matrix of the previous frame (world -> clip).
    private Matrix4x4 previousViewProjectMatrix;

    private void OnEnable()
    {
        // The shader samples _CameraDepthTexture, so the camera must render depth.
        camera.depthTextureMode |= DepthTextureMode.Depth;
        // Seed with the current matrix so the first frame after enabling does not
        // compute velocity against an uninitialized (zero) matrix, which would
        // produce one frame of extreme bogus blur.
        previousViewProjectMatrix = camera.projectionMatrix * camera.worldToCameraMatrix;
    }

    private void OnRenderImage(RenderTexture src, RenderTexture dest)
    {
        if (material != null)
        {
            // Blur strength.
            material.SetFloat("_BlurSize", blurSize);
            // Last frame's world -> clip matrix.
            material.SetMatrix("_PreviousViewProjectionMatrix", previousViewProjectMatrix);
            // Current world -> clip matrix ...
            Matrix4x4 currentViewProjectMatrix = camera.projectionMatrix * camera.worldToCameraMatrix;
            // ... and its inverse (clip -> world) for position reconstruction.
            material.SetMatrix("_currentViewProjectInverseMatrix", currentViewProjectMatrix.inverse);
            // Remember this frame's matrix for the next frame.
            previousViewProjectMatrix = currentViewProjectMatrix;
            Graphics.Blit(src, dest, material);
            // NOTE: removed per-frame Debug.Log("render") — it spams the console
            // and allocates every rendered frame.
        }
        else
        {
            Graphics.Blit(src, dest);
        }
    }
}
shader代码
// Screen-space motion blur: reconstructs each pixel's world position from the
// depth texture, re-projects it with last frame's view-projection matrix, and
// blurs along the resulting screen-space velocity.
Shader "Custom/MotionBlurWithdDepthTextureTest" {
    Properties {
        _MainTex ("MainTex", 2D) = "white" {}
        _BlurSize ("_BlurSize", Float) = 1.0
    }
    SubShader {
        CGINCLUDE
        #include "UnityCG.cginc"

        sampler2D _MainTex;
        half4 _MainTex_TexelSize;
        // Depth texture rendered by the camera (requires DepthTextureMode.Depth).
        sampler2D _CameraDepthTexture;
        // Last frame's world -> clip matrix, set from the C# script.
        float4x4 _PreviousViewProjectionMatrix;
        // Inverse of the current world -> clip matrix (clip -> world).
        float4x4 _currentViewProjectInverseMatrix;
        float _BlurSize;

        struct v2f {
            float4 pos : SV_POSITION;
            half2 uv : TEXCOORD0;
            half2 uv_depth : TEXCOORD1;
        };

        v2f vert(appdata_img v) {
            v2f o;
            o.pos = UnityObjectToClipPos(v.vertex);
            o.uv = v.texcoord;
            o.uv_depth = v.texcoord;
            // On platforms where UVs start at the top, the depth texture may be
            // flipped relative to _MainTex — compensate here.
            #if UNITY_UV_STARTS_AT_TOP
            if (_MainTex_TexelSize.y < 0)
                o.uv_depth.y = 1 - o.uv_depth.y;
            #endif
            return o;
        }

        fixed4 frag(v2f i) : SV_Target {
            // Sample the non-linear depth value for this pixel.
            float d = SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture, i.uv_depth);
            // Rebuild the pixel's NDC position, mapping uv/depth from [0,1] to [-1,1].
            float4 H = float4(i.uv.x * 2 - 1, i.uv.y * 2 - 1, d * 2 - 1, 1);
            // NDC -> world: apply the inverse view-projection, then the
            // perspective divide by w.
            float4 D = mul(_currentViewProjectInverseMatrix, H);
            float4 worldPos = D / D.w;
            // Re-project the same world point with last frame's view-projection.
            float4 currentPos = H;
            float4 previousPos = mul(_PreviousViewProjectionMatrix, worldPos);
            previousPos /= previousPos.w;
            // Screen-space velocity = half the NDC position delta between frames
            // (NDC spans 2 units, so /2 converts the delta to UV scale).
            float2 velocity = (currentPos.xy - previousPos.xy) / 2.0f;

            // Accumulate 3 samples along the velocity direction, starting at
            // the pixel itself.
            float2 uv = i.uv;
            float4 c = tex2D(_MainTex, uv);
            uv += velocity * _BlurSize;
            for (int it = 1; it < 3; it++, uv += velocity * _BlurSize) {
                c += tex2D(_MainTex, uv);
            }
            c /= 3; // average the samples
            return fixed4(c.rgb, 1);
        }
        ENDCG

        Pass {
            ZTest Always Cull Off ZWrite Off
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag
            ENDCG
        }
    }
    // Keyword form, NOT a quoted string: FallBack "Off" would look up a
    // shader literally named "Off" instead of disabling the fallback.
    FallBack Off
}