首先附上项目源码:https://gitee.com/yichichunshui/CommandBufferBlur.git
同时有个类似的例子:
https://blogs.unity3d.com/cn/2015/02/06/extending-unity-5-rendering-pipeline-command-buffers/
https://www.cnblogs.com/hont/p/8968804.html
效果展示:
实现场景:
实现原理:
- 得到屏幕的rt,使用方法: cb.Blit(BuiltinRenderTextureType.CurrentActive, screenRT);
这里的BuiltinRenderTextureType.CurrentActive就是OnRenderImage中的source。
- 这个图是在AfterSkybox事件采样得到。skybox经过查看,是在不透明物体之后渲染的。
- 对这个屏幕图片进行多次模糊
- 在plane中对这个模糊的图进行采样,注意这个plane是最后渲染的,在队列Transparent中。
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using UnityEngine.Rendering;
/// <summary>
/// Captures the camera image at <see cref="CameraEvent.AfterSkybox"/> via a
/// command buffer, blurs it with a separable (horizontal + vertical) shader,
/// and publishes the result as the global texture "_BlurTex" for the glass
/// shader to sample.
/// </summary>
public class Blur : MonoBehaviour
{
    // Blurred copy of the screen; sampled by the glass shader via _BlurTex.
    public RenderTexture screenRT;
    // Ping-pong buffer for the separable blur passes.
    public RenderTexture tempRT;
    public Camera m_camera;
    // Number of horizontal+vertical blur iterations applied per frame.
    public int blurTimes = 0;
    public Shader blurShader;

    private Material blurMat;
    private CommandBuffer cb;
    private int blurID;

    private void OnEnable()
    {
        blurMat = new Material(blurShader);
        cb = new CommandBuffer();
        cb.name = "copyScreenxxxxxx";
        // AfterSkybox: the target already contains all opaque geometry plus the
        // skybox, so CurrentActive is the image OnRenderImage would see as source.
        m_camera.AddCommandBuffer(CameraEvent.AfterSkybox, cb);
        blurID = Shader.PropertyToID("_BlurTex");
    }

    private void OnDisable()
    {
        // Undo everything OnEnable set up so toggling the component doesn't leak.
        if (m_camera != null && cb != null)
        {
            m_camera.RemoveCommandBuffer(CameraEvent.AfterSkybox, cb);
        }
        if (cb != null)
        {
            cb.Release();
            cb = null;
        }
        ReleaseRT(ref screenRT);
        ReleaseRT(ref tempRT);
        if (blurMat != null)
        {
            Destroy(blurMat);
            blurMat = null;
        }
    }

    public void Update()
    {
        EnsureRenderTextures();

        // Rebuild the command buffer each frame so blurTimes can change at runtime.
        cb.Clear();
        cb.Blit(BuiltinRenderTextureType.CurrentActive, screenRT);

        // FindPass is loop-invariant: look the passes up once, not per iteration.
        int horizontalPass = blurMat.FindPass("Horizontal");
        int verticalPass = blurMat.FindPass("Vertical");
        for (int i = 0; i < blurTimes; ++i)
        {
            cb.Blit(screenRT, tempRT, blurMat, horizontalPass);
            cb.Blit(tempRT, screenRT, blurMat, verticalPass);
        }

        // Publish the blurred result as the global _BlurTex at this point in the
        // frame. (The original called the deprecated Texture.SetGlobalShaderProperty
        // and never used the cached blurID.)
        cb.SetGlobalTexture(blurID, screenRT);
    }

    // Create the two RTs once and recreate them only when the camera size changes.
    // The original allocated two fresh RenderTextures every frame without releasing
    // the previous ones, leaking GPU memory.
    private void EnsureRenderTextures()
    {
        int w = m_camera.pixelWidth;
        int h = m_camera.pixelHeight;
        if (screenRT == null || screenRT.width != w || screenRT.height != h)
        {
            ReleaseRT(ref screenRT);
            ReleaseRT(ref tempRT);
            screenRT = new RenderTexture(w, h, 0);
            tempRT = new RenderTexture(w, h, 0);
        }
    }

    // Release a RenderTexture's GPU resources and null the reference.
    private static void ReleaseRT(ref RenderTexture rt)
    {
        if (rt != null)
        {
            rt.Release();
            rt = null;
        }
    }
}
这个shader,主要是对屏幕的图片进行水平和垂直模糊处理。
// Separable 7-tap Gaussian blur. The Blur component selects the two passes
// by name ("Horizontal" / "Vertical") via Material.FindPass and ping-pongs
// between two render textures.
Shader "My/BlurShader"
{
    Properties
    {
        _MainTex("Texture", 2D) = "white" {}
    }
    CGINCLUDE
    #include "UnityCG.cginc"

    // 7-tap Gaussian weights; the centre tap sits at index maxOffset.
    static const int maxOffset = 3;
    static const float kernel[7] = {
        0.071303,
        0.131514,
        0.189879,
        0.214607,
        0.189879,
        0.131514,
        0.071303 };

    struct appdata
    {
        float4 vertex : POSITION;
        float2 uv : TEXCOORD0;
    };

    struct v2f
    {
        float2 uv : TEXCOORD0;
        float4 vertex : SV_POSITION;
    };

    sampler2D _MainTex;
    // .x/.y hold 1/width and 1/height of _MainTex (one texel step).
    float4 _MainTex_TexelSize;

    // Pass-through vertex shader for the full-screen blit quad.
    v2f vert(appdata v)
    {
        v2f o;
        o.vertex = UnityObjectToClipPos(v.vertex);
        o.uv = v.uv;
        return o;
    }

    // One-dimensional 7-tap blur along 'step' (one texel in the blur direction).
    fixed4 blur1D(float2 uv, float2 step)
    {
        fixed4 acc = fixed4(0, 0, 0, 0);
        for (int t = -maxOffset; t <= maxOffset; ++t)
        {
            acc += tex2D(_MainTex, uv + step * t) * kernel[t + maxOffset];
        }
        return acc;
    }
    ENDCG

    SubShader
    {
        Pass
        {
            Name "Horizontal"
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag
            fixed4 frag(v2f i) : SV_Target
            {
                // Blur along x: neighbouring texels on the same row.
                return blur1D(i.uv, float2(_MainTex_TexelSize.x, 0));
            }
            ENDCG
        }
        Pass
        {
            Name "Vertical"
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag
            fixed4 frag(v2f i) : SV_Target
            {
                // Blur along y: neighbouring texels in the same column.
                return blur1D(i.uv, float2(0, _MainTex_TexelSize.y));
            }
            ENDCG
        }
    }
}
plane所用的shader。它有两个注意点:
一个是渲染队列,一个是根据自己的屏幕坐标uv,采样那张模糊的图的对应部分(注意其他没有被采样的部分,依然是backbuffer中的数据)。
// Glass plane: samples the globally-published blurred screen copy (_BlurTex)
// at the plane's own screen position, so only the pixels covered by the plane
// appear blurred.
Shader "My/Glass"
{
    SubShader
    {
        // Transparent queue: drawn after all opaque geometry (and after the
        // command buffer has captured and blurred the screen at AfterSkybox).
        Tags { "Queue"="Transparent" }
        Pass
        {
            ZTest Always
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag
            #include "UnityCG.cginc"

            struct appdata
            {
                float4 vertex : POSITION;
                float2 uv : TEXCOORD0;
            };

            struct v2f
            {
                float4 vertex : SV_POSITION;
                float4 uvgrab : TEXCOORD1;
            };

            // Blurred screen copy set globally by the Blur component.
            sampler2D _BlurTex;

            v2f vert (appdata v)
            {
                v2f o;
                o.vertex = UnityObjectToClipPos(v.vertex);
                // Screen-space coordinate of this vertex, so the fragment stage
                // reads _BlurTex exactly where the plane sits on screen.
                o.uvgrab = ComputeGrabScreenPos(o.vertex);
                return o;
            }

            fixed4 frag (v2f i) : SV_Target
            {
                // Perspective-correct projective sample of the blurred screen.
                return tex2Dproj(_BlurTex, UNITY_PROJ_COORD(i.uvgrab));
            }
            ENDCG
        }
    }
}