https://catlikecoding.com/unity/tutorials/custom-srp/directional-shadows/
矩阵推导参考
// Rewrites a light-space view-projection matrix so its output lands in one
// tile of the shadow atlas: clip-space [-1,1] is remapped to [0,1] texture
// space, then scaled/offset into the tile selected by `offset` on a
// split-by-split grid. Works on whole rows instead of individual elements.
Matrix4x4 ConvertToAtlasMatrix (Matrix4x4 m, Vector2 offset, int split) {
    // On reversed-Z platforms flip the z row so stored depth is consistent.
    if (SystemInfo.usesReversedZBuffer) {
        m.SetRow(2, -m.GetRow(2));
    }
    float scale = 1f / split;
    // Row 3 (the w row) is never modified, so it can be read once up front.
    // Adding it bakes the +0.5*w / +offset*w translation in, which becomes
    // a plain +0.5 / +offset after the perspective divide.
    Vector4 wRow = m.GetRow(3);
    m.SetRow(0, ((m.GetRow(0) + wRow) * 0.5f + wRow * offset.x) * scale);
    m.SetRow(1, ((m.GetRow(1) + wRow) * 0.5f + wRow * offset.y) * scale);
    // Depth is remapped to [0,1] but needs no tile offset/scale.
    m.SetRow(2, (m.GetRow(2) + wRow) * 0.5f);
    return m;
}
Second, clip space is defined inside a cube with coordinates going from −1 to 1, with zero at its center. But texture coordinates and depth go from zero to one. We can bake this conversion into the matrix by scaling and offsetting the XYZ dimensions by half. We could do this with a matrix multiplication, but it would result in a lot of multiplications with zero and needless additions. So let's adjust the matrix directly.
// Step 1: remap clip-space [-1,1] to texture-space [0,1] by folding the
// 0.5 scale and 0.5 offset directly into rows 0-2. Row 3 (the w row) is
// added into each row so the +0.5 offset survives the perspective divide.
m.m00 = 0.5f * (m.m00 + m.m30);
m.m01 = 0.5f * (m.m01 + m.m31);
m.m02 = 0.5f * (m.m02 + m.m32);
m.m03 = 0.5f * (m.m03 + m.m33);
m.m10 = 0.5f * (m.m10 + m.m30);
m.m11 = 0.5f * (m.m11 + m.m31);
m.m12 = 0.5f * (m.m12 + m.m32);
m.m13 = 0.5f * (m.m13 + m.m33);
m.m20 = 0.5f * (m.m20 + m.m30);
m.m21 = 0.5f * (m.m21 + m.m31);
m.m22 = 0.5f * (m.m22 + m.m32);
m.m23 = 0.5f * (m.m23 + m.m33);
return m;
Finally, we have to apply the tile offset and scale. Once again we can do this directly to avoid a lot of unnecessary calculations.
// Step 2: place the result inside one atlas tile — shift x/y by the tile
// offset (times w, so it survives the perspective divide) and shrink by
// 1/split. Only rows 0 (x) and 1 (y) are affected; depth needs no tiling.
float scale = 1f / split;
m.m00 = (0.5f * (m.m00 + m.m30) + offset.x * m.m30) * scale;
m.m01 = (0.5f * (m.m01 + m.m31) + offset.x * m.m31) * scale;
m.m02 = (0.5f * (m.m02 + m.m32) + offset.x * m.m32) * scale;
m.m03 = (0.5f * (m.m03 + m.m33) + offset.x * m.m33) * scale;
m.m10 = (0.5f * (m.m10 + m.m30) + offset.y * m.m30) * scale;
m.m11 = (0.5f * (m.m11 + m.m31) + offset.y * m.m31) * scale;
m.m12 = (0.5f * (m.m12 + m.m32) + offset.y * m.m32) * scale;
m.m13 = (0.5f * (m.m13 + m.m33) + offset.y * m.m33) * scale;
我们需要把(-1,1)的坐标映射到(0.5,0.75)
-1<x<1
两边统一除以8,得到:
-0.125<x/8<0.125
然后再两边加上0.625,得到:
0.5<x/8+0.625<0.75
我们知道缩放由矩阵对角线上的元素控制,而平移由第四列的元素控制,其前三个分量分别表示x、y、z方向的偏移量。具体如下展示:
正好对应上面的矩阵。
这里说明一下offset:如上图,图集分成了4x4的格子,因此offset.x和offset.y都在0到3之间变化。
SetViewport画到rt的指定位置方法1:
比如,这里将rt,渲染到右上角的位置。此时我们这样做:
// Allocate a 1024x1024 depth render texture to serve as the shadow atlas.
m_commandBuffer.GetTemporaryRT(m_dirShadowAtlasId, 1024, 1024, 24, FilterMode.Bilinear, RenderTextureFormat.Depth, RenderTextureReadWrite.Linear);
// Bind it: previous contents are irrelevant (DontCare), results must be kept (Store).
m_commandBuffer.SetRenderTarget(m_dirShadowAtlasId, RenderBufferLoadAction.DontCare, RenderBufferStoreAction.Store);
m_commandBuffer.ClearRenderTarget(true, true, Color.clear);
// Restrict rendering to the top-right 512x512 quadrant of the atlas.
m_commandBuffer.SetViewport(new Rect(512, 512, 512, 512));
比如这里的rt的大小为1024*1024,渲染到右上角的话, 使用:
m_commandBuffer.SetViewport(new Rect(512, 512, 512, 512));
此时矩阵的设置:
// Light camera's view-projection matrix (world -> light clip space).
Matrix4x4 vp = projectionMatrix * m_myCamera.m_lightCamera.worldToCameraMatrix;
// offset (1,1) selects the top-right tile of a 2x2 atlas (split = 2).
Matrix4x4 vvp = ConvertToAtlasMatrix(vp, new Vector2(1, 1), 2);
Shader.SetGlobalMatrix("_gWorldToLight", vvp); // transforms a fragment's world position into the light camera's atlas space
// Variant used with the SetViewport approach: only x (row 0) and y (row 1)
// are remapped into the atlas tile. The reversed-Z flip and the z-row
// [0,1] remap are deliberately left commented out — depth appears to be
// handled in the shader instead (note the UNITY_REVERSED_Z branch in the
// fragment shader below) — TODO confirm.
Matrix4x4 ConvertToAtlasMatrix(Matrix4x4 m, Vector2 offset, int split)
{
//if (SystemInfo.usesReversedZBuffer)
//{
// m.m20 = -m.m20;
// m.m21 = -m.m21;
// m.m22 = -m.m22;
// m.m23 = -m.m23;
//}
float scale = 1f / split;
// x row: [-1,1] -> [0,1], then offset/scale into the chosen tile.
m.m00 = (0.5f * (m.m00 + m.m30) + offset.x * m.m30) * scale;
m.m01 = (0.5f * (m.m01 + m.m31) + offset.x * m.m31) * scale;
m.m02 = (0.5f * (m.m02 + m.m32) + offset.x * m.m32) * scale;
m.m03 = (0.5f * (m.m03 + m.m33) + offset.x * m.m33) * scale;
// y row: same remap with the tile's y offset.
m.m10 = (0.5f * (m.m10 + m.m30) + offset.y * m.m30) * scale;
m.m11 = (0.5f * (m.m11 + m.m31) + offset.y * m.m31) * scale;
m.m12 = (0.5f * (m.m12 + m.m32) + offset.y * m.m32) * scale;
m.m13 = (0.5f * (m.m13 + m.m33) + offset.y * m.m33) * scale;
//m.m20 = 0.5f * (m.m20 + m.m30);
//m.m21 = 0.5f * (m.m21 + m.m31);
//m.m22 = 0.5f * (m.m22 + m.m32);
//m.m23 = 0.5f * (m.m23 + m.m33);
return m;
}
这里split为2,因为图集被分成2x2共4块;右上角那块瓦片对应的offset为(1,1)。
则在shader中采样的时候:
// Vertex stage: outputs the clip-space position plus everything the
// fragment stage needs for shadow sampling — world position, world normal,
// and the position projected into the light's shadow-atlas space.
v2f vert(appdata_base v) {
    v2f o;
    // World-space position of the vertex.
    float4 wpos = mul(unity_ObjectToWorld, v.vertex);
    o.worldPos = wpos;
    // Project into the light camera's (atlas-adjusted) space.
    o.shadowPos = mul(_gWorldToLight, wpos);
    o.worldNormal = UnityObjectToWorldNormal(v.normal);
    o.pos = UnityObjectToClipPos(v.vertex);
    return o;
}
// Fragment stage (excerpt — the function continues beyond this snippet).
fixed4 frag(v2f i) : SV_Target{
// Perspective divide yields light-space coordinates already mapped into
// the atlas tile by _gWorldToLight; use them directly as the sampling UV.
float2 uv = i.shadowPos.xy / i.shadowPos.w;
/* uv.x = uv.x * 0.25 + 0.75;
uv.y = uv.y * 0.25 + 0.75;*/
//float depth = SAMPLE_DEPTH_TEXTURE(_gShadowTexture, uv);
float depth = Sample(uv);
// The matrix variant above skips the reversed-Z flip, so undo it here.
#if defined(UNITY_REVERSED_Z)
depth = 1 - depth;
#endif
这里的uv,我们直接使用插值之后的 i.shadowPos.xy / i.shadowPos.w。
这里已经将uv映射到了(0.5,1)范围内了,然后直接采样即可。
git地址:
https://gitee.com/yichichunshui/mvpmatrix.git
分支master
节点:
f7cbf788ac6d3a69df2baa872e7ad97638026aad