UnityShader——阴影源码解析(一)

最近想着自己拿SRP写个能用的管线试试的,写到阴影的时候发现自己对Unity的阴影绘制流程并没有一个比较全面的了解,正好借此机会把Unity默认管线的阴影绘制过一遍

一、非屏幕空间阴影
我们从最简单的入手,目标PC DX11平台,不要Cascade,不要 screen space shadowmap。在 Graphics setting 里关掉 Cascaded Shadow,场景只打一个 hard shadow 的平行光,此时场景和 framedebugger 如下:

这里写图片描述
这里写图片描述

可以看到没有 screen space shadow 的 collect 阶段了,阴影渲染阶段直接调用了 std 的 shadowcaster pass,涉及代码如下:

// Shadow-caster vertex transform used by the std ShadowCaster pass:
// first computes the clip-space position with normal-offset bias applied
// (UnityClipSpaceShadowCasterPos), then adds the linear depth bias
// (UnityApplyLinearShadowBias). Expects `v.vertex` / `v.normal` in scope.
#define TRANSFER_SHADOW_CASTER_NOPOS(o,opos) \
        opos = UnityClipSpaceShadowCasterPos(v.vertex, v.normal); \
        opos = UnityApplyLinearShadowBias(opos);

// Transforms an object-space vertex into the shadow caster's clip space.
// When unity_LightShadowBias.z is non-zero, the world-space position is
// first pushed *inward* along the world-space normal ("normal offset"
// bias) before being projected with UNITY_MATRIX_VP.
//   vertex: object-space vertex position
//   normal: object-space vertex normal
float4 UnityClipSpaceShadowCasterPos(float4 vertex, float3 normal)
{
    float4 wPos = mul(unity_ObjectToWorld, vertex);

    // z == 0 means the user disabled normal-offset bias; skip the extra math.
    if (unity_LightShadowBias.z != 0.0)
    {
        float3 wNormal = UnityObjectToWorldNormal(normal);
        float3 wLight = normalize(UnityWorldSpaceLightDir(wPos.xyz));

        // apply normal offset bias (inset position along the normal)
        // bias needs to be scaled by sine between normal and light direction
        // (http://the-witness.net/news/2013/09/shadow-mapping-summary-part-1/)
        //
        // unity_LightShadowBias.z contains user-specified normal offset amount
        // scaled by world space texel size.

        // sin = sqrt(1 - cos^2): surfaces facing away from the light
        // (large angle) get the full offset, surfaces facing it get none.
        float shadowCos = dot(wNormal, wLight);
        float shadowSine = sqrt(1-shadowCos*shadowCos);
        float normalBias = unity_LightShadowBias.z * shadowSine;

        wPos.xyz -= wNormal * normalBias;
    }

    return mul(UNITY_MATRIX_VP, wPos);
}

// Applies the linear (constant) depth bias to a clip-space position.
//   unity_LightShadowBias.x - linear bias in clip space (small value;
//                             sign depends on the depth convention)
//   unity_LightShadowBias.y - blend factor for the near-plane clamp below
//                             (usually 1)
// The clamp keeps the biased depth from crossing the near clip plane,
// which would otherwise make the caster vanish from the shadow map.
float4 UnityApplyLinearShadowBias(float4 clipPos)
{
#if defined(UNITY_REVERSED_Z)
    // We use max/min instead of clamp to ensure proper handling of the rare case
    // where both numerator and denominator are zero and the fraction becomes NaN.
    clipPos.z += max(-1, min(unity_LightShadowBias.x / clipPos.w, 0));
    float clamped = min(clipPos.z, clipPos.w*UNITY_NEAR_CLIP_VALUE);
#else
    clipPos.z += saturate(unity_LightShadowBias.x/clipPos.w);
    float clamped = max(clipPos.z, clipPos.w*UNITY_NEAR_CLIP_VALUE);
#endif
    clipPos.z = lerp(clipPos.z, clamped, unity_LightShadowBias.y);
    return clipPos;
}

其解释我直接引用 topameng 的:

如果z值没有偏移,很容易出现 Shadow Acne 现象(类似的比如 z-fighting);而如果z偏移值过大,又容易出现 Peter Panning 现象,影子和物体出现分离。现在常用的 shadow bias 的计算方法,是基于物体斜度的,称为 slope scale based depth bias,就是 UnityClipSpaceShadowCasterPos 中的内容。UnityApplyLinearShadowBias 函数是在裁剪空间中线性增加Z坐标的值。unity_LightShadowBias.x 表示阴影裁切空间中的线性偏移 bias,一般是个比较小的负数比如 -0.0005,透视相机下 unity 是按照摄像机参数计算的这个值;y 表示插值,一般为1;z 代表阴影 slope depth bias scale。对于直射光你可以认为 ShadowMap 就是一张深度图,你改变颜色信息并不能影响深度,上面提到过是返回 SV_Target 和 SV_Depth 的区别。shadowMap = new RenderTexture(width, height, 16, RenderTextureFormat.Depth); 这样给一样能很好地工作,不过Unity有新的 RenderTextureFormat.Shadowmap 格式,在 Depth 上加了一些保护之类。UnityApplyLinearShadowBias 给的偏移是往离摄像机近的方向挪一点。两个偏移都是往摄像机近一点调节,并不是相互抵消的,如果出现 Peter Panning 影子和模型分离,可以去光源面板调整阴影的 bias 项

继续往后看,此时framedebugger 给出的 keywords 是 :
DIRECTIONAL SHADOWS_SCREEN LIGHTPROBE_SH
我们可以在 AutoLight.cginc 中看到对应的阴影代码:

// ---- Screen space direction light shadows helpers (any version)
// Two variants selected by UNITY_NO_SCREENSPACE_SHADOWS:
//  - defined:     sample the directional shadow map directly (no collect pass);
//                 TRANSFER_SHADOW outputs a shadow-space coordinate.
//  - not defined: sample the screen-space shadow texture produced by the
//                 collect pass; TRANSFER_SHADOW outputs a screen position.
#if defined (SHADOWS_SCREEN)

    #if defined(UNITY_NO_SCREENSPACE_SHADOWS)
        UNITY_DECLARE_SHADOWMAP(_ShadowMapTexture);
        // object space -> world space -> shadow space (cascade 0 matrix).
        #define TRANSFER_SHADOW(a) a._ShadowCoord = mul( unity_WorldToShadow[0], mul( unity_ObjectToWorld, v.vertex ) );
        inline fixed unitySampleShadow (unityShadowCoord4 shadowCoord)
        {
            #if defined(SHADOWS_NATIVE)
                // Hardware comparison sampler does the depth test; remap the
                // 0..1 result by shadow strength (_LightShadowData.r).
                fixed shadow = UNITY_SAMPLE_SHADOW(_ShadowMapTexture, shadowCoord.xyz);
                shadow = _LightShadowData.r + shadow * (1-_LightShadowData.r);
                return shadow;
            #else
                // No comparison sampler: read raw depth and compare manually.
                unityShadowCoord dist = SAMPLE_DEPTH_TEXTURE(_ShadowMapTexture, shadowCoord.xy);
                // tegra is confused if we use _LightShadowData.x directly
                // with "ambiguous overloaded function reference max(mediump float, float)"
                unityShadowCoord lightShadowDataX = _LightShadowData.x;
                unityShadowCoord threshold = shadowCoord.z;
                return max(dist > threshold, lightShadowDataX);
            #endif
        }

    #else // UNITY_NO_SCREENSPACE_SHADOWS
        // Screen-space path: attenuation was already resolved by the
        // collect pass, so a single texture read suffices here.
        UNITY_DECLARE_SCREENSPACE_SHADOWMAP(_ShadowMapTexture);
        #define TRANSFER_SHADOW(a) a._ShadowCoord = ComputeScreenPos(a.pos);
        inline fixed unitySampleShadow (unityShadowCoord4 shadowCoord)
        {
            fixed shadow = UNITY_SAMPLE_SCREEN_SHADOW(_ShadowMapTexture, shadowCoord);
            return shadow;
        }

    #endif

    #define SHADOW_COORDS(idx1) unityShadowCoord4 _ShadowCoord : TEXCOORD##idx1;
    #define SHADOW_ATTENUATION(a) unitySampleShadow(a._ShadowCoord)
#endif

显然,此时 Unity 没有使用 screen space shadow, PC 平台也肯定有原生 shadowmap 支持

// SHADOWS_NATIVE: set whenever the platform has hardware shadow
// comparison samplers (everything except plain GLES2.0).
#if !defined(SHADER_API_GLES)
    // all platforms except GLES2.0 have built-in shadow comparison samplers
    #define SHADOWS_NATIVE
#elif defined(SHADER_API_GLES) && defined(UNITY_ENABLE_NATIVE_SHADOW_LOOKUPS)
    // GLES2.0 also has built-in shadow comparison samplers, but only on platforms where we pass UNITY_ENABLE_NATIVE_SHADOW_LOOKUPS from the editor
    #define SHADOWS_NATIVE
#endif

因此这里对应的 keywords 是 UNITY_NO_SCREENSPACE_SHADOWS SHADOWS_NATIVE
其中_LightShadowData 定义如下:

_LightShadowData.x - shadow strength
_LightShadowData.y - Appears to be unused
_LightShadowData.z - 1.0 / shadow far distance
_LightShadowData.w - shadow near distance

可以看到,其实做的事情跟 screen space shadow 差不多,同样是将顶点坐标转换到世界坐标再转换到阴影坐标,在 DX11 平台下,Shadowmap 直接通过 SamplerComparisonState 来采样,得到阴影图,简单直接!

二、屏幕空间阴影
我们在graphics setting 中开启 cascaded shadow, 但是在 quality setting 中设置 shadow cascades 为 no cascades,此时 framedebugger 显示的绘制流程如下:

这里写图片描述

这里可以看到多出了不少东西,其中 UpdateDepthTexture 在以下情况会出现:

1、Soft Particles option in quality settings.
2、The current Camera’s Camera.depthTextureMode is set to include Depth, most frequently set by an image effect.
3、The brightest directional light in the scene has shadows enabled.

其实就是需要用到相机深度图的情况啦,至于为何要单独把所有不透明物体渲染一遍,貌似是个祖传BUG,我这里用的是2018.1.0b12
这里写图片描述

接下来的阴影绘制都差不多,我们直接跳到 CollectShadow 阶段,在当前设置下,对应的keywords 为:SHADOWS_SPLIT_SPHERES SHADOWS_SINGLE_CASCADE,代码如下:

// Collect pass of the built-in ScreenSpaceShadows shader (hard-shadow
// variant): a full-screen pass that writes shadow attenuation into the
// screen-space shadow texture. No depth write/test, both faces drawn.
SubShader {
    Tags{ "ShadowmapFilter" = "HardShadow" }
    Pass {
        ZWrite Off ZTest Always Cull Off

        CGPROGRAM
        #pragma vertex vert
        #pragma fragment frag_hard
        #pragma multi_compile_shadowcollector

        // Thin wrapper so frag code is independent of how the
        // view-space position is reconstructed.
        inline float3 computeCameraSpacePosFromDepth(v2f i)
        {
            return computeCameraSpacePosFromDepthAndVSInfo(i);
        }
        ENDCG
    }
}

// Reconstructs the view-space position of the pixel from the camera depth
// texture. Computes both the perspective result (far-plane ray scaled by
// linear depth) and the orthographic result (near/far lerp by raw depth),
// then selects one with unity_OrthoParams.w (1 = ortho camera, 0 = persp).
inline float3 computeCameraSpacePosFromDepthAndVSInfo(v2f i)
{
    float zdepth = SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture, i.uv.xy);

    // 0..1 linear depth, 0 at camera, 1 at far plane.
    float depth = lerp(Linear01Depth(zdepth), zdepth, unity_OrthoParams.w);
#if defined(UNITY_REVERSED_Z)
    // Flip raw depth back to the 0-near / 1-far convention for the ortho lerp.
    zdepth = 1 - zdepth;
#endif

    // view position calculation for perspective & ortho cases
    float3 vposPersp = i.ray * depth;
    float3 vposOrtho = lerp(i.orthoPosNear, i.orthoPosFar, zdepth);
    // pick the perspective or ortho position as needed
    float3 camPos = lerp(vposPersp, vposOrtho, unity_OrthoParams.w);
    return camPos.xyz;
}

// Vertex shader of the collect pass. Besides the usual clip-space position
// and UVs, it precomputes everything the fragment shader needs to rebuild
// view-space positions: the perspective view ray (from the mesh's ray
// attribute) and the ortho near/far view-space positions.
v2f vert (appdata v)
{
    v2f o;
    UNITY_SETUP_INSTANCE_ID(v);
    UNITY_TRANSFER_INSTANCE_ID(v, o);
    UNITY_INITIALIZE_VERTEX_OUTPUT_STEREO(o);
    float4 clipPos = UnityObjectToClipPos(v.vertex);
    o.pos = clipPos;
    o.uv.xy = v.texcoord;

    // zw = screen position, so the fragment shader does not need
    // unity_CameraInvProjection at the PS level.
    o.uv.zw = ComputeNonStereoScreenPos(clipPos);

    // Perspective case
#ifdef UNITY_STEREO_INSTANCING_ENABLED
    o.ray = v.ray[unity_StereoEyeIndex];
#else
    o.ray = v.ray;
#endif

    // To compute view space position from Z buffer for orthographic case,
    // we need different code than for perspective case. We want to avoid
    // doing matrix multiply in the pixel shader: less operations, and less
    // constant registers used. Particularly with constant registers, having
    // unity_CameraInvProjection in the pixel shader would push the PS over SM2.0
    // limits.
    clipPos.y *= _ProjectionParams.x;
    float3 orthoPosNear = mul(unity_CameraInvProjection, float4(clipPos.x,clipPos.y,-1,1)).xyz;
    float3 orthoPosFar  = mul(unity_CameraInvProjection, float4(clipPos.x,clipPos.y, 1,1)).xyz;
    // Unity view space looks down -Z; flip to match the reconstruction math.
    orthoPosNear.z *= -1;
    orthoPosFar.z *= -1;
    o.orthoPosNear = orthoPosNear;
    o.orthoPosFar = orthoPosFar;

    return o;
}

// Fragment shader of the collect pass (1-tap hard shadow):
// rebuild view-space position from depth, lift it to world space, pick the
// cascade by split-sphere weights, then sample the shadow map once and
// remap by shadow strength (_LightShadowData.r).
fixed4 frag_hard (v2f i) : SV_Target
{
    UNITY_SETUP_STEREO_EYE_INDEX_POST_VERTEX(i); // required for sampling the correct slice of the shadow map render texture array

    float3 vpos = computeCameraSpacePosFromDepth(i);

    float4 wpos = mul (unity_CameraToWorld, float4(vpos,1));


    fixed4 cascadeWeights = GET_CASCADE_WEIGHTS (wpos, vpos.z);
    float4 shadowCoord = GET_SHADOW_COORDINATES(wpos, cascadeWeights);

    //1 tap hard shadow
    fixed shadow = UNITY_SAMPLE_SHADOW(_ShadowMapTexture, shadowCoord);
    // shadow==0 -> fully shadowed -> output shadow strength; 1 -> lit.
    shadow = lerp(_LightShadowData.r, 1.0, shadow);

    fixed4 res = shadow;
    return res;
}

其实就是把之前非屏幕空间时直接计算的世界坐标通过 depthmap 还原出来,剩下的阴影计算都一样,但这里的 v2f 结构体让我纠结了挺久:

// Interpolators for the screen-space shadow collect pass.
struct v2f {

    float4 pos : SV_POSITION;

    // xy uv / zw screenpos
    float4 uv : TEXCOORD0;
    // View space ray, for perspective case
    float3 ray : TEXCOORD1;
    // Orthographic view space positions (need xy as well for oblique matrices)
    float3 orthoPosNear : TEXCOORD2;
    float3 orthoPosFar  : TEXCOORD3;
    UNITY_VERTEX_INPUT_INSTANCE_ID
    UNITY_VERTEX_OUTPUT_STEREO
};

看不到源码,不知道这个 ray 究竟是怎么传过来的,尝试了很多,通过如下方法还原了出来:

vert:
o.ray = mul(unity_CameraInvProjection, float4((float2(v.texcoord.x, v.texcoord.y) - 0.5) * 2, 1, -1) );

frag:
i.ray *= (_ProjectionParams.z / i.ray.z);

这个20还是自己瞎凑的。。居然结果完全一样,具体怎么个机制还请源码大佬赐教

到了这里,就会想那screen space shadowmap 相比于直接渲染的 shadowmap 有何优势呢?尤其是在祖传 BUG 的情况下还平白多渲染了一张 depthmap,很显然,就是为了cascaded shadow,考虑如下情况:

假设,如果我们不使用 screen space shadow 的话,那么如何实现cascaded shadow?

1、通过 Object 与相机距离来决定阴影渲染到哪个层级的shadowmap?
这样的话对于上图中的情况,就可能出现同样距离 Object 的阴影质量相差甚远

2、通过 顶点甚至像素与相机距离来决定阴影渲染到哪个层级的shadowmap?
这样的话对于上图中的情况,则需要渲染到两张甚至更多的 Shadowmap 中,对于不支持 MRT 的设备就凉凉了

三、 Cascaded Shadow

// Built-in shadow constant buffer (UnityShaderVariables).
CBUFFER_START(UnityShadows)
	// Cascade split-sphere centers (xyz) used for cascade selection.
	float4 unity_ShadowSplitSpheres[4];
	// Squared radii of the four split spheres, one per component.
	float4 unity_ShadowSplitSqRadii;
	// x: linear depth bias, y: near-clamp lerp, z: normal-offset bias.
	float4 unity_LightShadowBias;
	// Per-cascade near/far split distances — presumably used by the
	// non-sphere (plane-based) cascade selection path; verify in source.
	float4 _LightSplitsNear;
	float4 _LightSplitsFar;
	// World -> shadow-map matrix per cascade.
	float4x4 unity_WorldToShadow[4];
	// x: shadow strength, z: 1/shadow far distance, w: shadow near distance
	// (y appears unused — see the _LightShadowData notes above in this article).
	half4 _LightShadowData;
	// NOTE(review): fade center + type flag by name — not shown in this
	// excerpt, confirm against UnityShaderVariables.cginc.
	float4 unity_ShadowFadeCenterAndType;
CBUFFER_END

这块的定义没有源码是真没辙了,原始管线分析不了,就来看看官方给的两个 SRP 的阴影绘制吧,均使用 2018.2.6f1版本的 unity, 3.0.0版本的 SRP

3.1 LightWeight
搭建场景如图:

framedebugger如图:

可以看到,阴影部分除了 depthprepass 顺序变了下其他的都和原始管线是一样的,再看其 DirectionLight 的阴影绘制过程:

/// <summary>
/// Cached integer IDs for the directional-shadow shader uniforms
/// (presumably filled via Shader.PropertyToID — the initialization is not
/// shown in this excerpt). Used when uploading cascade matrices, split
/// spheres, and shadow-map parameters to the GPU.
/// </summary>
private static class DirectionalShadowConstantBuffer
{
   public static int _WorldToShadow;
   public static int _ShadowData;
   public static int _DirShadowSplitSpheres0;
   public static int _DirShadowSplitSpheres1;
   public static int _DirShadowSplitSpheres2;
   public static int _DirShadowSplitSpheres3;
   public static int _DirShadowSplitSphereRadii;
   public static int _ShadowOffset0;
   public static int _ShadowOffset1;
   public static int _ShadowOffset2;
   public static int _ShadowOffset3;
   public static int _ShadowmapSize;
}

/// <summary>
/// Renders the cascaded shadow map for the main directional light:
/// validates the light, allocates the shadow atlas render texture, then
/// for each cascade extracts view/proj matrices + culling sphere and draws
/// the shadow casters into that cascade's atlas tile.
/// </summary>
void RenderDirectionalCascadeShadowmap(ref ScriptableRenderContext context, ref CullResults cullResults, ref LightData lightData, ref ShadowData shadowData)
{
   int shadowLightIndex = lightData.mainLightIndex;
   if (shadowLightIndex == -1)
       return;

   VisibleLight shadowLight = lightData.visibleLights[shadowLightIndex];
   Light light = shadowLight.light;
   Debug.Assert(shadowLight.lightType == LightType.Directional);

   if (light.shadows == LightShadows.None)
       return;

   // No visible shadow casters for this light -> nothing to render.
   Bounds bounds;
   if (!cullResults.GetShadowCasterBounds(shadowLightIndex, out bounds))
       return;

   CommandBuffer cmd = CommandBufferPool.Get(k_RenderDirectionalShadowmapTag);
   using (new ProfilingSample(cmd, k_RenderDirectionalShadowmapTag))
   {
       m_ShadowCasterCascadesCount = shadowData.directionalLightCascadeCount;
		
		// Tile size per cascade for the Shadow.Draw stage (all cascade
		// shadow maps share one render target, split evenly into tiles).
       int shadowResolution = LightweightShadowUtils.GetMaxTileResolutionInAtlas(shadowData.directionalShadowAtlasWidth, shadowData.directionalShadowAtlasHeight, m_ShadowCasterCascadesCount);
       float shadowNearPlane = light.shadowNearPlane;

       Matrix4x4 view, proj;
       var settings = new DrawShadowsSettings(cullResults, shadowLightIndex);

       m_DirectionalShadowmapTexture = RenderTexture.GetTemporary(shadowData.directionalShadowAtlasWidth,
           shadowData.directionalShadowAtlasHeight, k_ShadowmapBufferBits, m_ShadowmapFormat);
       m_DirectionalShadowmapTexture.filterMode = FilterMode.Bilinear;
       m_DirectionalShadowmapTexture.wrapMode = TextureWrapMode.Clamp;
       SetRenderTarget(cmd, m_DirectionalShadowmapTexture, RenderBufferLoadAction.DontCare,
           RenderBufferStoreAction.Store, ClearFlag.Depth, Color.black, TextureDimension.Tex2D);

       bool success = false;
       for (int cascadeIndex = 0; cascadeIndex < m_ShadowCasterCascadesCount; ++cascadeIndex)
       {
           success = LightweightShadowUtils.ExtractDirectionalLightMatrix(ref cullResults, ref shadowData, shadowLightIndex, cascadeIndex, shadowResolution, shadowNearPlane, out m_CascadeSplitDistances[cascadeIndex], out m_CascadeSlices[cascadeIndex], out view, out proj);
           if (success)
           {
               settings.splitData.cullingSphere = m_CascadeSplitDistances[cascadeIndex];
               LightweightShadowUtils.SetupShadowCasterConstants(cmd, ref shadowLight, proj, shadowResolution);
               LightweightShadowUtils.RenderShadowSlice(cmd, ref context, ref m_CascadeSlices[cascadeIndex], ref settings, proj, view);
           }
       }

       // Receiver constants are only needed if at least the last cascade drew.
       if (success)
           SetupDirectionalShadowReceiverConstants(cmd, ref shadowData, shadowLight);
   }
   context.ExecuteCommandBuffer(cmd);
   CommandBufferPool.Release(cmd);
}

/// <summary>
/// Computes the view/projection matrices, culling sphere, and atlas tile
/// placement for one cascade of a directional light. Returns false when
/// Unity's culling could not produce valid matrices for this cascade.
/// </summary>
public static bool ExtractDirectionalLightMatrix(ref CullResults cullResults, ref ShadowData shadowData, int shadowLightIndex, int cascadeIndex, int shadowResolution, float shadowNearPlane, out Vector4 cascadeSplitDistance, out ShadowSliceData shadowSliceData, out Matrix4x4 viewMatrix, out Matrix4x4 projMatrix)
     {
      ShadowSplitData splitData;
      // Get the culling-sphere info for this cascade from Unity's culling.
      bool success = cullResults.ComputeDirectionalShadowMatricesAndCullingPrimitives(shadowLightIndex,
          cascadeIndex, shadowData.directionalLightCascadeCount, shadowData.directionalLightCascades, shadowResolution, shadowNearPlane, out viewMatrix, out projMatrix,
          out splitData);
	
      cascadeSplitDistance = splitData.cullingSphere;
      // Compute which tile of the render target this cascade's shadow map
      // is drawn into (2x2 atlas layout).
      shadowSliceData.offsetX = (cascadeIndex % 2) * shadowResolution;
      shadowSliceData.offsetY = (cascadeIndex / 2) * shadowResolution;
      shadowSliceData.resolution = shadowResolution;
      // Build the world-to-shadow matrix with scale/bias applied (scaled
      // because the draw target is only one tile of the render target).
      shadowSliceData.shadowTransform = GetShadowTransform(projMatrix, viewMatrix);

      // If we have shadow cascades baked into the atlas we bake cascade transform
      // in each shadow matrix to save shader ALU and L/S
      if (shadowData.directionalLightCascadeCount > 1)
          ApplySliceTransform(ref shadowSliceData, shadowData.directionalShadowAtlasWidth, shadowData.directionalShadowAtlasHeight);

      return success;
  }

/// <summary>
/// Draws the shadow casters of one cascade into its atlas tile: sets the
/// viewport to the tile, enables a scissor rect inset by 4 px (a border
/// that keeps bilinear/PCF samples from bleeding between tiles), applies
/// the cascade's view/projection, then issues DrawShadows.
/// </summary>
public static void RenderShadowSlice(CommandBuffer cmd, ref ScriptableRenderContext context,
    ref ShadowSliceData shadowSliceData, ref DrawShadowsSettings settings,
    Matrix4x4 proj, Matrix4x4 view)
{
    cmd.SetViewport(new Rect(shadowSliceData.offsetX, shadowSliceData.offsetY, shadowSliceData.resolution, shadowSliceData.resolution));
    cmd.EnableScissorRect(new Rect(shadowSliceData.offsetX + 4, shadowSliceData.offsetY + 4, shadowSliceData.resolution - 8, shadowSliceData.resolution - 8));

    cmd.SetViewProjectionMatrices(view, proj);
    // Flush pending commands before DrawShadows, which goes through the context.
    context.ExecuteCommandBuffer(cmd);
    cmd.Clear();
    context.DrawShadows(ref settings);
    cmd.DisableScissorRect();
    context.ExecuteCommandBuffer(cmd);
    cmd.Clear();
}

很明显,应该是用了类似于这篇文章中提到的紧密投影的相关技术,可以看到 Shader 中的阴影部分代码,内置管线和LW的差不多,这里以LW为例:

// Selects the cascade index (0..3) for a world-space position by testing
// it against the four cascade bounding spheres.
half ComputeCascadeIndex(float3 positionWS)
{
	// Distance from the pixel's world position to each sphere center.
    float3 fromCenter0 = positionWS - _DirShadowSplitSpheres0.xyz;
    float3 fromCenter1 = positionWS - _DirShadowSplitSpheres1.xyz;
    float3 fromCenter2 = positionWS - _DirShadowSplitSpheres2.xyz;
    float3 fromCenter3 = positionWS - _DirShadowSplitSpheres3.xyz;
    float4 distances2 = float4(dot(fromCenter0, fromCenter0), dot(fromCenter1, fromCenter1), dot(fromCenter2, fromCenter2), dot(fromCenter3, fromCenter3));
	// 1 per component where the pixel lies inside that bounding sphere.
    half4 weights = half4(distances2 < _DirShadowSplitSphereRadii);
    // If the pixel is inside several spheres, keep only the lowest index
    // (i.e. the cascade closest to the camera).
    weights.yzw = saturate(weights.yzw - weights.xyz);

    // weights is one-hot, so this dot product yields the cascade index.
    return 4 - dot(weights, half4(4, 3, 2, 1));
}

// Transforms a world-space position into shadow-map space, selecting the
// cascade matrix at runtime when cascades are enabled (single matrix
// otherwise).
float4 TransformWorldToShadowCoord(float3 positionWS)
{
#ifdef _SHADOWS_CASCADE
    half cascadeIndex = ComputeCascadeIndex(positionWS);
    return mul(_WorldToShadow[cascadeIndex], float4(positionWS, 1.0));
#else
    return mul(_WorldToShadow[0], float4(positionWS, 1.0));
#endif
}

这里有一点比较疑惑,CSM 中通常会让不同层级的 shadowmap 略有重叠(overlap),处于重叠区域的像素点会同时在两张 shadowmap 上采样然后插值,这篇文章包括 Real-time rendering 4th 中也有提到:

One solution is to have the view volumes slightly overlap. Samples taken in these overlap zones gather results from both adjoining shadow maps and are blended [ Tuft, David 2011]

但从Unity的源码看来明显是没有混合的,从效果上来看也是:

关闭软阴影后更加明显:

既然如此,我们自己来混合好了,修改内置的 ScreenSpaceShadows Shader 并设置:

// Author-modified cascade weighting: besides the usual one-hot cascade
// weights, also outputs `anotherWeights` — the spheres the pixel is inside
// but that were suppressed in favor of a nearer cascade (i.e. the overlap
// set), and `distances2` — squared distances to each sphere center, so the
// fragment shader can blend between overlapping cascades.
inline fixed4 getCascadeWeights_splitSpheres(float3 wpos, out fixed4 anotherWeights, out float4 distances2)
{
    float3 fromCenter0 = wpos.xyz - unity_ShadowSplitSpheres[0].xyz;
    float3 fromCenter1 = wpos.xyz - unity_ShadowSplitSpheres[1].xyz;
    float3 fromCenter2 = wpos.xyz - unity_ShadowSplitSpheres[2].xyz;
    float3 fromCenter3 = wpos.xyz - unity_ShadowSplitSpheres[3].xyz;
    distances2 = float4(dot(fromCenter0,fromCenter0), dot(fromCenter1,fromCenter1), dot(fromCenter2,fromCenter2), dot(fromCenter3,fromCenter3));
    // 1 per component where the pixel lies inside that split sphere.
    fixed4 weights = float4(distances2 < unity_ShadowSplitSqRadii);
    fixed4 originalWeights = weights;
    // Keep only the nearest (lowest-index) cascade in `weights`.
    weights.yzw = saturate(weights.yzw - weights.xyz);
    // What remains is the set of farther cascades that also contain the pixel.
    anotherWeights = originalWeights - weights;
    return weights;
}

// Author-modified hard-shadow collect pass with cascade-overlap blending:
// samples the nearest cascade as usual, and when the pixel also falls
// inside the next cascade's sphere, samples that cascade too and blends
// the two results by how far the pixel is through cascade 0's sphere.
fixed4 frag_hard (v2f i) : SV_Target
{
    UNITY_SETUP_STEREO_EYE_INDEX_POST_VERTEX(i); // required for sampling the correct slice of the shadow map render texture array
    float4 wpos;
    float3 vpos;

#if defined(STEREO_CUBEMAP_RENDER_ON)
    // ODS path: world position comes from a precomputed texture.
    wpos.xyz = tex2D(_ODSWorldTexture, i.uv.xy).xyz;
    wpos.w = 1.0f;
    vpos = mul(unity_WorldToCamera, wpos).xyz;
#else
    // Normal path: rebuild view-space position from depth, lift to world.
    vpos = computeCameraSpacePosFromDepth(i);
    wpos = mul (unity_CameraToWorld, float4(vpos,1));
#endif
    fixed4 anotherWeights = 0;
    float4 distance2 = 0;
    fixed4 cascadeWeights = getCascadeWeights_splitSpheres (wpos, anotherWeights, distance2);
    float4 shadowCoord = GET_SHADOW_COORDINATES(wpos, cascadeWeights);
    //1 tap hard shadow
    fixed shadow = UNITY_SAMPLE_SHADOW(_ShadowMapTexture, shadowCoord);
    // Only blend cascades 0 and 1: it keeps things simple, and farther
    // cascades are too distant for the seam to be noticeable anyway.
    if(anotherWeights.y != 0)
    {
        float4 shadowCoord1 = GET_SHADOW_COORDINATES(wpos, anotherWeights);
        fixed shadow1 = UNITY_SAMPLE_SHADOW(_ShadowMapTexture, shadowCoord1);
        // Blend factor: 0 at half of cascade 0's squared radius, 1 at its
        // edge. Fixes from the original: use the *first* sphere's squared
        // radius (.x) rather than the whole float4 (scalar / vector would
        // silently produce a float4 and truncate), and saturate so the
        // lerp never extrapolates outside [0,1] within the overlap band.
        fixed blend = saturate((distance2.x / unity_ShadowSplitSqRadii.x - 0.5) * 2);
        shadow = lerp(shadow, shadow1, blend);
    }

    // shadow==0 -> fully shadowed -> shadow strength; 1 -> fully lit.
    shadow = lerp(_LightShadowData.r, 1.0, shadow);

    fixed4 res = shadow;
    return res;
}

效果如下:

阴影设置使用了Low Resolution + Hard Shadow, 这样效果更明显

剩下还有软阴影和具体的包围球生成算法以及各种Bias方法,下篇再写吧。。嗨呀好气啊,感觉半天毛都没搞出来

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值