Unity URP raytraced volumetric light (god ray) effect

I tried implementing volumetric light (god rays) in Unity with a raymarched ("raytrace") approach.

The effect at runtime is shown below.

Raymarching like this is expensive: with the step count pushed to 200, the scene all but grinds to a halt.

First the scene is rendered from the light's point of view (Unity already does this for the main light, with a configurable number of cascades).

The basic idea: in a full-screen pass, reconstruct each pixel's world position from the scene depth.
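
The reconstruction itself is just undoing the perspective divide and applying the inverse projection and view matrices. A condensed sketch of the cal_world_pos_by_dep helper that appears in the full shader below (_mtx_proj_inv and _mtx_view_inv are uploaded from the C# pass):

// ndc_dep: raw (non-linear) value from _CameraDepthTexture; screen_uv in [0,1]
float z_eye = LinearEyeDepth(ndc_dep, _ZBufferParams);                  // eye depth == clip-space w
float4 clip_pos = float4(screen_uv * 2.0 - 1.0, ndc_dep, 1.0) * z_eye; // undo the divide by w
float4 view_pos = mul(_mtx_proj_inv, clip_pos);                        // clip -> view space
float3 world_pos = mul(_mtx_view_inv, float4(view_pos.xyz, 1)).xyz;    // view -> world space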

From the world position, determine which cascade culling sphere the point falls in

(without cascaded shadows, e.g. for a spotlight, this step is unnecessary).
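
For the main light you do not have to do this by hand: URP's TransformWorldToShadowCoord calls ComputeCascadeIndex, which compares the squared distance from the point to each cascade's culling sphere. Roughly what Shadows.hlsl does (paraphrased, not a verbatim copy):

half ComputeCascadeIndex(float3 positionWS)
{
    // Squared distance from the point to each cascade culling sphere center
    float3 fromCenter0 = positionWS - _CascadeShadowSplitSpheres0.xyz;
    float3 fromCenter1 = positionWS - _CascadeShadowSplitSpheres1.xyz;
    float3 fromCenter2 = positionWS - _CascadeShadowSplitSpheres2.xyz;
    float3 fromCenter3 = positionWS - _CascadeShadowSplitSpheres3.xyz;
    float4 distances2 = float4(dot(fromCenter0, fromCenter0),
                               dot(fromCenter1, fromCenter1),
                               dot(fromCenter2, fromCenter2),
                               dot(fromCenter3, fromCenter3));

    // Pick the first (smallest) sphere that still contains the point
    half4 weights = half4(distances2 < _CascadeShadowSplitSphereRadii);
    weights.yzw = saturate(weights.yzw - weights.xyz);
    return half(4.0) - dot(weights, half4(4, 3, 2, 1));
}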

Then transform the world position into the light's view space (light_view_pos),

and map it through the light's projection matrix into the shadow map's depth texture.

Comparing the point's depth as seen from the light camera against the stored depth tells us whether it is occluded (i.e., in shadow).

The principle is the same as ordinary shadow mapping.
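
For a single non-cascaded light such as a spotlight, that comparison can be written out by hand. A minimal sketch, assuming a light view-projection matrix _light_vp and a light-space depth map _light_shadow_map uploaded from script (both names are hypothetical; the shader below relies on URP's TransformWorldToShadowCoord / GetMainLight instead):

TEXTURE2D(_light_shadow_map); SAMPLER(sampler_light_shadow_map);
float4x4 _light_vp; // hypothetical: light view-projection matrix set from C#

float sample_light_shadow(float3 world_pos)
{
    // World -> light clip space -> light NDC
    float4 light_clip = mul(_light_vp, float4(world_pos, 1.0));
    float3 light_ndc = light_clip.xyz / light_clip.w;
    // NDC xy in [-1,1] -> shadow map UV in [0,1]
    float2 shadow_uv = light_ndc.xy * 0.5 + 0.5;
    float stored_dep = SAMPLE_TEXTURE2D(_light_shadow_map, sampler_light_shadow_map, shadow_uv).r;
    // Compare against the closest occluder seen by the light
    // (a small depth bias is needed in practice to avoid shadow acne)
#if UNITY_REVERSED_Z
    return light_ndc.z >= stored_dep ? 1.0 : 0.0; // 1 = lit, 0 = in shadow
#else
    return light_ndc.z <= stored_dep ? 1.0 : 0.0;
#endif
}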

The shader code follows:

Shader "lsc/RaytraceShader"
{
    Properties
    {
        _MainTex ("Texture", 2D) = "white" {}
        _raytrace_step_count("rayrace step count", Int) = 5
        _scale("scale", float) = 1.0
    }
    SubShader
    {
        // No culling or depth
        Cull Off ZWrite Off ZTest Always

        Pass
        {
            HLSLPROGRAM
            #pragma vertex vert
            #pragma fragment frag
            #pragma multi_compile _ _MAIN_LIGHT_SHADOWS
            #pragma multi_compile _ _MAIN_LIGHT_SHADOWS_CASCADE
            #pragma multi_compile_fragment _ _ADDITIONAL_LIGHT_SHADOWS
            #pragma multi_compile_fragment _ _SHADOWS_SOFT

            #include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/Core.hlsl"
            #include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/Lighting.hlsl"

            struct appdata
            {
                float4 vertex : POSITION;
                float2 uv : TEXCOORD0;
            };

            struct v2f
            {
                float2 uv : TEXCOORD0;
                float4 vertex : SV_POSITION;
                float4 screen_pos : TEXCOORD1;
            };

            float4x4 _mtx_view_inv;
            float4x4 _mtx_proj_inv;
            TEXTURE2D_X_FLOAT(_CameraDepthTexture);
            SAMPLER(sampler_CameraDepthTexture);

            v2f vert (appdata v)
            {
                v2f o;

                VertexPositionInputs vertexInput = GetVertexPositionInputs(v.vertex.xyz);
                o.vertex = vertexInput.positionCS;
                o.screen_pos = ComputeScreenPos(o.vertex);

                o.uv = v.uv;

                return o;
            }

            sampler2D _MainTex;
            int _raytrace_step_count;
            float _scale;

            float4 cal_world_pos_by_dep(float ndc_dep, float2 screen_space, out float4 view_pos)
            {
                // Convert the non-linear depth to linear eye depth (equals clip-space w)
                float linearDepthZ = LinearEyeDepth(ndc_dep, _ZBufferParams);
                // Screen space to NDC
                float4 ndc_pos;
                ndc_pos.xy = screen_space * 2.0 - 1.0;
                ndc_pos.zw = float2(ndc_dep, 1);
                // Multiply the homogeneous factor back in (undo the perspective divide)
                ndc_pos = ndc_pos * linearDepthZ;
                // Transform to view space, then to world space
                view_pos = mul(_mtx_proj_inv, ndc_pos);
                float4 world_pos = mul(_mtx_view_inv, float4(view_pos.xyz, 1));

                return world_pos;
            }


            float4 frag (v2f i) : SV_Target
            {
                float4 col = tex2D(_MainTex, i.uv);

                // Remove the homogeneous factor from the interpolated screen position
                float2 screen_space = i.screen_pos.xy / i.screen_pos.w;
                // Sample the raw (non-linear) depth
                float org_depth = SAMPLE_TEXTURE2D_X(_CameraDepthTexture, sampler_CameraDepthTexture, screen_space).x;
                // Reconstruct the world position
                float4 view_pos;
                float4 world_pos = cal_world_pos_by_dep(org_depth, screen_space, view_pos);

                float3 cam_wpos = GetCameraPositionWS();
                float3 v_step = (world_pos.xyz - cam_wpos) / _raytrace_step_count;

                float3 rt_start = cam_wpos;
                float shadow_atten = 0;
                UNITY_LOOP
                for (int idx = 0; idx < _raytrace_step_count; idx++) // brute-force march, very inefficient
                {
                    float4 shadow_coord = TransformWorldToShadowCoord(rt_start);
                    rt_start += v_step;

                    Light mainLight = GetMainLight(shadow_coord); // this samples the cascaded shadow map
                    shadow_atten += mainLight.shadowAttenuation;
                }

                shadow_atten = (shadow_atten / _raytrace_step_count) * _scale;

                col.rgb = col.rgb * shadow_atten;

                return col;
            }
            ENDHLSL
        }
    }
}
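
The march above uses uniform steps, so low step counts produce visible banding. Rather than pushing _raytrace_step_count toward 200 (which, as noted earlier, is unusably slow), a common improvement (not part of the original shader) is to jitter each pixel's ray start with interleaved gradient noise:

// Interleaved gradient noise (Jimenez 2014): cheap per-pixel jitter in [0,1)
float ign(float2 pixel)
{
    return frac(52.9829189 * frac(dot(pixel, float2(0.06711056, 0.00583715))));
}

// In frag(), before the march (SV_POSITION carries pixel coordinates):
// float3 rt_start = cam_wpos + v_step * ign(i.vertex.xy);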

The corresponding URP pipeline C# code:

using UnityEngine;
using UnityEngine.Rendering;
using UnityEngine.Rendering.Universal;
using System;

public class RayTraceFogRenderPassFeature : ScriptableRendererFeature
{
    class CustomRenderPass : ScriptableRenderPass
    {
        public Material raytrace_material_;
        public RenderTargetIdentifier render_target_color_;
        public RenderTargetHandle temp_render_target_;
        public int raytrace_count_ = 5;
        public float scale_ = 1.0f;


        // This method is called before executing the render pass.
        // It can be used to configure render targets and their clear state. Also to create temporary render target textures.
        // When empty this render pass will render to the active camera render target.
        // You should never call CommandBuffer.SetRenderTarget. Instead call <c>ConfigureTarget</c> and <c>ConfigureClear</c>.
        // The render pipeline will ensure target setup and clearing happens in a performant manner.
        public override void OnCameraSetup(CommandBuffer cmd, ref RenderingData renderingData)
        {
        }

        // Here you can implement the rendering logic.
        // Use <c>ScriptableRenderContext</c> to issue drawing commands or execute command buffers
        // https://docs.unity3d.com/ScriptReference/Rendering.ScriptableRenderContext.html
        // You don't have to call ScriptableRenderContext.submit, the render pipeline will call it at specific points in the pipeline.
        public override void Execute(ScriptableRenderContext context, ref RenderingData renderingData)
        {
            if (!raytrace_material_)
                return;

            raytrace_material_.SetInt("_raytrace_step_count", raytrace_count_);
            raytrace_material_.SetFloat("_scale", scale_);

            {
                Camera cam = renderingData.cameraData.camera;
                // Inverse view/projection used by the shader to reconstruct world positions.
                // On some platforms GL.GetGPUProjectionMatrix() may be needed here so the
                // inverse matches the projection the GPU actually uses.
                var mtx_view_inv = cam.worldToCameraMatrix.inverse;
                var mtx_proj_inv = cam.projectionMatrix.inverse;

                raytrace_material_.SetMatrix("_mtx_view_inv", mtx_view_inv);
                raytrace_material_.SetMatrix("_mtx_proj_inv", mtx_proj_inv);
            }

            const string CommandBufferTag = "raytrace fog Pass";
            var cmd = CommandBufferPool.Get(CommandBufferTag);

            RenderTextureDescriptor opaqueDesc = renderingData.cameraData.cameraTargetDescriptor;
            opaqueDesc.depthBufferBits = 0;
            cmd.GetTemporaryRT(temp_render_target_.id, opaqueDesc);

            // Blit through the material, writing the result into the temporary buffer
            cmd.Blit(render_target_color_, temp_render_target_.Identifier(), raytrace_material_);
            // Then copy from the temporary buffer back to the camera color target
            cmd.Blit(temp_render_target_.Identifier(), render_target_color_);
            // Queue the release of the temporary RT (must happen before the buffer is released)
            cmd.ReleaseTemporaryRT(temp_render_target_.id);

            // Execute the command buffer
            context.ExecuteCommandBuffer(cmd);
            // Return the command buffer to the pool
            CommandBufferPool.Release(cmd);
        }

        // Cleanup any allocated resources that were created during the execution of this render pass.
        public override void OnCameraCleanup(CommandBuffer cmd)
        {
        }
    }

    CustomRenderPass m_ScriptablePass;
    public Material raytrace_material_;
    public int raytrace_count_ = 5;
    public float scale_ = 1.0f;

    /// <inheritdoc/>
    public override void Create()
    {
        m_ScriptablePass = new CustomRenderPass();
        // Give the temp RT handle a valid shader property id (the name is arbitrary)
        m_ScriptablePass.temp_render_target_.Init("_lsc_raytrace_temp_rt");

        // Configures where the render pass should be injected.
        m_ScriptablePass.renderPassEvent = RenderPassEvent.AfterRenderingOpaques;
    }

    // Here you can inject one or multiple render passes in the renderer.
    // This method is called when setting up the renderer once per-camera.
    public override void AddRenderPasses(ScriptableRenderer renderer, ref RenderingData renderingData)
    {
        m_ScriptablePass.render_target_color_ = renderer.cameraColorTarget;
        m_ScriptablePass.raytrace_material_ = raytrace_material_;
        m_ScriptablePass.raytrace_count_ = raytrace_count_;
        m_ScriptablePass.scale_ = scale_;

        renderer.EnqueuePass(m_ScriptablePass);
    }
}
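
To use the effect, add RayTraceFogRenderPassFeature to the Renderer Features list of the active URP Renderer asset, assign a material created from the lsc/RaytraceShader above, and tune the step count and scale on the feature.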

