1.获得renderTexture上的4个角的近裁面位置
cam = GetComponent<Camera>();
Matrix4x4 inverseViewProjectionMatrix = GL.GetGPUProjectionMatrix(cam.projectionMatrix, true);
inverseViewProjectionMatrix *= cam.worldToCameraMatrix;
inverseViewProjectionMatrix = inverseViewProjectionMatrix.inverse;
Vector3 leftBottom = inverseViewProjectionMatrix.MultiplyPoint(new Vector3(-1, -1, 1));
Vector3 rightBottom = inverseViewProjectionMatrix.MultiplyPoint(new Vector3(1, -1, 1));
Vector3 leftTop = inverseViewProjectionMatrix.MultiplyPoint(new Vector3(-1, 1, 1));
Vector3 rightTop = inverseViewProjectionMatrix.MultiplyPoint(new Vector3(1, 1, 1));
2.获得renderTexture上的4个角的远裁面位置[世界空间的位置,因为乘的是VP矩阵的逆矩阵]
cam = GetComponent<Camera>();
Matrix4x4 inverseViewProjectionMatrix = GL.GetGPUProjectionMatrix(cam.projectionMatrix, true);//根据相机的投影矩阵计算GPU投影矩阵
inverseViewProjectionMatrix *= cam.worldToCameraMatrix;//unity_MatrixVP
inverseViewProjectionMatrix = inverseViewProjectionMatrix.inverse;//unity_MatrixVP矩阵的逆矩阵
Vector3 leftBottom = inverseViewProjectionMatrix.MultiplyPoint(new Vector3(-1, -1, 0));
Vector3 rightBottom = inverseViewProjectionMatrix.MultiplyPoint(new Vector3(1, -1, 0));
Vector3 leftTop = inverseViewProjectionMatrix.MultiplyPoint(new Vector3(-1, 1, 0));
Vector3 rightTop = inverseViewProjectionMatrix.MultiplyPoint(new Vector3(1, 1, 0));
3. GL.GetGPUProjectionMatrix
GL.GetGPUProjectionMatrix函数获得差异处理后的投影矩阵,并且远裁面深度为0,近裁面深度为1。
Matrix4x4 projectionMatrix = GL.GetGPUProjectionMatrix(mCamera.projectionMatrix, true);
unity_MatrixVP = projectionMatrix * mCamera.worldToCameraMatrix;
DX左上角为(0,0)且值域为(0,1),GL左下角为(0,0)且值域为(-1,1)
GL.GetGPUProjectionMatrix:用于处理DX和GL的坐标差异性。
这个函数,当使用DX渲染的时候(底层统一用的是OpenGL方式的)就是把DX的Z轴从(-1,1)映射回(0,1),并且对y轴进行了翻转。
解释原文链接:https://blog.csdn.net/liuwumiyuhuiping/article/details/52524317
5.shader的属性数据,可以通过帧调试(Frame Debugger)窗口查看
6.矩阵
E00~E03
E10~E13
...
...
7.shader中获取长度
length(pos1 - pos2)
8.多一个drawcall一般来讲比在shader中进行复杂的运算更为节省性能
9.
[loop]
[unroll]
[Header(test)] 括号内即为头标题的显示文字 不要加引号,不支持中文
[NoScaleOffset] 隐藏贴图的Tilling和Offset选项
[Toggle] 模拟开关,0为假,1为真,同[MaterialToggle]
10.
static int _MaxRayLength = Shader.PropertyToID("_MaxRayLength");
11.
//当前激活的渲染目标
//获取当前状态的RenderTexture(对应于OnRenderImage的source)
UnityEngine.Rendering.BuiltinRenderTextureType.CurrentActive
12.CommandBuff将光源的shadowmap贴到屏幕上
public Light ml;
public RenderTexture rt;
CommandBuffer buf = null;
buf = new CommandBuffer();
buf.SetShadowSamplingMode(BuiltinRenderTextureType.CurrentActive, ShadowSamplingMode.RawDepth);
RenderTargetIdentifier id = new RenderTargetIdentifier(rt);
buf.Blit(BuiltinRenderTextureType.CurrentActive,id );
ml.AddCommandBuffer(LightEvent.AfterShadowMap, buf);
// Discard the camera's own output and show the shadow-map copy (rt) on screen instead.
private void OnRenderImage(RenderTexture source, RenderTexture destination)
{
// 'rt' is the RenderTexture the CommandBuffer blitted the light's shadow map into.
Graphics.Blit(rt,destination);
}
14.
_material.EnableKeyword("HEIGHT_FOG");
_material.DisableKeyword("HEIGHT_FOG");
15 两向量之间夹角弧度计算,当direction1 和direction2 都是单位向量时
radian=Acos(Dot(direction1,direction2))
Mathf.Rad2Deg - 弧度到度转换
16.
/// <summary>
/// Whether the current camera sits inside the point light's (slightly extended) range.
/// </summary>
private bool IsCameraInPointLightBounds()
{
// Pad the range by 1 unit so the transition is not abrupt right at the boundary.
float extendedRange = _light.range + 1;
// Compare squared magnitudes to avoid the square root.
Vector3 toCamera = _light.transform.position - Camera.current.transform.position;
return toCamera.sqrMagnitude < extendedRange * extendedRange;
}
/// <summary>
/// Whether the current camera sits inside the spot light's cone (with a small tolerance).
/// </summary>
private bool IsCameraInSpotLightBounds()
{
// check range: distance of the camera along the light's forward axis, padded by 1 unit
float distance = Vector3.Dot(_light.transform.forward, (Camera.current.transform.position - _light.transform.position));
float extendedRange = _light.range + 1;
if (distance > (extendedRange))
return false;
// check angle: angle between the light axis and the direction to the camera,
// padded by 3 degrees. Fixed: use _light.transform.forward (the range check above
// already does) instead of this component's transform.forward, so the test stays
// correct even when this script is not attached to the light's own GameObject.
float cosAngle = Vector3.Dot(_light.transform.forward, (Camera.current.transform.position - _light.transform.position).normalized);
if ((Mathf.Acos(cosAngle) * Mathf.Rad2Deg) > (_light.spotAngle + 3) * 0.5f)
return false;
return true;
}
17.cos 和acos
Dot( direction1,direction2)=Cos(radian)*Length(direction1)*Length(direction2)=direction1.x*direction2.x+direction1.y*direction2.y
Cos(radian)=(direction1.x*direction2.x+direction1.y*direction2.y)/(Length(direction1)*Length(direction2))
radian=Acos((direction1.x*direction2.x+direction1.y*direction2.y)/(Length(direction1)*Length(direction2)))
radian=Acos((direction1.x*direction2.x+direction1.y*direction2.y)/(Length(direction1)*Length(direction2)))
float radian=Mathf.Acos(Vector3.Dot(direction1,direction2)) -> 得到弧度
float angle=Mathf.Acos(Vector3.Dot(direction1,direction2)) * Mathf.Rad2Deg -> 得到角度
18.投影向量
Vector3.Project(toP_foDirect, toP_toDirect);
/// <summary>
/// Projects <paramref name="vector"/> onto <paramref name="onNormal"/>:
/// onNormal * dot(vector, onNormal) / dot(onNormal, onNormal).
/// </summary>
public static Vector3 Project(Vector3 vector, Vector3 onNormal)
{
// Squared length of the target direction (dot of a vector with itself).
float sqrLen = Vector3.Dot(onNormal, onNormal);
// Degenerate (near-zero) direction: nothing to project onto.
if ((double) sqrLen < (double) Mathf.Epsilon)
return Vector3.zero;
float alongLen = Vector3.Dot(vector, onNormal);
// Component-wise (onNormal * alongLen) / sqrLen — same arithmetic order as before.
return onNormal * alongLen / sqrLen;
}
19.OnPreRender和OnPostRender代替OnRenderImage
// Allocate a temporary screen-sized render target and point the camera at it,
// so OnPostRender can blit the result to the backbuffer manually.
private void OnPreRender()
{
// Request MSAA on the temporary RT only when anti-aliasing is enabled in quality settings.
int aa = QualitySettings.antiAliasing;
cameraRenderTex = aa == 0
? RenderTexture.GetTemporary(Screen.width, Screen.height, 24, RenderTextureFormat.Default, RenderTextureReadWrite.Default)
: RenderTexture.GetTemporary(Screen.width, Screen.height, 24, RenderTextureFormat.Default, RenderTextureReadWrite.Default, aa);
// Redirect the camera's output into the temporary texture.
mainCamera.targetTexture = cameraRenderTex;
}
// Copy the temporary render target to the screen and release it.
private void OnPostRender()
{
// A null destination blits to the backbuffer (the screen).
Graphics.Blit(cameraRenderTex, null as RenderTexture);
RenderTexture.ReleaseTemporary(cameraRenderTex);
}
20.射线追踪
https://zhuanlan.zhihu.com/p/21425792
简单的for循环:
//ro视线起点,rd是视线方向
// ro: ray (view) origin, rd: ray direction.
// Marches stepNum samples along the ray, accumulating inverse-square light falloff.
vec3 raymarch(vec3 ro, vec3 rd)
{
const int stepNum= 100;
// light source intensity
const float lightIntense = 100.;
// marching step size
float stepSize= 250./stepNum;
vec3 light= vec3(0.0, 0.0, 0.0);
// light source position
vec3 lightPos = vec3(2.,2.,.5);
float t = 1.0;
// Fixed: removed the dead outer 'vec3 p' declaration that was shadowed inside the loop.
for(int i=0; i<stepNum;i++)
{
// current sample point along the ray
vec3 p = ro + t*rd;
// brightness at the sample: inverse-square falloff from the light
float vLight = lightIntense /dot(p-lightPos,p-lightPos);
// Fixed: "light + =vLight;" was a syntax error; scalar += adds to every component.
light += vLight;
// keep marching
t+=stepSize;
}
return light;
}
积分求解
// Closed-form in-scattering integral for a point light along a ray segment:
// integrates 1/|x(t) - lightPos|^2 for t in [0, d] where x(t) = start + t*rd.
// start: segment start, rd: ray direction (assumed unit length — confirm at call site),
// lightPos: light position, d: segment length. Returns the accumulated scattering.
float InScatter(vec3 start, vec3 rd, vec3 lightPos, float d)
{
vec3 q = start - lightPos;
float b = dot(rd, q); // projection of q onto the ray direction
float c = dot(q, q); // squared distance from segment start to the light
float iv = 1.0f / sqrt(c - b*b); // reciprocal of the ray's closest distance to the light
// antiderivative atan((t+b)*iv)*iv of 1/((t+b)^2 + (c-b^2)), evaluated at t=d minus t=0
float l = iv * (atan( (d + b) * iv) - atan( b*iv ));
return l;
}
21.
特定关键字
in , out , inout,
uniform (被修饰的变量从外部传入), const
Vertex Shader的输入:appdata
POSITION, NORMAL, BINORMAL, BLENDINDICES, BLENDWEIGHT, TANGENT, PSIZE,
TEXCOORD0 ~ TEXCOORD7, SV_VertexID
Vertex Shader的输出, 也就是Pixel Shader的输入 v2f
POSITION, PSIZE, FOG,COLOR0 ~ COLOR1, TEXCOORD1 ~ TEXCOORD7,SV_POSITION
22.SV_VertexID
// Vertex shader input demonstrating SV_VertexID.
struct VSInput
{
float4 vertex : POSITION; // object-space position
float2 uv : TEXCOORD0; // first UV channel
uint vertexId : SV_VertexID; // per-vertex index supplied by the GPU
};
23.移位操作符只能作用在int上
int2 a = int2(0.0,0.0);
int2 b = a>>1;
如果使用如下代码,会出现错误提示信息
float2 a = int2(0.0,0.0);
float2 b = a>>1;
24.三目运算符
if(a < 0){b = a}
else{c = a}
等价
(a < 0) ? (b = a) : (c = a); expr2 和 expr3 必须是与 expr1 长度相同的向量
25.in out inout
in : 修辞一个形参只是用于输入,进入函数体时被初始化,且该形参值的改变不会影响实参值,这是典型的值传递方式。
out : 修辞一个形参只是用于输出的,进入函数体时并没有被初始化,这种类型的形参一般是一个函数的运行结果;
inout : 修辞一个形参既用于输入也用于输出,这是典型的引用传递。
举例如下:
void myFunction(out float x); // 形参 x ,只是用于输出
void myFunction(inout float x); // 形参 x ,即用于输入时初始化,也用于输出数据
void myFunction(in float x); // 形参 x ,只是用于输入
void myFunction(float x); // 等价于 in float x ,这种用法和 C/C++ 完全一致
也可以使用 return 语句来代替 out 修辞符的使用
26.
使用几何体着色器(#pragma geometry)将编译目标设置为4.0。
使用曲面细分着色器(#pragma hull或#pragma domain)将编译目标设置为4.6。
#pragma target 3.0:派生指令,纹理LOD采样,10个插值器,允许使用更多的数学/纹理指令。
27.
sampler2D_half - 低精度采样器
纹理包含HDR,颜色,则可能要使用半精度采样器
28.
sampler2D _MainTex;
half4 color = tex2D(_MainTex, uv);
unity允许声明贴图和采样器时使用DX11风格的HLSL语法,用一个特殊的命名惯例来将他们匹配起来;拥有名字为“sampler”+贴图名字 的采样器会对这个纹理进行取样
Texture2D _MainTex;
SamplerState sampler_MainTex; // "sampler" + “_MainTex”
half4 color = _MainTex.Sample(sampler_MainTex, uv);
29.替换着色器
camera.SetReplacementShader (EffectShader, "RenderType");
相机上着色器所有的有"RenderType"="Opaque"的SubShader,会被EffectShader的"RenderType"="Opaque"的subShader替换,SomethingElse同理替换
Shader "EffectShader" {
SubShader {
Tags { "RenderType"="Opaque" }
Pass {
...
}
}
SubShader {
Tags { "RenderType"="SomethingElse" }
Pass {
...
}
}
...
}
30.顶点(UV)翻转
Direct3D的:顶部的坐标为0,向下则增加。这适用于Direct3D,Metal和控制台。
OpenGL:底部的坐标为0,向上增大。这适用于OpenGL和OpenGL ES。
#if UNITY_UV_STARTS_AT_TOP
if (_MainTex_TexelSize.y < 0)
uv.y = 1-uv.y;
#endif
31.
为了使Shaders在所有平台上都能正常工作,某些Shader值应使用以下语义:
顶点着色器输出(剪切空间)位置:SV_POSITION。有时,着色器会使用POSITION语义来使着色器在所有平台上都能正常工作。请注意,这不适用于Sony PS4或具有镶嵌效果。
片段着色器输出颜色:SV_Target。有时,着色器会使用COLOR或COLOR0使着色器在所有平台上正常工作。请注意,这在Sony PS4上不起作用。
32.lod
Unity中的内置着色器通过以下方式设置其LOD
VertexLit着色器= 100 VertexLit kind of shaders = 100
贴花,反射性顶点光= 150 Decal, Reflective VertexLit = 150
漫射= 200 Diffuse = 200
漫反射细节,反射凹凸未照明,反射凹凸VertexLit = 250 Diffuse Detail, Reflective Bumped Unlit, Reflective Bumped VertexLit = 250
凹凸,镜面反射= 300 Bumped, Specular = 300
凹凸镜面反射= 400 Bumped Specular = 400
视差= 500 Parallax = 500
视差镜面反射= 600 Parallax Specular = 600
33.纹理数组
_MyArr ("Tex", 2DArray) = "" {}
#pragma require 2darray
34.Visual Studio调试着色器
https://docs.unity3d.com/Manual/SL-DebuggingD3D11ShadersWithVS.html
35.深度专题
https://www.jianshu.com/p/80a932d1f11e
35.1 开启
Camera.main.depthTextureMode = DepthTextureMode.Depth;
35.2 声明
sampler2D _CameraDepthTexture;
35.3 访问
//1.如果是后处理,可以直接用uv访问
//vertex
//当有多个RenderTarget时,处理UV翻转问题
#if UNITY_UV_STARTS_AT_TOP //DirectX之类的
if(_MainTex_TexelSize.y < 0)
o.uv.y = 1 - o.uv.y; //满足上面两个条件时uv会翻转,因此需要转回来
#endif
//fragment
float depth = UNITY_SAMPLE_DEPTH(tex2D(_CameraDepthTexture, i.uv));//获取像素的深度
//2.其他:利用投影纹理采样
//vertex
o.screenPos = ComputeScreenPos(o.vertex);
//fragment
float depth = SAMPLE_DEPTH_TEXTURE_PROJ(_CameraDepthTexture, UNITY_PROJ_COORD(i.screenPos));
float linear01Depth = Linear01Depth(depth); //转换成[0,1]内的线性变化深度值
float linearEyeDepth = LinearEyeDepth(depth); //转换到视野空间
35.4 NDC坐标反推世界坐标,利用VP矩阵重建世界坐标
35.4.1
// _InverseViewProjectionMatrix UNITY_MATRIX_VP矩阵的逆矩阵
float4 worldPos = mul(_InverseViewProjectionMatrix, float4(i.uv * 2 - 1, depth, 1));//通过NDC坐标反推世界坐标(齐次坐标需补w=1,结果还需除以w,见35.4.2)
35.4.2
unity中
Matrix4x4 currentVP = VPMatrix;
Matrix4x4 currentInverseVP = VPMatrix.inverse;
mat.SetMatrix("_CurrentInverseVP", currentInverseVP);
Graphics.Blit(source, destination, mat);
shader中
float depth = UNITY_SAMPLE_DEPTH(tex2D(_CameraDepthTexture, i.uv.zw));
float4 NDC = float4(i.uv.x * 2 - 1, i.uv.y * 2 - 1, depth * 2 - 1, 1); //NDC坐标
float4 WorldPosWithW = mul(_CurrentInverseVP, NDC);//VP逆矩阵乘以NDC坐标得到世界坐标
float4 WorldPos = WorldPosWithW / WorldPosWithW.w; //公式推导可知需除以w,参考:https://www.cnblogs.com/sword-magical-blog/p/10483459.html
//因为经过了 投影矩阵, 分量w 就不在为1, 所以要求得具体的世界坐标, 就需要 除以 分量w 的影响
35.4.3 利用视椎方向向量重建
unity中
float halfHeight = near * tan(fov/2);
float halfWidth = halfHeight * aspect;
Vector3 toTop = up * halfHeight;
Vector3 toRight = right * halfWidth;
Vector3 toTopLeft = forward + toTop - toRight;
Vector3 toBottomLeft = forward - toTop - toRight;
Vector3 toTopRight = forward + toTop + toRight;
Vector3 toBottomRight = forward - toTop + toRight;
toTopLeft /= cam.nearClipPlane;//视锥体近平面的四个点
toBottomLeft /= cam.nearClipPlane;
toTopRight /= cam.nearClipPlane;
toBottomRight /= cam.nearClipPlane;