动态地形细LOD
本部分主要使用细分着色器渲染精细化地形:利用高度位移贴图对顶点进行位移并按高度区分地表纹理,同时结合视锥剔除与屏幕空间边长,对每个面片动态调整细分等级,实现基于距离的LOD。
一、 数据准备
基础vulkan创建的步骤及天空盒的创建不再赘述,我们主要是来看一下地形数据的创建。首先介绍一个概念: sobel滤波器:
1.1 sobel滤波器
sobel滤波器常用来提取灰度图像的水平边缘(水平特征)和竖直边缘(竖直特征)。它是在图像处理和计算机视觉得到广泛应用的一种图像边缘检测算法。
sobel算子 :
Sobel算子使用两个(3x3)矩阵对原图进行卷积运算,以计算出两个方向的灰度差分(偏导)的估计值(一个水平方向、一个竖直方向)。我们假定A是原始图像(彩色图像需先转换为灰度图像),Gx和Gy分别是在横向及纵向的灰度偏导的近似值(即两个方向上对原图的平面卷积结果)。数学表达如下:

$$G_x = \begin{bmatrix} -1 & 0 & +1 \\ -2 & 0 & +2 \\ -1 & 0 & +1 \end{bmatrix} * A,\qquad G_y = \begin{bmatrix} -1 & -2 & -1 \\ 0 & 0 & 0 \\ +1 & +2 & +1 \end{bmatrix} * A$$
知道这个概念后,我们就可在下边创建地形数据时更加透彻的明白我们是如何使用sobel过滤器从高度图计算法线。
1.2 地形数据
首先创建一个函数generateTerrain,来为细分着色器生成一个地形数据:
// Generates the terrain patch data consumed by the tessellation pipeline:
// a PATCH_SIZE x PATCH_SIZE grid of vertices (position, normal, UV) plus
// quad-patch index data. Per-vertex normals are derived from the height map
// with a 3x3 Sobel filter.
void generateTerrain()
{
    struct Vertex {
        glm::vec3 pos;
        glm::vec3 normal;
        glm::vec2 uv;
    };
#define PATCH_SIZE 64   // vertices per patch edge
#define UV_SCALE 1.0f   // UV tiling factor
    const uint32_t vertexCount = PATCH_SIZE * PATCH_SIZE;
    // NOTE(review): raw new[] — the sample frees these after the staging copy
    // (elided below); std::vector would make that cleanup automatic.
    Vertex *vertices = new Vertex[vertexCount];
    const float wx = 2.0f, wy = 2.0f; // world-space vertex spacing along x/z
    // Vertex positions: a flat grid centered on the origin in the XZ plane
    // (height is applied later in the evaluation shader via the displacement map).
    for (auto x = 0; x < PATCH_SIZE; x++)
    {
        for (auto y = 0; y < PATCH_SIZE; y++)
        {
            uint32_t index = (x + y * PATCH_SIZE);
            vertices[index].pos[0] = x * wx + wx / 2.0f - (float)PATCH_SIZE * wx / 2.0f;
            vertices[index].pos[1] = 0.0f;
            vertices[index].pos[2] = y * wy + wy / 2.0f - (float)PATCH_SIZE * wy / 2.0f;
            vertices[index].uv = glm::vec2((float)x / PATCH_SIZE, (float)y / PATCH_SIZE) * UV_SCALE;
        }
    }
    // Normals: Sobel filter over a 3x3 height-map neighborhood per vertex.
    HeightMap heightMap(getAssetPath() + "textures/terrain_heightmap_r16.ktx", PATCH_SIZE);
    for (auto x = 0; x < PATCH_SIZE; x++)
    {
        for (auto y = 0; y < PATCH_SIZE; y++)
        {
            // Gather the height samples centered on the current vertex.
            float heights[3][3];
            for (auto hx = -1; hx <= 1; hx++)
            {
                for (auto hy = -1; hy <= 1; hy++)
                {
                    heights[hx + 1][hy + 1] = heightMap.getHeight(x + hx, y + hy);
                }
            }
            glm::vec3 normal;
            // Gx: horizontal gradient estimate.
            normal.x = heights[0][0] - heights[2][0] + 2.0f * heights[0][1] - 2.0f * heights[2][1] + heights[0][2] - heights[2][2];
            // Gy: vertical gradient estimate (stored in z, the grid's second axis).
            normal.z = heights[0][0] + 2.0f * heights[1][0] + heights[2][0] - heights[0][2] - 2.0f * heights[1][2] - heights[2][2];
            // Reconstruct the missing up component from the two gradients.
            // The leading factor controls bump strength. The radicand is clamped:
            // on steep slopes x^2 + z^2 can exceed 1 and sqrt of a negative value
            // would yield NaN normals.
            normal.y = 1.25f * sqrt(glm::max(0.0f, 1.0f - normal.x * normal.x - normal.z * normal.z));
            // Scale the horizontal components before normalizing to sharpen the
            // relief. (The original text assigned the normal a second time with
            // glm::normalize(normal), which overwrote this scaled result and made
            // it a dead store; the duplicate assignment is removed.)
            vertices[x + y * PATCH_SIZE].normal = glm::normalize(normal * glm::vec3(2.0f, 1.0f, 2.0f));
        }
    }
    // Index data: four control points per quad patch.
    const uint32_t w = (PATCH_SIZE - 1);
    const uint32_t indexCount = w * w * 4;
    uint32_t *indices = new uint32_t[indexCount];
    for (auto x = 0; x < w; x++)
    {
        for (auto y = 0; y < w; y++)
        {
            uint32_t index = (x + y * w) * 4;
            indices[index] = (x + y * PATCH_SIZE);
            indices[index + 1] = indices[index] + PATCH_SIZE;
            indices[index + 2] = indices[index + 1] + 1;
            indices[index + 3] = indices[index] + 1;
        }
    }
    models.terrain.indexCount = indexCount;
    uint32_t vertexBufferSize = vertexCount * sizeof(Vertex);
    uint32_t indexBufferSize = indexCount * sizeof(uint32_t);
    struct {
        VkBuffer buffer;
        VkDeviceMemory memory;
    } vertexStaging, indexStaging;
    ...
    // Create the staging buffers
    ...
    // Copy from the staging buffers
    ...
二、管线流程
创建好地形等缓存数据后,我们主要来看一下管线部分:
// Terrain tessellation pipeline: four shader stages are required because the
// pipeline enables tessellation (vertex + fragment + tess. control + tess. evaluation).
std::array<VkPipelineShaderStageCreateInfo, 4> shaderStages;
shaderStages[0] = loadShader("terrain.vert.spv", VK_SHADER_STAGE_VERTEX_BIT);
shaderStages[1] = loadShader("terrain.frag.spv", VK_SHADER_STAGE_FRAGMENT_BIT);
shaderStages[2] = loadShader("terrain.tesc.spv", VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
shaderStages[3] = loadShader("terrain.tese.spv", VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT);
从上述管线中,我们可以看出使用了顶点、细分控制、细分评估、片元四个着色器。
2.1、顶点着色器
很简单的顶点数据传输
#version 450

layout (location = 0) in vec3 inPos;
layout (location = 1) in vec3 inNormal;
layout (location = 2) in vec2 inUV;

layout (location = 0) out vec3 outNormal;
layout (location = 1) out vec2 outUV;

// Pass-through vertex stage: the tessellation stages do the real work, so we
// simply forward the untransformed position plus normal and UV downstream.
void main(void)
{
	outNormal = inNormal;
	outUV = inUV;
	gl_Position = vec4(inPos, 1.0);
}
2.2、表面细分控制着色器
从顶点着色器传过来的数据,我们在细分控制着色器中做如下处理:
#version 450
// UBO shared with the evaluation stage: camera matrices, light position,
// frustum planes for culling, and the tessellation tuning parameters.
layout(set = 0, binding = 0) uniform UBO
{
mat4 projection;
mat4 modelview;
vec4 lightPos;
vec4 frustumPlanes[6]; // culling planes used by frustumCheck()
float displacementFactor; // height-map displacement scale
float tessellationFactor; // global multiplier; <= 0 disables adaptive tessellation
vec2 viewportDim; // viewport size in pixels, for screen-space edge sizes
float tessellatedEdgeSize; // reference (minimum) edge size in pixels
} ubo;
// Height map, sampled so patch culling accounts for terrain displacement.
layout(set = 0, binding = 1) uniform sampler2D samplerHeight;
// Quad tessellation: 4 control points per output patch.
layout (vertices = 4) out;
layout (location = 0) in vec3 inNormal[];
layout (location = 1) in vec2 inUV[];
layout (location = 0) out vec3 outNormal[4];
layout (location = 1) out vec2 outUV[4];
// Tessellation factor for one patch edge, based on its projected size on
// screen: edges that cover more pixels receive more subdivision.
float screenSpaceTessFactor(vec4 p0, vec4 p1)
{
	// Bounding sphere over the edge: center at the midpoint, radius = half length.
	vec4 edgeMid = 0.5 * (p0 + p1);
	float halfLen = distance(p0, p1) / 2.0;

	// Bring the midpoint into view space, then project two points offset by
	// the radius along view-space x into clip space.
	vec4 viewMid = ubo.modelview * edgeMid;
	vec4 clipA = (ubo.projection * (viewMid - vec4(halfLen, vec3(0.0))));
	vec4 clipB = (ubo.projection * (viewMid + vec4(halfLen, vec3(0.0))));

	// Perspective divide to NDC, then scale up to pixel coordinates.
	clipA /= clipA.w;
	clipB /= clipB.w;
	clipA.xy *= ubo.viewportDim;
	clipB.xy *= ubo.viewportDim;

	// Factor = projected edge length divided by the reference (minimum) edge
	// size, scaled by the global factor and clamped to the valid range [1, 64].
	return clamp(distance(clipA, clipB) / ubo.tessellatedEdgeSize * ubo.tessellationFactor, 1.0, 64.0);
}
// Conservative visibility test: a bounding sphere around this invocation's
// control point is tested against all six frustum planes.
bool frustumCheck()
{
	// Fixed radius; must be enlarged if the patch dimensions in the sample grow.
	const float radius = 8.0f;

	// Pull the test point down to the displaced terrain surface so culling
	// matches what the evaluation shader will actually render.
	vec4 pos = gl_in[gl_InvocationID].gl_Position;
	pos.y -= textureLod(samplerHeight, inUV[0], 0.0).r * ubo.displacementFactor;

	// The patch is invisible as soon as the sphere lies fully behind any plane.
	int plane = 0;
	while (plane < 6)
	{
		if (dot(pos, ubo.frustumPlanes[plane]) + radius < 0.0)
		{
			return false;
		}
		plane++;
	}
	return true;
}
void main()
{
// Tessellation levels are per-patch state: only invocation 0 writes them.
if (gl_InvocationID == 0)
{
// Is the patch inside the view frustum?
if (!frustumCheck())
{
// Outside the frustum: zero inner/outer levels cull the patch entirely.
gl_TessLevelInner[0] = 0.0;
gl_TessLevelInner[1] = 0.0;
gl_TessLevelOuter[0] = 0.0;
gl_TessLevelOuter[1] = 0.0;
gl_TessLevelOuter[2] = 0.0;
gl_TessLevelOuter[3] = 0.0;
}
else
{
// Inside the frustum
if (ubo.tessellationFactor > 0.0)
{
// One screen-space factor per patch edge; an edge shared by two patches
// uses the same endpoints, so both sides get the same factor (no cracks).
gl_TessLevelOuter[0] = screenSpaceTessFactor(gl_in[3].gl_Position, gl_in[0].gl_Position);
gl_TessLevelOuter[1] = screenSpaceTessFactor(gl_in[0].gl_Position, gl_in[1].gl_Position);
gl_TessLevelOuter[2] = screenSpaceTessFactor(gl_in[1].gl_Position, gl_in[2].gl_Position);
gl_TessLevelOuter[3] = screenSpaceTessFactor(gl_in[2].gl_Position, gl_in[3].gl_Position);
// Inner levels: midpoint (average) of two outer levels.
gl_TessLevelInner[0] = mix(gl_TessLevelOuter[0], gl_TessLevelOuter[3], 0.5);
gl_TessLevelInner[1] = mix(gl_TessLevelOuter[2], gl_TessLevelOuter[1], 0.5);
}
else
{
// Tessellation factor <= 0: fall back to no subdivision (level 1).
gl_TessLevelInner[0] = 1.0;
gl_TessLevelInner[1] = 1.0;
gl_TessLevelOuter[0] = 1.0;
gl_TessLevelOuter[1] = 1.0;
gl_TessLevelOuter[2] = 1.0;
gl_TessLevelOuter[3] = 1.0;
}
}
}
// Every invocation forwards its control point and attributes unchanged.
gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;
outNormal[gl_InvocationID] = inNormal[gl_InvocationID];
outUV[gl_InvocationID] = inUV[gl_InvocationID];
}
细分控制着色器大致分为三部分:
- 首先判断是否在视锥体内,不在:不处理内外细分等级归零;否则进2;
- 判断外部传入的细分因子是否大于0,不大于0(≤0)时:细分等级归1;否则进3;
- 根据各条patch边在屏幕空间中的投影长度计算细分因子,对地形面片进行细分;
其中具体操作可参见着色器中注释代码;
2.3、表面细分评估着色器
从细分控制着色器生成的新的顶点数据,我们在本评估着色器中进行处理(类似顶点着色器):
#version 450
// Same UBO layout as the control stage (camera, light, frustum, tessellation
// parameters); here we use the matrices, light position and displacement scale.
layout (set = 0, binding = 0) uniform UBO
{
mat4 projection;
mat4 modelview;
vec4 lightPos;
vec4 frustumPlanes[6];
float displacementFactor; // height-map displacement scale
float tessellationFactor;
vec2 viewportDim;
float tessellatedEdgeSize;
} ubo;
// Height map used to displace tessellated vertices along y.
layout (set = 0, binding = 1) uniform sampler2D displacementMap;
// Quad domain, equal spacing, clockwise winding.
layout(quads, equal_spacing, cw) in;
layout (location = 0) in vec3 inNormal[];
layout (location = 1) in vec2 inUV[];
layout (location = 0) out vec3 outNormal;
layout (location = 1) out vec2 outUV;
layout (location = 2) out vec3 outViewVec;
layout (location = 3) out vec3 outLightVec;
layout (location = 4) out vec3 outEyePos;
layout (location = 5) out vec3 outWorldPos;
// Evaluate one tessellated vertex: bilinearly interpolate the quad's
// attributes at gl_TessCoord, displace it by the height map, project it,
// and emit the vectors the fragment stage needs for lighting.
void main()
{
	// Bilinear interpolation across the quad (edge 0-1 vs edge 3-2).
	vec2 uvA = mix(inUV[0], inUV[1], gl_TessCoord.x);
	vec2 uvB = mix(inUV[3], inUV[2], gl_TessCoord.x);
	outUV = mix(uvA, uvB, gl_TessCoord.y);

	vec3 normA = mix(inNormal[0], inNormal[1], gl_TessCoord.x);
	vec3 normB = mix(inNormal[3], inNormal[2], gl_TessCoord.x);
	outNormal = mix(normA, normB, gl_TessCoord.y);

	vec4 posA = mix(gl_in[0].gl_Position, gl_in[1].gl_Position, gl_TessCoord.x);
	vec4 posB = mix(gl_in[3].gl_Position, gl_in[2].gl_Position, gl_TessCoord.x);
	vec4 pos = mix(posA, posB, gl_TessCoord.y);

	// Height-map displacement along -y (matching the control shader's
	// frustum check).
	pos.y -= textureLod(displacementMap, outUV, 0.0).r * ubo.displacementFactor;

	// Final clip-space position.
	gl_Position = ubo.projection * ubo.modelview * pos;

	// Lighting vectors derived from the displaced (pre-view) position.
	outViewVec = -pos.xyz;
	outLightVec = normalize(ubo.lightPos.xyz + outViewVec);
	outWorldPos = pos.xyz;
	outEyePos = vec3(ubo.modelview * pos);
}
我们将新生成的顶点数据计算其对应的法线、UV、视角、光源位置等传递给下边的片元着色器去处理;
2.4、片元着色器
在片元着色器中,我们对经过上述三个阶段处理、并由光栅化生成的片元数据进行逐片元着色处理。
大致分为三个阶段:
- 基础冯氏光照模型处理;
- 根据地形高度定义采样纹理;
- 根据距离雾化远处场景;
#version 450
// Height map (for height-based texture layering) and the terrain texture array.
layout (set = 0, binding = 1) uniform sampler2D samplerHeight;
layout (set = 0, binding = 2) uniform sampler2DArray samplerLayers;
// Interpolated inputs produced by the tessellation evaluation stage.
layout (location = 0) in vec3 inNormal;
layout (location = 1) in vec2 inUV;
layout (location = 2) in vec3 inViewVec;
layout (location = 3) in vec3 inLightVec;
layout (location = 4) in vec3 inEyePos;
layout (location = 5) in vec3 inWorldPos;
layout (location = 0) out vec4 outFragColor;
// Blends the terrain texture-array layers by height: each layer is weighted
// within its height band, and neighboring bands overlap so layers fade into
// each other.
vec3 sampleTerrainLayer()
{
	// (start, end) height band per texture layer.
	vec2 bands[6];
	bands[0] = vec2(-10.0, 10.0);
	bands[1] = vec2(5.0, 45.0);
	bands[2] = vec2(45.0, 80.0);
	bands[3] = vec2(75.0, 100.0);
	bands[4] = vec2(95.0, 140.0);
	bands[5] = vec2(140.0, 190.0);

	// Terrain height at this fragment, rescaled from [0, 1] to [0, 255].
	float height = textureLod(samplerHeight, inUV, 0.0).r * 255.0;

	vec3 blended = vec3(0.0);
	for (int layer = 0; layer < 6; layer++)
	{
		float span = bands[layer].y - bands[layer].x;
		// Weight peaks at the band's upper bound and falls off linearly.
		float weight = max(0.0, (span - abs(height - bands[layer].y)) / span);
		blended += weight * texture(samplerLayers, vec3(inUV * 16.0, layer)).rgb;
	}
	return blended;
}
// Exponential-squared fog amount in [0, 1]; 0 = clear, 1 = fully fogged.
float fog(float density)
{
	// -log2(e): exp2(x * LOG2) == exp(-x), so the result is exp(-(d*dist)^2).
	const float LOG2 = -1.442695;
	// Approximate eye-space distance reconstructed from the depth values.
	float dist = gl_FragCoord.z / gl_FragCoord.w * 0.1;
	float d = density * dist;
	float visibility = clamp(exp2(d * d * LOG2), 0.0, 1.0);
	return 1.0 - visibility;
}
void main()
{
// 1. Basic Phong-style lighting (ambient + diffuse only, white light).
vec3 N = normalize(inNormal);
vec3 L = normalize(inLightVec);
vec3 ambient = vec3(0.5);
vec3 diffuse = max(dot(N, L), 0.0) * vec3(1.0);
// 2. Height-based terrain texturing.
// (Uncomment the next line instead to view pure lighting without textures.)
//vec4 color = vec4((ambient + diffuse), 1.0);
vec4 color = vec4((ambient + diffuse) * sampleTerrainLayer(), 1.0);
// 3. Distance fog: blend towards a fixed fog color.
const vec4 fogColor = vec4(0.47, 0.5, 0.67, 0.0);
// mix(x, y, a) returns x*(1-a) + y*a — linear blend controlled by the fog amount.
outFragColor = mix(color, fogColor, fog(0.25));
// (Uncomment to disable fog.)
//outFragColor = color;
}
运行,可见如下效果:
你也可以在最后片元着色器中开启不同的效果来查看地形编辑处理(纹理细分等级8),如下:
高度区分纹理+雾化:
高度区分纹理:
普通纹理+雾化: