glfw和glad
#include <glad/glad.h>
#include <GLFW/glfw3.h>
glfw主要用于创建OpenGL上下文,创建显示窗口以及接收用户输入,需要链接库。
glad用于管理OpenGL的函数指针,由于不同硬件、系统的驱动程序提供的函数存放在不同的位置,glad将其存储在统一的位置,以便用户调用。
(1)实例化
初始化GLFW窗口:
glfwInit();
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);//主版本
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);//子版本
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);//使用core模式
创建窗口:
//宽、高、标题
GLFWwindow* window = glfwCreateWindow(800, 600, "HelloWorld", NULL, NULL);
if (window == NULL)
{
std::cout << "Failed to create GLFW window" << std::endl;
glfwTerminate();
return -1;
}
glfwMakeContextCurrent(window);//将创建的窗口设置为主线程的上下文
初始化GLAD
//传入加载系统相关函数指针地址的函数glfwGetProcAddress
if (!gladLoadGLLoader((GLADloadproc)glfwGetProcAddress))
{
std::cout << "Failed to initialize GLAD" << std::endl;
return -1;
}
设置视口维度,OpenGL幕后将(-1, 1)映射到(0, 800)和(0,600)。若视口小于窗口,部分位置将显示在视口之外:
glViewport(0, 0, 800, 600);//左下角坐标,宽,高
用户输入
1.回调函数(不需要连续输入,如:文本输入事件)
当用户改变窗口大小时,视口也应该被调整,对窗口注册一个回调函数:
void framebuffer_size_callback(GLFWwindow* window, int width, int height)
{
glViewport(0, 0, width, height);
}
//注册函数,每当窗口大小变化时调用该函数
glfwSetFramebufferSizeCallback(window, framebuffer_size_callback);
键盘回调函数:
//action
//GLFW_PRESS:按下
//GLFW_RELEASE: 松开
//GLFW_REPEAT:重复按键
//mods
//GLFW_MOD_SHIFT:按下Shift键
//GLFW_MOD_CONTROL:按下Ctrl键
//GLFW_MOD_ALT:按下Alt键
//GLFW_MOD_SUPER:无
void key_callback(GLFWwindow* window, int key, int scancode, int action, int mods)
{
switch (key)
{
// change the position of the camera
case GLFW_KEY_UP:
camera_y += camera_speed;
break;
case GLFW_KEY_DOWN:
camera_y -= camera_speed;
break;
...
}
}
glfwSetKeyCallback(window, key_callback);
鼠标回调函数:
//隐藏鼠标,焦点在程序上时捕捉鼠标
glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_DISABLED);
//鼠标控制的相机回调函数
// Mouse-look callback (registered via glfwSetCursorPosCallback).
// Converts cursor deltas into yaw/pitch updates and rebuilds cameraFront.
// Mutates file-scope state: firstMouse, lastX, lastY, yaw, pitch, cameraFront.
void mouse_callback(GLFWwindow* window, double xpos, double ypos)
{
    // Seed the previous position on the very first event so the view
    // does not jump when the cursor first enters the window.
    if(firstMouse)
    {
        lastX = xpos;
        lastY = ypos;
        firstMouse = false;
    }
    float dx = xpos - lastX;
    float dy = lastY - ypos; // reversed: window y grows downward
    lastX = xpos;
    lastY = ypos;
    float sensitivity = 0.05;
    dx *= sensitivity;
    dy *= sensitivity;
    yaw += dx;
    pitch += dy;
    // Keep pitch away from +/-90 degrees to avoid flipping the view.
    if(pitch > 89.0f)
        pitch = 89.0f;
    if(pitch < -89.0f)
        pitch = -89.0f;
    // Rebuild the forward direction from the spherical angles.
    glm::vec3 dir;
    dir.x = sin(glm::radians(yaw)) * cos(glm::radians(pitch));
    dir.y = sin(glm::radians(pitch));
    dir.z = -cos(glm::radians(yaw)) * cos(glm::radians(pitch));
    cameraFront = glm::normalize(dir);
}
glfwSetCursorPosCallback(window, mouse_callback);
//鼠标控制的缩放回调函数
// Scroll-wheel zoom callback: adjusts the field of view and clamps it
// to the range [1, 45] degrees. Mutates the file-scope fov.
void scroll_callback(GLFWwindow* window, double xoffset, double yoffset)
{
    if(fov >= 1.0f && fov <= 45.0f)
        fov -= yoffset;
    // Clamp back into range after the adjustment.
    fov = (fov < 1.0f) ? 1.0f : fov;
    fov = (fov > 45.0f) ? 45.0f : fov;
}
glfwSetScrollCallback(window, scroll_callback);
2.循环中处理:
while (!glfwWindowShouldClose(window))
{
processInputs(window);
...
}
void processInputs(GLFWwindow* window)
{
...
float cameraSpeed = 0.05f; // adjust accordingly
if (glfwGetKey(window, GLFW_KEY_W) == GLFW_PRESS)
cameraPos += cameraSpeed * cameraFront;
if (glfwGetKey(window, GLFW_KEY_S) == GLFW_PRESS)
cameraPos -= cameraSpeed * cameraFront;
if (glfwGetKey(window, GLFW_KEY_A) == GLFW_PRESS)
cameraPos -= glm::normalize(glm::cross(cameraFront, cameraUp)) * cameraSpeed;
if (glfwGetKey(window, GLFW_KEY_D) == GLFW_PRESS)
cameraPos += glm::normalize(glm::cross(cameraFront, cameraUp)) * cameraSpeed;
}
渲染循环
float deltaTime = 0.0f; // 当前帧与上一帧的时间差
float lastFrame = 0.0f; // 上一帧的时间
while(!glfwWindowShouldClose(window))//每次循环检查窗口是否关闭
{
//glClear()清空屏幕缓冲GL_COLOR_BUFFER_BIT/GL_DEPTH_BUFFER_BIT/GL_STENCIL_BUFFER_BIT
glClearColor(0.2f, 0.3f, 0.3f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
//计算每帧时间
float currentFrame = glfwGetTime();
deltaTime = currentFrame - lastFrame;
lastFrame = currentFrame;
//glfwGetKey(window, Key)检测输入
if(glfwGetKey(window, GLFW_KEY_ESCAPE) == GLFW_PRESS)
glfwSetWindowShouldClose(window, true);
glfwPollEvents();//检查触发的事件,更新窗口状态,调用回调函数
glfwSwapBuffers(window);//交换颜色缓冲
}
结束后释放资源:
glfwTerminate();
return 0;
顶点数据
支持的顶点属性数查询
int nrAttributes;
glGetIntegerv(GL_MAX_VERTEX_ATTRIBS, &nrAttributes);
std::cout << "Maximum nr of vertex attributes supported: " << nrAttributes << std::endl;
接收(-1.0, 1.0)的标准化设备坐标:
float vertices[] = {
// ---- 位置 ---- ---- 颜色 ---- - 纹理坐标 -
0.5f, 0.5f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, // 右上
0.5f, -0.5f, 0.0f, 0.0f, 1.0f, 0.0f, 1.0f, 0.0f, // 右下
-0.5f, -0.5f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, // 左下
-0.5f, 0.5f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f // 左上
};
创建并绑定VAO对象(Core模式必须设置VAO):
用于配置顶点属性,设置顶点数据布局
unsigned int VAO;
glGenVertexArrays(1, &VAO);
glBindVertexArray(VAO);
通过VBO将大量顶点数据存入显存:
存储顶点数据
unsigned int VBO;
glGenBuffers(1, &VBO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
//将定义的顶点数据绑定到缓冲(缓冲类型, 数据大小,数据,管理方式)
//管理方式
//GL_STATIC_DRAW 数据几乎不变
//GL_DYNAMIC_DRAW 数据会被改变很多,放入能够高速写入的内存
//GL_STREAM_DRAW 数据每次绘制时都会改变
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
//or
glNamedBufferData(VBO, sizeof(vertices), vertices, GL_STATIC_DRAW);
顶点属性配置(VAO会存储这些内容)
//(layout(location=?)的位置值,顶点属性个数,数据类型,是否标准化,步长,偏移量)
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void*)0);
//启用顶点属性(location)
glEnableVertexAttribArray(0);
//颜色
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void*)(3* sizeof(float)));
glEnableVertexAttribArray(1);
//纹理
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void*)(6 * sizeof(float)));
glEnableVertexAttribArray(2);
基本着色器(GLSL)
顶点着色器
#version 450 core
layout (location = 0) in vec3 aPos;
layout (location = 1) in vec3 aColor;
layout (location = 2) in vec2 aTexCoord;
out vec3 Color;
out vec2 TexCoord;
// Pass-through vertex shader: forwards color and texture coordinates
// and emits the position directly (already in NDC, no matrices applied).
void main()
{
gl_Position = vec4(aPos, 1.0f);
Color = aColor;
TexCoord = aTexCoord;
}
片段着色器
#version 450 core
in vec3 Color;
in vec2 TexCoord;
uniform sampler2D ourTexture;
out vec4 FragColor;
// Sample the bound texture and tint it with the interpolated vertex color.
void main()
{
FragColor = texture(ourTexture, TexCoord) * vec4(Color, 1.0f);
}
文件存储着色器
// Read the vertex/fragment shader sources from disk into C strings.
std::ifstream vShaderFile;
std::ifstream fShaderFile;
vShaderFile.open(vertexPath);
fShaderFile.open(fragmentPath);
std::stringstream vShaderStream, fShaderStream;
// rdbuf() streams the whole file buffer into the stringstream.
vShaderStream << vShaderFile.rdbuf();
fShaderStream << fShaderFile.rdbuf();
vShaderFile.close();
fShaderFile.close();
// BUG FIX: str() returns a temporary std::string; taking c_str() of that
// temporary leaves a dangling pointer as soon as the full expression ends.
// Keep the strings alive in named variables for as long as the pointers
// are used (e.g. by glShaderSource below).
std::string vertexCodeStr = vShaderStream.str();
std::string fragmentCodeStr = fShaderStream.str();
const char* vertexCode = vertexCodeStr.c_str();
const char* fragmentCode = fragmentCodeStr.c_str();
编译着色器
glGetShaderiv(GLuint shader, GLenum pname, GLint* params);
shader编号
pname指定类型
GL_COMPILE_STATUS(返回True/False)
GL_INFO_LOG_LENGTH(返回信息长度)
params返回指针
glGetShaderInfoLog(unsigned int shader, int bufMaxLen, int* retLen, char* buf);
shader-着色器句柄
bufMaxLen-缓冲区最大容量
retLen-真正的字符串长度
buf-指向用户返回的字符串缓冲区
unsigned int vertexShader, fragmentShader;
//vertexShader
vertexShader = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vertexShader, 1, &vertexCode, NULL);
glCompileShader(vertexShader);
//检验是否编译成功
int success;
char infoLog[512];
glGetShaderiv(vertexShader, GL_COMPILE_STATUS, &success);
if(!success)
{
glGetShaderInfoLog(vertexShader, 512, NULL, infoLog);
std::cout << "Vertex Shader Compile Failed!" << infoLog << std::endl;
}
//fragmentShader
fragmentShader = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(fragmentShader, 1, &fragmentCode, NULL);
glCompileShader(fragmentShader);
链接到着色器程序
创建对象,链接:
unsigned int shaderProgram;
shaderProgram = glCreateProgram();
glAttachShader(shaderProgram, vertexShader);
glAttachShader(shaderProgram, fragmentShader);
glLinkProgram(shaderProgram);
//检验链接是否成功
glGetProgramiv(shaderProgram, GL_LINK_STATUS, &success);
if(!success) {
glGetProgramInfoLog(shaderProgram, 512, NULL, infoLog);
std::cout << "Shader Program Compile Failed!" << infoLog << std::endl;
}
//链接成功后可删除着色器对象
glDeleteShader(vertexShader);
glDeleteShader(fragmentShader);
发送渲染指令时会调用被激活的着色器程序:
//激活着色器程序
glUseProgram(shaderProgram);
glBindVertexArray(VAO);
//(图元类型, 顶点数组的起始索引,绘制的顶点个数)
glDrawArrays(GL_TRIANGLES, 0, 3);
顶点索引对象绘制矩形
float vertices[] = {
0.5f, 0.5f, 0.0f, // 右上角
0.5f, -0.5f, 0.0f, // 右下角
-0.5f, -0.5f, 0.0f, // 左下角
-0.5f, 0.5f, 0.0f // 左上角
};
unsigned int indices[] = {
// 注意索引从0开始!
// 此例的索引(0,1,2,3)就是顶点数组vertices的下标,
// 这样可以由下标代表顶点组合成矩形
0, 1, 3, // 第一个三角形
1, 2, 3 // 第二个三角形
};
创建并绑定IBO
unsigned int IBO;
glGenBuffers(1, &IBO);
//绑定到GL_ELEMENT_ARRAY_BUFFER
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, IBO);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indices), indices, GL_STATIC_DRAW);
使用IBO绘制
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, IBO);
//(图元类型,绘制顶点个数,索引类型,偏移量)
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
因为VAO能跟踪GL_ELEMENT_ARRAY_BUFFER的绑定,所以
glActiveTexture(GL_TEXTURE0); // 在绑定纹理之前先激活纹理单元
glBindTexture(GL_TEXTURE_2D, texture);//绑定这个纹理到当前激活的纹理单元
glBindVertexArray(VAO);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
uniform
从CPU向GPU中发送全局的变量。
可用GL_MAX_VERTEX_UNIFORM_COMPONENTS查询最大uniform数。
glm::vec4 ourColor = glm::vec4(0.0f, 1.0f, 0.0f, 1.0f);
//查询uniform值的位置
int UniLocation = glGetUniformLocation(shaderProgram, "ourColor");
//更新uniform值前必须先调用useProgram,因为需要在该程序上更新
glUseProgram(shaderProgram);
//位置,size,数组
glUniform4fv(UniLocation, 1, glm::value_ptr(ourColor));
//或者
glUniform4f(UniLocation, 0.0f, 1.0f, 0.0f, 1.0f);
//纹理单元
glUniform1i(glGetUniformLocation(shaderProgram, "ourTexture"), 0);
纹理
#include "stb_image.h"
unsigned int texture;
glGenTextures(1, &texture);
glBindTexture(GL_TEXTURE_2D, texture);
// 为当前绑定的纹理对象设置环绕、过滤方式
//GL_REPEAT 对纹理的默认行为。重复纹理图像。
//GL_MIRRORED_REPEAT 和GL_REPEAT一样,但每次重复图片是镜像放置的。
//GL_CLAMP_TO_EDGE 纹理坐标会被约束在0到1之间,超出的部分会产生一种边缘被拉伸的效果。
//GL_CLAMP_TO_BORDER 超出的坐标为用户指定的边缘颜色。需要定义额外参数float color[4]
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
//纹理被放大和缩小使用的过滤(采样)方式
//GL_LINEAR(平滑)/GL_NEAREST(锐利锯齿)
//多级渐远纹理的过滤方式
//插值方法_采样方法
//GL_NEAREST_MIPMAP_NEAREST 使用最邻近的多级渐远纹理来匹配像素大小,并使用邻近插值进行纹理采样
//GL_LINEAR_MIPMAP_NEAREST 使用最邻近的多级渐远纹理级别,并使用线性插值进行采样
//GL_NEAREST_MIPMAP_LINEAR 在两个邻近的多级渐远纹理之间进行线性插值,使用邻近插值进行采样
//GL_LINEAR_MIPMAP_LINEAR 在两个邻近的多级渐远纹理之间使用线性插值,并使用线性插值进行采样
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
//注意:多级渐远纹理主要是使用在纹理被缩小的情况下的,纹理放大不会使用多级渐远纹理,否则报错
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
//用stbi_load载入图像数据
int width, height, channels;
stbi_set_flip_vertically_on_load(true);//翻转,因为图片的y=0在顶部
unsigned char* data = stbi_load("XXX.jpg", &width, &height, &channels, 0);
//载入纹理数据
//纹理目标,多级渐远纹理级别,纹理存储方式, 宽,高,历史遗留0,源图格式,存储为byte数组,数据
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, data);
//生成多级渐远纹理
glGenerateMipmap(GL_TEXTURE_2D);
//释放内存
stbi_image_free(data);
纹理采样器(着色器)-纹理单元(0,1,...)-纹理对象(glGenTexture)
glUseProgram(shaderProgram);
//设置纹理采样器OurTexture所属的纹理单元0
glUniform1i(glGetUniformLocation(shaderProgram, "ourTexture"), 0);
//循环内------------------------------
//绑定纹理到激活的纹理单元0
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture);
矩阵变换
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/type_ptr.hpp>
glm::mat4 trans = glm::mat4(1.0f);
trans = glm::translate(trans, glm::vec3(1, 1, 1));
//逆时针
trans = glm::rotate(trans, glm::radians(90.0f), glm::vec3(0.0, 0.0, 1.0));
trans = glm::scale(trans, glm::vec3(0.5, 0.5, 0.5));
glUseProgram(shaderProgram);
unsigned int transformLoc = glGetUniformLocation(shaderProgram, "transform");
//位置,数量,是否转置(默认列主序),数据
glUniformMatrix4fv(transformLoc, 1, GL_FALSE, glm::value_ptr(trans));
简便写法:
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/type_ptr.hpp>
#include <glm/gtx/quaternion.hpp>
glm::vec3 Translate = { 0.f, 0.f, 0.f };
glm::vec3 Rotation = { 0.f, 0.f, 0.f };
glm::vec3 Scale = { 1.f, 1.f, 1.f };
return glm::translate(glm::mat4(1.0f), Translate)
* glm::toMat4(glm::quat(Rotation))
* glm::scale(glm::mat4(1.0f), Scale);
坐标变换
局部空间-(模型矩阵model)-》世界空间-(观察矩阵view)-》观察空间-(投影矩阵)-》裁剪空间(透视除法实现近大远小->标准化设备坐标)-(视口变换)》屏幕空间
gl_Position = MVP
glm投影矩阵
将所有顶点数据从观察空间转换为裁剪空间,裁剪坐标通过透视除法转换为标准化设备坐标(NDC)。
//左、右、底部、顶部、近平面、远平面
glm::ortho(0.0f, 800.0f, 0.0f, 600.0f, 0.1f, 100.0f);
//视角(45.0接近现实)、宽高比、近平面、远平面
glm::perspective(glm::radians(45.0f), (float)width/(float)height, 0.1f, 100.0f);
LookAt矩阵转换到观察空间
//位置,目标,上方向
glm::mat4 view = glm::lookAt(glm::vec3(0.0f, 0.0f, 3.0f),
glm::vec3(0.0f, 0.0f, 0.0f),
glm::vec3(0.0f, 1.0f, 0.0f));
基础光照(布林-冯)
-----------------------------------vertexShader------------------------------
#version 450 core
layout (location = 0) in vec3 aPos;
layout (location = 1) in vec3 aNormal;
layout (location = 2) in vec2 aTexCoords;
out vec3 FragPos;
out vec3 Normal;
out vec2 TexCoords;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
void main()
{
FragPos = vec3(model * vec4(aPos, 1.0));
// Use the transpose of the inverse of the model matrix as the normal
// matrix so that non-uniform scaling does not distort normals.
// Casting to mat3 drops the translation part, which never affects normals.
// Inverting a matrix per vertex is expensive; prefer computing the normal
// matrix on the CPU and passing it as a uniform.
Normal = mat3(transpose(inverse(model))) * aNormal;
TexCoords = aTexCoords;
gl_Position = projection * view * vec4(FragPos, 1.0);
}
-----------------------------------fragmentShader------------------------------
#version 450 core
in vec3 FragPos;
in vec3 Normal;
in vec2 TexCoords;
uniform vec3 lightPos;
uniform vec3 viewPos;
uniform vec3 lightColor;
uniform vec3 objectColor;
struct Material {
//vec3 ambient;//通常是物体颜色
//vec3 diffuse;//通常是物体颜色
sampler2D diffuse;
//vec3 specular;//镜面反射强度
sampler2D specular;
float shininess;
};
uniform Material material;
struct Light {
vec3 ambient;//一般为光照颜色的0.2
vec3 diffuse;//一般为光照颜色的0.5
vec3 specular;
};
uniform Light light;
out vec4 FragColor;
// Textured Phong lighting: ambient + diffuse + specular.
// NOTE(review): despite the "Blinn-Phong" heading above, this is classic
// Phong (reflect + dot(view, reflect)); Blinn-Phong would use a halfway vector.
void main()
{
vec3 ambient = vec3(texture(material.diffuse, TexCoords)) * light.ambient;
vec3 norm = normalize(Normal);
vec3 lightDir = normalize(lightPos - FragPos);
float diff = max(dot(lightDir, norm), 0.0);//Lambert diffuse factor
vec3 diffuse = (diff * vec3(texture(material.diffuse, TexCoords))) * light.diffuse;
vec3 viewDir = normalize(viewPos-FragPos);
//reflect() expects the incident vector pointing from the light toward the
//fragment, hence the negation of lightDir
vec3 reflectDir = reflect(-lightDir, norm);
//Specular term
//Higher shininess -> tighter, smaller highlight
float spec = pow(max(dot(viewDir, reflectDir), 0.0), material.shininess);
vec3 specular = (spec * vec3(texture(material.specular, TexCoords))) * light.specular;
vec3 result = ambient + diffuse + specular;
FragColor = vec4(result, 1.0);
}
平行光
struct DirLight {
// vec3 position; // 使用定向光就不再需要了
vec3 direction;
vec3 ambient;
vec3 diffuse;
vec3 specular;
};
uniform DirLight light;
uniform Material material;
// Directional light: constant direction, no attenuation.
// FIXES: the light parameter was typed vec3 but accessed as a struct
// (light.direction) - it must be DirLight; diff/spec are scalars, not vec3;
// "material.shiness" was a typo for material.shininess.
// Parameter order kept to match the call site: (viewDir, light, normal).
vec3 calculateDirLight(vec3 viewDir, DirLight light, vec3 normal)
{
    // The uniform stores the direction the light travels; negate it to get
    // the fragment-to-light direction used by the lighting math.
    vec3 lightDir = normalize(-light.direction);
    // Diffuse (Lambert) term.
    float diff = max(dot(normal, lightDir), 0.0);
    // Specular (Phong) term.
    vec3 reflectDir = reflect(-lightDir, normal);
    float spec = pow(max(dot(viewDir, reflectDir), 0.0), material.shininess);
    vec3 ambient = light.ambient * vec3(texture(material.diffuse, TexCoords));
    vec3 diffuse = diff * light.diffuse * vec3(texture(material.diffuse, TexCoords));
    vec3 specular = spec * light.specular * vec3(texture(material.specular, TexCoords));
    return ambient + diffuse + specular;
}
...
void main()
{
vec3 norm = normalize(Normal);
vec3 viewDir = normalize(viewPos - FragPos);
vec3 color = calculateDirLight(viewDir, light, norm);
...
}
点光源
struct PointLight {
vec3 position;
vec3 ambient;
vec3 diffuse;
vec3 specular;
//用于计算衰减的常数项、一次项、二次项
float constant;
float linear;
float quadratic;
};
uniform PointLight light;
// Point light: Phong shading scaled by distance attenuation
// 1 / (constant + linear*d + quadratic*d^2).
// FIXES: "vec3 dff" was declared but "diff" was used (and both are scalars);
// the function mixed the global FragPos with its fragPos parameter - it now
// uses the parameter consistently.
vec3 CalculatePointLight(PointLight light, vec3 normal, vec3 fragPos, vec3 viewDir)
{
    vec3 lightDir = normalize(light.position - fragPos);
    // Diffuse term.
    float diff = max(dot(lightDir, normal), 0.0);
    // Specular term.
    vec3 reflectDir = reflect(-lightDir, normal);
    float spec = pow(max(dot(reflectDir, viewDir), 0.0), material.shininess);
    vec3 ambient = light.ambient * vec3(texture(material.diffuse, TexCoords));
    vec3 diffuse = light.diffuse * diff * vec3(texture(material.diffuse, TexCoords));
    vec3 specular = light.specular * spec * vec3(texture(material.specular, TexCoords));
    // Attenuation by distance to the light.
    float distance = length(light.position - fragPos);
    float attenuation = 1.0 / (light.constant + light.linear * distance +
        light.quadratic * (distance * distance));
    // Combine the three components.
    ambient *= attenuation;
    diffuse *= attenuation;
    specular *= attenuation;
    return (ambient + diffuse + specular);
}
...
void main()
{
...
vec3 norm = normalize(Normal);
vec3 viewDir = normalize(viewPos - FragPos);
vec3 color = CalculatePointLight(light, norm, FragPos, viewDir);
FragColor = vec4(color, 1.0);
}
//------------------------------------------------------------------
lightingShader.setVec3("light.ambient", 0.2f, 0.2f, 0.2f);
lightingShader.setVec3("light.diffuse", 0.5f, 0.5f, 0.5f);
lightingShader.setVec3("light.specular", 1.0f, 1.0f, 1.0f);
lightingShader.setFloat("light.constant", 1.0f);
lightingShader.setFloat("light.linear", 0.09f);
lightingShader.setFloat("light.quadratic", 0.032f);
聚光
struct SpotLight {
vec3 position;
vec3 direction;
float cutOff;//聚光灯方向与内锥夹角的余弦值,内锥以内为1.0
float outerCutOff;//用于平滑过渡的外锥体
...
};
uniform SpotLight light;
// Spot light: point-light shading modulated by a soft cone edge.
// FIXES: "vec3 dff" was declared but "diff" was used (both are scalars);
// the global FragPos was mixed with the fragPos parameter.
vec3 CalSpotLight(SpotLight light, vec3 normal, vec3 fragPos, vec3 viewDir)
{
    vec3 lightDir = normalize(light.position - fragPos);
    // Diffuse term.
    float diff = max(dot(lightDir, normal), 0.0);
    // Specular term.
    vec3 reflectDir = reflect(-lightDir, normal);
    float spec = pow(max(dot(reflectDir, viewDir), 0.0), material.shininess);
    vec3 ambient = light.ambient * vec3(texture(material.diffuse, TexCoords));
    vec3 diffuse = light.diffuse * diff * vec3(texture(material.diffuse, TexCoords));
    vec3 specular = light.specular * spec * vec3(texture(material.specular, TexCoords));
    // Distance attenuation.
    float distance = length(light.position - fragPos);
    float attenuation = 1.0 / (light.constant + light.linear * distance + light.quadratic * (distance * distance));
    // Cosine of the angle between the fragment direction and the spot axis.
    float theta = dot(lightDir, normalize(-light.direction));
    // Smooth edge between the inner (cutOff) and outer (outerCutOff) cones;
    // Frostbite squares the intensity for an even softer falloff.
    float epsilon = light.cutOff - light.outerCutOff;
    float intensity = clamp((theta - light.outerCutOff) / epsilon, 0.0, 1.0);
    // Ambient is not scaled by the cone so areas outside keep a little light.
    ambient *= attenuation;
    diffuse *= attenuation * intensity;
    specular *= attenuation * intensity;
    return (ambient + diffuse + specular);
}
void main()
{
...
vec3 norm = normalize(Normal);
vec3 viewDir = normalize(viewPos - FragPos);
vec3 color = CalSpotLight(light, norm, fragPos, viewDir);
FragColor = vec4(color, 1.0);
}
//------------------------------------------------------------------------------------
void main()
{
float theta = dot(lightDir, normalize(-light.direction));//片段位置与光照方向夹角余弦
//if(theta > light.cutOff)
//{
// // 执行光照计算
//}
//else // 否则,使用环境光,让场景在聚光之外时不至于完全黑暗
// color = vec4(light.ambient * vec3(texture(material.diffuse, TexCoords)), 1.0);
//---------------------------clamp平滑--------------------------------
//实际上是比较片段与外锥边界间的角度差,寒霜引擎使用intensity平方
float epsilon = light.cutOff - light.outerCutOff;//总范围
float intensity = clamp((theta - light.outerCutOff) / epsilon, 0.0, 1.0);
...
//不对环境光做出影响,让它总是能有一点光
diffuse *= intensity;
specular *= intensity;
...
}
模版测试
在片段着色之后,用于根据模版的设置得到各种效果。
glEnable(GL_STENCIL_TEST);//启用模板测试
glClear(GL_STENCIL_BUFFER_BIT);//清除模板缓冲
//与写入的模板值进行AND运算
glStencilMask(0xFF);//保持原样
glStencilMask(0x00);//禁用写入
//glStencilFunc(GLenum func, GLint ref, GLuint mask);
//mask值会与参考值(ref)和当前模板值先进行AND运算
//只要模板值等于(GL_EQUAL)1,就被绘制
glStencilFunc(GL_EQUAL, 1, 0xFF);
//可选择GL_NEVER、GL_LESS、GL_LEQUAL、GL_GREATER、GL_GEQUAL、GL_EQUAL、
//GL_NOTEQUAL和GL_ALWAYS
//更新缓冲的方式
glStencilOp(GLenum sfail, GLenum dpfail, GLenum dppass)
//三个参数依次对应:模板测试失败、模板测试通过但深度测试失败、模板和深度测试都通过
//GL_KEEP 保持当前储存的模板值
//GL_ZERO 将模板值设置为0
//GL_REPLACE 将模板值设置为glStencilFunc函数设置的ref值
//GL_INCR 如果模板值小于最大值则将模板值加1
//GL_INCR_WRAP 与GL_INCR一样,但如果模板值超过了最大值则归零
//GL_DECR 如果模板值大于最小值则将模板值减1
//GL_DECR_WRAP 与GL_DECR一样,但如果模板值小于0则将其设置为最大值
//GL_INVERT 按位翻转当前的模板缓冲值
glStencilFunc()控制哪些内容被绘制,而 glStencilMask()控制绘制的内容是否写入缓冲,glStencilOp()控制缓冲的更新方式。
物体轮廓
glEnable(GL_STENCIL_TEST);//启用模版测试
glStencilMask(0xFF);//开启模版缓冲写入
glStencilFunc(GL_ALWAYS, 1, 0xFF);//所有片段都会被绘制,模版缓冲也会更新
glStencilOp(GL_KEEP, GL_KEEP, GL_REPLACE);//深度和模版测试都通过则更新为1
//绘制物体
//绘制轮廓
glStencilFunc(GL_NOTEQUAL, 1, 0xFF);//仅绘制模版值不是1的地方
glStencilMask(0x00);//禁用模版缓冲写入
glDisable(GL_DEPTH_TEST);//使得轮廓不会被阻挡
//绘制放大一点的纯颜色物体
深度测试
深度缓冲包含了0.0到1.0的深度值,把远近平面之间的值映射到0到1
深度测试在片段着色器运行之后(以及模版测试运行之后),可以使用GLSL内建变量gl_FragCoord从片段着色器中访问屏幕空间坐标的x、y和z分量。
glEnable(GL_DEPTH_TEST);//开启深度测试
glDepthMask(GL_FALSE);//禁用深度缓冲的写入
glDepthFunc(GL_LESS);//深度测试函数,小于深度缓冲则通过
//GL_ALWAYS 永远通过深度测试
//GL_NEVER 永远不通过深度测试
//GL_LESS 在片段深度值小于缓冲的深度值时通过测试
//GL_EQUAL 等于
//GL_LEQUAL 小于等于
//GL_GREATER 大于
//GL_NOTEQUAL 不等于
//GL_GEQUAL 大于等于
while(!glfwWindowShouldClose(window))
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);//每次迭代应该清除深度缓冲
}
渲染背景
使用深度测试,先渲染物体,再渲染背景,节省背景开销。
// 物体
glBindVertexArray(cubeVAO);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, cubeTexture);
glDrawArrays(GL_TRIANGLES, 0, 36);
glBindVertexArray(0);
// 背景
glDepthFunc(GL_LEQUAL); //改变深度测试函数
skyboxShader.use();
view = glm::mat4(glm::mat3(camera.GetViewMatrix()));
skyboxShader.setMat4("view", view);
skyboxShader.setMat4("projection", projection);
glBindVertexArray(skyboxVAO);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_CUBE_MAP, cubemapTexture);
glDrawArrays(GL_TRIANGLES, 0, 36);
glBindVertexArray(0);
glDepthFunc(GL_LESS); // 还原默认深度测试函数
关键在于用于渲染背景的片段着色器的优化,将位置值的z设置为w,使其在透视除法后为1.0,始终为最大深度值背景像素。
// Skybox trick: output z = w so that after perspective division the depth
// is 1.0, placing the skybox at maximum depth behind everything else.
void main()
{
TexCoords = aPos;
vec4 pos = projection * view * vec4(aPos, 1.0);
gl_Position = pos.xyww;
}
混合
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
混合指的是与颜色缓冲区的值混合
因为有深度缓冲,要保证如下顺序:
1.绘制不透明物体
2.按先远后近绘制透明物体
可以先存入map,再反向迭代
std::map<float, glm::vec3> sorted;
for(uint i = 0; i<textures.size(); ++i)
{
float dis = glm::length(camera.Position - textures[i]);
sorted[dis] = textures[i];
}
for(std::map<float, glm::vec3>::reverse_iterator it = sorted.rbegin(); it!=sorted.rend(); ++it)
{
model = glm::mat4();
model = glm::translate(model, it->second);
shader.SetMat("Model", model);
glDrawArrays(GL_TRIANGLES, 0, 6);
}
更高级的技术还有次序无关透明度(Order Independent Transparency, OIT)。
面剔除
默认把顶点为逆时针的面定义为正面,glFrontFace()函数可以设置,默认逆时针GL_CCW,顺时针GL_CW。(counter clock wise 和 clock wise)
glEnable(GL_CULL_FACE);
glCullFace(GL_BACK);//设置剔除的面
glFrontFace(GL_CCW);
自定义帧缓冲
unsigned int fbo;
glGenFramebuffers(1, &fbo);
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
//检测帧缓冲
if(glCheckFramebufferStatus(GL_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE)
{/*...*/}
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glDeleteFramebuffers(1, &fbo);
用于帧缓冲的纹理附件
颜色附件
//创建绑定帧缓冲
GLuint fbo;
glGenFramebuffers(1, &fbo);
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
//创建纹理
unsigned int texture;
glGenTextures(1, &texture);
glBindTexture(GL_TEXTURE_2D, texture);
//宽高是屏幕的宽、高,数据是NULL
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, screen_width, screen_height, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
//将纹理绑定到帧缓冲
//target帧缓冲目标,
//attachment附件类型+索引,
//textarget附加的纹理类型,
//texture纹理本身,
//level多级渐远纹理级别
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, texture, 0);
//检测与解除绑定
if(glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
std::cout << "ERROR::FRAMEBUFFER:: Framebuffer is not complete!" << std::endl;
glBindFramebuffer(GL_FRAMEBUFFER, 0);
读取特定位置的缓冲值
glReadBuffer(GL_COLOR_ATTACHMENT0 + attachmentIndex);
int pixelData;
//左下角坐标下x,y 矩形的宽度 高度 需要读取的像素数据(如:GL_RGB),保存格,保存指针
glReadPixels(x, y, 1, 1, GL_RED_INTEGER, GL_INT, &pixelData);
--------------额外
glCopyPixels//在帧缓冲内复制像素矩形(旧版固定管线函数,核心模式中已被移除)
//复制来源的左下角坐标x,y 宽 高 GL_COLOR像素颜色,也可以是GL_DEPTH或GL_STENCIL,
渲染缓冲对象
只写缓冲,用于不需要采样的数据(通常为深度和模版)。渲染缓冲对象专门作为帧缓冲的附件使用。
好处:将数据储存为OpenGL原生的渲染格式,不需要做纹理格式转换,从而提高性能。
unsigned int rbo;
glGenRenderbuffers(1, &rbo);
glBindRenderbuffer(GL_RENDERBUFFER, rbo);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH24_STENCIL8, screen_width, screen_height);
glBindRenderbuffer(GL_RENDERBUFFER, 0);//分配了足够内存后可以解除绑定
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_RENDERBUFFER, rbo);
具体流程:
1.绑定自定义帧缓冲,把场景渲染到帧缓冲。
2.绑定默认缓冲。
3.用帧缓冲的颜色附件纹理作为纹理,绘制覆盖屏幕的四边形。
// 第一处理阶段(Pass)
glBindFramebuffer(GL_FRAMEBUFFER, framebuffer);
glClearColor(0.1f, 0.1f, 0.1f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // 我们现在不使用模板缓冲
glEnable(GL_DEPTH_TEST);
DrawScene();
// 第二处理阶段
glBindFramebuffer(GL_FRAMEBUFFER, 0); // 返回默认
glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
screenShader.use();
glBindVertexArray(quadVAO);
glDisable(GL_DEPTH_TEST);
glBindTexture(GL_TEXTURE_2D, textureColorbuffer);
glDrawArrays(GL_TRIANGLES, 0, 6);
通过修改最终绘制到屏幕的片段着色器(screenShader的fragmentShader),就能产生各种效果。(对TexCoords.st偏移采样周围像素)如各种核效果:
// Post-process convolution: sample the screen texture at the fragment plus
// its 8 neighbours and weight the samples by a 3x3 kernel.
// "offset" is the texel step size, defined elsewhere in the shader.
void main()
{
vec2 offsets[9] = vec2[](
vec2(-offset, offset), // top-left
vec2( 0.0f, offset), // top-centre
vec2( offset, offset), // top-right
vec2(-offset, 0.0f), // left
vec2( 0.0f, 0.0f), // centre
vec2( offset, 0.0f), // right
vec2(-offset, -offset), // bottom-left
vec2( 0.0f, -offset), // bottom-centre
vec2( offset, -offset) // bottom-right
);
// Sharpen kernel (weights sum to 1, so overall brightness is preserved).
float kernel[9] = float[](
-1, -1, -1,
-1, 9, -1,
-1, -1, -1
);
vec3 sampleTex[9];
for(int i = 0; i < 9; i++)
{
sampleTex[i] = vec3(texture(screenTexture, TexCoords.st + offsets[i]));
}
vec3 col = vec3(0.0);
for(int i = 0; i < 9; i++)
col += sampleTex[i] * kernel[i];
FragColor = vec4(col, 1.0);
}
立方体贴图
有六个纹理坐标,每个对应立方体贴图的一个面:
GL_TEXTURE_CUBE_MAP_POSITIVE_X | 右 |
GL_TEXTURE_CUBE_MAP_NEGATIVE_X | 左 |
GL_TEXTURE_CUBE_MAP_POSITIVE_Y | 上 |
GL_TEXTURE_CUBE_MAP_NEGATIVE_Y | 下 |
GL_TEXTURE_CUBE_MAP_POSITIVE_Z | 后 |
GL_TEXTURE_CUBE_MAP_NEGATIVE_Z | 前 |
循环遍历存入数据:
GLuint textureID;
glGenTextures(1, &textureID);
glBindTexture(GL_TEXTURE_CUBE_MAP, textureID);
int width, height, nrChannels;
unsigned char *data;
for(unsigned int i = 0; i < textures_faces.size(); i++)
{
data = stbi_load(textures_faces[i].c_str(), &width, &height, &nrChannels, 0);
glTexImage2D(
GL_TEXTURE_CUBE_MAP_POSITIVE_X + i,
0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, data
);
}
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
片段着色器
将顶点的位置向量作为纹理方向向量即TexCoords=aPos;
Cube采样器使用samplerCube
#version 450 core
out vec4 FragColor;
in vec3 TexCoords;
uniform samplerCube skybox;
// Sample the cubemap using the interpolated vertex position as a direction.
void main()
{
FragColor = texture(skybox, TexCoords);
}
反射背景包围盒
使用GLSL的reflect函数计算反射向量,并从天空盒采集:
#version 330 core
layout (location = 0) in vec3 aPos;
layout (location = 1) in vec3 aNormal;
out vec3 Normal;
out vec3 Position;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
// Outputs world-space position and a normal corrected for non-uniform
// scaling via the inverse-transpose of the model matrix.
void main()
{
Normal = mat3(transpose(inverse(model))) * aNormal;
Position = vec3(model * vec4(aPos, 1.0));
gl_Position = projection * view * model * vec4(aPos, 1.0);
}
#version 330 core
out vec4 FragColor;
in vec3 Normal;
in vec3 Position;
uniform vec3 cameraPos;
uniform samplerCube skybox;
// Mirror reflection: reflect the camera-to-fragment ray about the surface
// normal and sample the environment cubemap along the reflected direction.
void main()
{
vec3 I = normalize(Position - cameraPos);
vec3 R = reflect(I, normalize(Normal));
FragColor = vec4(texture(skybox, R).rgb, 1.0);
}
缓冲处理
glBufferSubData更新特定缓冲区域,缓冲区域必须之前已分配空间,
即需先调用glBufferData或glBufferStorage。
glBufferSubData(GLenum target, GLintptr offset, GLsizeiptr size, const void *data);
target: 指定要更新的缓冲区类型,例如GL_ARRAY_BUFFER、GL_ELEMENT_ARRAY_BUFFER等。
offset: 指定更新数据在VBO中的偏移量。
size: 指定要更新的数据大小。
data: 指向要更新的数据的指针。
使用glBufferSubData可以更新VBO中的部分数据而不用每次重新上传所有数据。这可以提高渲染效率。
可用于分批方式输入数据,即布局为111222333而非123123:
float positions[] = { ... };
float normals[] = { ... };
float tex[] = { ... };
// 填充缓冲
glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(positions), &positions);
glBufferSubData(GL_ARRAY_BUFFER, sizeof(positions), sizeof(normals), &normals);
glBufferSubData(GL_ARRAY_BUFFER, sizeof(positions) + sizeof(normals), sizeof(tex), &tex);
//顶点属性指针
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(float), 0);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(float), (void*)(sizeof(positions)));
glVertexAttribPointer(
2, 2, GL_FLOAT, GL_FALSE, 2 * sizeof(float), (void*)(sizeof(positions) + sizeof(normals)));
请求缓冲指针,直接复制到缓冲:
float data[] = {
0.5f, 1.0f, -0.35f
...
};
glBindBuffer(GL_ARRAY_BUFFER, buffer);
// 获取指针
void *ptr = glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
// 复制数据到内存
memcpy(ptr, data, sizeof(data));
// 记得告诉OpenGL我们不再需要这个指针了
glUnmapBuffer(GL_ARRAY_BUFFER);
复制缓冲
void glCopyBufferSubData(GLenum readtarget, GLenum writetarget, GLintptr readoffset,
GLintptr writeoffset, GLsizeiptr size);
//示例将vbo1 复制到 vbo2,内置可以绑定GL_COPY_READ_BUFFER和GL_COPY_WRITE_BUFFER
glBindBuffer(GL_ARRAY_BUFFER, vbo1);
glBindBuffer(GL_COPY_WRITE_BUFFER, vbo2);
glCopyBufferSubData(GL_ARRAY_BUFFER, GL_COPY_WRITE_BUFFER, 0, 0, sizeof(vertexData));
内建变量
点gl_PointSize
glEnable(GL_PROGRAM_POINT_SIZE);
glUniform1f(glGetUniformLocation(shader.ID, "PointSize"), abs(100 * cos(currentTime)));
---------------------------------vertexShader-------------------------------
// Writes both the clip-space position and the point sprite size.
// Requires glEnable(GL_PROGRAM_POINT_SIZE) on the CPU side (see above).
void main()
{
gl_Position = ViewProj * Model * vec4(aPos, 1.0);
gl_PointSize = PointSize;
}
顶点索引gl_VertexID存储当前绘制顶点的序号
片段坐标gl_FragCoord
视口空间坐标(800X600)gl_FragCoord.xy,片元深度gl_FragCoord.z
gl_FrontFacing一个bool变量,true则为正面,false为背面
回顾glCullFace
glEnable(GL_CULL_FACE);
glFrontFace(GL_CCW);
glCullFace(GL_BACK);
gl_FragDepth设置片段的深度值,自动取用gl_FragCoord.z
但是会关闭所有的提前深度测试,因为在片段着色器运行之前无法预知片段最终的深度值
gl_FragDepth = 0.0;
可以为其添加条件,使其可用于提前深度测试
any | 默认值。提前深度测试是禁用的,你会损失很多性能 |
greater | 你只能让深度值比gl_FragCoord.z 更大 |
less | 你只能让深度值比gl_FragCoord.z 更小 |
unchanged | 如果你要写入gl_FragDepth ,你将只能写入gl_FragCoord.z 的值 |
#version 420 core // 注意GLSL的版本!
out vec4 FragColor;
layout (depth_greater) out float gl_FragDepth;
// The depth_greater layout (declared above) promises the written depth is
// never smaller than gl_FragCoord.z, so early depth testing stays enabled.
void main()
{
FragColor = vec4(1.0);
gl_FragDepth = gl_FragCoord.z + 0.1;
}
接口块(结构体)
...
out VS_OUT
{
vec2 TexCoords;
} vs_out;
...
---------------------------------------------------
#version 330 core
out vec4 FragColor;
// Input interface block: the block name VS_OUT must match the vertex
// shader's output block; the instance name fs_in is local to this stage.
in VS_OUT
{
vec2 TexCoords;
} fs_in;
// FIX: the sampler was named "texture", which collides with the built-in
// texture() function and does not compile in GLSL 330 core. Renamed to
// "tex" (the CPU-side glGetUniformLocation lookup must use "tex" too).
uniform sampler2D tex;
void main()
{
FragColor = texture(tex, fs_in.TexCoords);
}
Uniform缓冲
将数据存入uniform缓冲,
glGenBuffers-》glBindBuffer(GL_UNIFORM_BUFFER, );
unsigned int uboBuffer;
glGenBuffers(1, &uboBuffer);
glBindBuffer(GL_UNIFORM_BUFFER, uboBuffer);
glBufferData(GL_UNIFORM_BUFFER, 152, NULL, GL_STATIC_DRAW);//此处size需要计算
glBindBuffer(GL_UNIFORM_BUFFER, 0);
#version 330 core
layout(location = 0) in vec3 aPos;
// std140 uniform block: shared by every program bound to the same binding
// point, so view/projection can be uploaded once per frame.
layout (std140) uniform Matrices
{
mat4 projection;
mat4 view;
};  // FIX: uniform block declarations must end with a semicolon
uniform mat4 model;
void main()
{
// FIX: "(aPos, 1.0)" was a parenthesised expression, not a vec4 constructor.
gl_Position = projection * view * model * vec4(aPos, 1.0);
}
layout(std140)当前定义的块使用一个特定的内存布局。
我们需要知道每个变量的大小(字节)和偏移量(从块起始位置),让我们能够按顺序把它们放进缓冲。默认情况下GLSL使用共享的内存布局,GLSL会自动优化而改变变量的位置,但顺序不变。
使用std140布局声明了每个变量的偏移量都是由一系列规则所决定的。
基准对齐量:等于一个变量在uniform块中占据的空间(包括填充),以N=4字节为基本单位;一个变量的对齐字节偏移量必须是其基准对齐量的倍数:
类型 | 布局规则 |
---|---|
标量,比如int和bool | 每个标量的基准对齐量为N。 |
向量 | 2N或者4N。这意味着vec3的基准对齐量为4N。 |
标量或向量的数组 | 每个元素的基准对齐量4N。 |
矩阵 | 储存为列向量的数组,每个向量的基准对齐量为4N。 |
结构体 | 等于所有元素根据规则计算后的大小,但会填充到4N大小的倍数。 |
layout (std140) uniform ExampleBlock
{
// 基准对齐量 // 对齐偏移量
float value; // 4 // 0
vec3 vector; // 16 // 16 (必须是16的倍数,所以 4->16)
mat4 matrix; // 16 // 32 (列 0)
// 16 // 48 (列 1)
// 16 // 64 (列 2)
// 16 // 80 (列 3)
float values[3]; // 16 // 96 (values[0])
// 16 // 112 (values[1])
// 16 // 128 (values[2])
bool boolean; // 4 // 144
int integer; // 4 // 148
};
当计算出偏移量,就可以用glBufferSubData函数将变量填充到特定位置。保证了每个声明Uniform程序中的内存布局一致。
缓冲的绑定点
旧版本
//获取uniform块的索引
unsigned int uniformID = glGetUniformBlockIndex(shader.ID, "struct");
//将uniform块链接到指定的绑定点2
glUniformBlockBinding(shader.ID, uniformID, 2);
新版本(4.2后)可以在shader中指定绑定点:
layout(std140, binding = 2) uniform Lights { ... };
还需要将uniform缓冲对象绑定到对应的绑定点上:
glBindBufferBase(GL_UNIFORM_BUFFER, 2, uboBuffer);
//目标、绑定点、uniform缓冲
//or
glBindBufferRange(GL_UNIFORM_BUFFER, 2, uboBuffer, 0, 152);
//绑定uboBuffer的一部分到指定绑定点
//额外参数,偏移量、大小参数
使用glBufferSubData更新Uniform块数据:
glBindBuffer(GL_UNIFORM_BUFFER, uboBuffer);
int b = true;//GLSL的bool是4个字节,所以存储为一个int对象
glBufferSubData(GL_UNIFORM_BUFFER, 144, 4, &b);
glBindBuffer(GL_UNIFORM_BUFFER, 0);
完整例子:
---shader--------------------------
...
// Uniform block bound to binding point 0 (GL 4.2+ in-shader binding)
layout (std140, binding = 0) uniform Matrices
{
mat4 projection;
mat4 view;
};// FIX: added the required trailing semicolon
-----------------------------------
GLuint uboMatrices;
glGenBuffers(1, &uboMatrices);
glBindBuffer(GL_UNIFORM_BUFFER, uboMatrices);
glBufferData(GL_UNIFORM_BUFFER, 2 * sizeof(glm::mat4), NULL, GL_STATIC_DRAW);
glBindBuffer(GL_UNIFORM_BUFFER, 0);
glBindBufferRange(GL_UNIFORM_BUFFER, 0, uboMatrices, 0, 2* sizeof(glm::mat4));
---------------------------------------------------------------------------------
glm::mat4 projection = glm::perspective(glm::radians(45.0f), (float)width/(float)height, 0.1f, 100.0f);
glm::mat4 view = camera.GetViewMatrix();
glBindBuffer(GL_UNIFORM_BUFFER, uboMatrices);
glBufferSubData(GL_UNIFORM_BUFFER, 0, sizeof(glm::mat4), glm::value_ptr(projection));
glBufferSubData(GL_UNIFORM_BUFFER, sizeof(glm::mat4), sizeof(glm::mat4), glm::value_ptr(view));
glBindBuffer(GL_UNIFORM_BUFFER, 0);
几何着色器
编译连接几何着色器
geometryShader = glCreateShader(GL_GEOMETRY_SHADER);
glShaderSource(geometryShader, 1, &gShaderCode, NULL);
glCompileShader(geometryShader);
...
glAttachShader(program, geometryShader);
glLinkProgram(program);
在顶点着色器之后插入,可改变图元(输出以顶点为中心的为横线):
#version 450 core
layout (points) in;
layout (line_strip, max_vertices = 2) out;
void main() {
gl_Position = gl_in[0].gl_Position + vec4(-0.1, 0.0, 0.0, 0.0);
EmitVertex();//gl_Position的会作为图元的一个顶点
gl_Position = gl_in[0].gl_Position + vec4( 0.1, 0.0, 0.0, 0.0);
EmitVertex();
EndPrimitive();//用上面Emit的顶点合成指定图元
}
输入布局修饰符layout(points)对应提供给glDrawArrays的所有图元(括号数字为该图元最少顶点数):
points
:绘制GL_POINTS图元时(1)。lines
:绘制GL_LINES或GL_LINE_STRIP时(2)lines_adjacency
:GL_LINES_ADJACENCY或GL_LINE_STRIP_ADJACENCY(4)triangles
:GL_TRIANGLES、GL_TRIANGLE_STRIP或GL_TRIANGLE_FAN(3)triangles_adjacency
:GL_TRIANGLES_ADJACENCY或GL_TRIANGLE_STRIP_ADJACENCY(6)
输出布局修饰符可以接受几个图元:points、line_strip、triangle_strip,并输入最大顶点数量: max_vertices = n
前一阶段着色器输出为一个内建的变量数组(因为通常输出多个顶点),接受的坐标在观察空间,输出gl_Position时需要转换为裁剪空间:
in gl_Vertex
{
vec4 gl_Position;
float gl_PointSize;
float gl_ClipDistance[];
} gl_in[];
接口块的传递(总是以数组形式):
#version 330 core
layout (location = 0) in vec2 aPos;
layout (location = 1) in vec3 aColor;
out VS_OUT {
vec3 color;
} vs_out;
out vec3 color2;
void main()
{
gl_Position = vec4(aPos.x, aPos.y, 0.0, 1.0);
vs_out.color = aColor;
color2 = aColor;
}
------------------------
in VS_OUT{
vec3 color;
} gs_in[];
in vec3 color2[];//非结构体变量的传递
out vec3 fColor;//片元着色器仅需一个颜色,按照发射定顶点时的颜色传给片元着色器
void main()
{
fColor = gs_in[0].color;
gl_Position = gl_in[0].gl_Position + vec4(-0.2, -0.2, 0.0, 0.0); // 1:左下
EmitVertex();
gl_Position = gl_in[0].gl_Position + vec4( 0.2, -0.2, 0.0, 0.0); // 2:右下
EmitVertex();
gl_Position = gl_in[0].gl_Position + vec4(-0.2, 0.2, 0.0, 0.0); // 3:左上
EmitVertex();
gl_Position = gl_in[0].gl_Position + vec4( 0.2, 0.2, 0.0, 0.0); // 4:右上
EmitVertex();
gl_Position = gl_in[0].gl_Position + vec4( 0.0, 0.4, 0.0, 0.0); // 5:顶部
fColor = vec3(1.0, 1.0, 1.0);//顶部使用不同颜色
EmitVertex();
EndPrimitive();//绘制完一个图元需要调用
}
实例化
绘制多个物体时,使用一个渲染调用来绘制多个物体,来节省每次绘制物体时CPU和GPU间的通信
在使用实例化渲染调用时,gl_InstanceID会从0开始,在每个实例被渲染时递增1。
...
uniform vec2 offsets[100];
...
void main()
{
vec2 offset = offsets[gl_InstanceID];
gl_Position = vec4(aPos + offset, 0.0, 1.0);
fColor = aColor;
}
使用glDrawArraysInstanced或glDrawElementsInstanced进行实例渲染,末尾参数为实例数, 数组用“name[index]”表示
...
for(unsigned int i = 0; i < 100; i++)
{
std::string index = std::to_string(i);
shader.setVec2(("offsets[" + index + "]").c_str(), translations[i]);
}
...
glBindVertexArray(VAO);
glDrawArraysInstanced(GL_TRIANGLES, 0, 6, 100);
实例化数组
因为uniform数据有上限,可以把所有数据存储到VBO,再根据实例读取各自数据
把每个实例的独自属性设置为顶点属性,使用glVertexAttribDivisor(n,m);
第一个参数是顶点属性,此处的glEnableVertexAttribArray(2);
第二个参数是更新属性的频率,0(默认)表示每个顶点更新,1表示每个实例,2表示每2个实例...
...
layout (location = 2) in vec2 aOffset;
...
------------------------------------------------------------
unsigned int instanceVBO;
glGenBuffers(1, &instanceVBO);
glBindBuffer(GL_ARRAY_BUFFER, instanceVBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec2) * 100, &translations[0], GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glEnableVertexAttribArray(2);
glBindBuffer(GL_ARRAY_BUFFER, instanceVBO);
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, 2 * sizeof(float), (void*)0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glVertexAttribDivisor(2, 1);
抗锯齿
glfwWindowHint(GLFW_SAMPLES, 4);//提示GLFW进行多重采样
//每个像素自动创建4个自采样点的颜色缓冲
glEnable(GL_MULTISAMPLE);//显式调用多重缓冲
使用多重采样纹理附件处理
纹理目标是GL_TEXTURE_2D_MULTISAMPLE
GLuint tex;
glBindTexture(GL_TEXTURE_2D_MULTISAMPLE, tex);
glTexImage2DMultisample(GL_TEXTURE_2D_MULTISAMPLE, samples, GL_RGB, width, height, GL_TRUE);
glBindTexture(GL_TEXTURE_2D_MULTISAMPLE, 0);
samples表示采样数,GL_TRUE表示图像会对每个纹素使用相同样本位置和采样数。
附加到帧缓冲:
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D_MULTISAMPLE, tex, 0);
或使用渲染缓冲对象
unsigned int rbo;
glGenRenderbuffers(1, &rbo);
glBindRenderbuffer(GL_RENDERBUFFER, rbo);
glRenderbufferStorageMultisample(GL_RENDERBUFFER, 4, GL_DEPTH24_STENCIL8, width, height);
glBindRenderbuffer(GL_RENDERBUFFER, 0);//分配了足够内存后可以解除绑定
//渲染缓冲附加到帧缓冲
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_RENDERBUFFER, rbo);
glBlitFramebuffer将像素块从读取的帧缓冲区复制到绘制帧缓冲区,并还原多重缓冲
Blit位块传送,参数:源缓冲的区域,目标缓冲区域,缓冲对象格式,拉伸时应用的插值
glBindFramebuffer(GL_READ_FRAMEBUFFER, multisampledFBO);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
glBlitFramebuffer(0, 0, width, height, 0, 0, width, height, GL_COLOR_BUFFER_BIT, GL_NEAREST);
我们不能直接在着色器读取多重采样帧缓冲对象的纹理,需要把它复制到另一个使用普通纹理的帧缓冲。
若想要获取多重采样的每个子样本,则使用sampler2DMS声明,并使用texelFetch进行采样:
uniform sampler2DMS screenTextureMS;
vec4 colorSample = texelFetch(screenTextureMS, TexCoords, 3); // 第4个子样本
伽马校正
glEnable(GL_FRAMEBUFFER_SRGB);//方法一,直接开启,自动进行伽马编码(1/gamma)
=====================================
void main()
{
// do super fancy lighting
[...]
//方法二,手动在像素着色器输出前进行伽马编码
float gamma = 2.2;
fragColor.rgb = pow(fragColor.rgb, vec3(1.0/gamma));
}
注意大部分纹理已经进行伽马编码,即使用sRGB空间的纹理,通常diffuse在sRGB空间,而specular在线性空间,在创建纹理时指定为sRGB,OpenGL会自动把颜色校正到线性空间中。
阴影贴图
创建阴影纹理
const GLuint SHADOW_WIDTH = 1024, SHADOW_HEIGHT = 1024;
GLuint depthMap;
glGenTextures(1, &depthMap);
glBindTexture(GL_TEXTURE_2D, depthMap);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT,
SHADOW_WIDTH, SHADOW_HEIGHT, 0, GL_DEPTH_COMPONENT, GL_FLOAT, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
附加到深度缓冲
使用glDrawBuffer(GL_NONE); glReadBuffer(GL_NONE);显式告诉OpenGL我们不使用任何颜色数据进行渲染:
GLuint depthMapFBO;
glGenFramebuffers(1, &depthMapFBO);
glBindFramebuffer(GL_FRAMEBUFFER, depthMapFBO);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, depthMap, 0);
glDrawBuffer(GL_NONE);
glReadBuffer(GL_NONE);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
注意在渲染过程中调用glViewport适应阴影贴图尺寸:
// 1. 首选渲染深度贴图
glViewport(0, 0, SHADOW_WIDTH, SHADOW_HEIGHT);
glBindFramebuffer(GL_FRAMEBUFFER, depthMapFBO);
glClear(GL_DEPTH_BUFFER_BIT);
RenderScene();
glBindFramebuffer(GL_FRAMEBUFFER, 0);
// 2. 像往常一样渲染场景,但这次使用深度贴图
glViewport(0, 0, SCR_WIDTH, SCR_HEIGHT);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glBindTexture(GL_TEXTURE_2D, depthMap);//使用阴影贴图
RenderScene();
需要使用光源空间绘制深度贴图,因此需要计算光源的投影和观察矩阵:
GLfloat near_plane = 1.0f, far_plane = 7.5f;
// Directional light: orthographic projection (parallel rays)
glm::mat4 lightProjection = glm::ortho(-10.0f, 10.0f, -10.0f, 10.0f, near_plane, far_plane);
// View matrix looking from the light's position toward the origin
// FIX: was glm::vec(...) — the type must be glm::vec3
glm::mat4 lightView = glm::lookAt(glm::vec3(-2.0f, 4.0f, -1.0f), glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(0.0f, 1.0f, 0.0f));
// World -> light clip-space transform
glm::mat4 lightSpaceMatrix = lightProjection * lightView;
可以渲染到四边形进行检查:
#version 330 core
out vec4 color;
in vec2 TexCoords;
uniform sampler2D depthMap;
void main()
{
float depthValue = texture(depthMap, TexCoords).r;
color = vec4(vec3(depthValue), 1.0);
}
若使用透视投影的聚光灯,则需要把非线性深度转换为线性:
#version 330 core
out vec4 color;
in vec2 TexCoords;
uniform sampler2D depthMap;
uniform float near_plane;
uniform float far_plane;
float LinearizeDepth(float depth)
{
float z = depth * 2.0 - 1.0; // Back to NDC
return (2.0 * near_plane * far_plane) / (far_plane + near_plane - z * (far_plane - near_plane));
}
void main()
{
float depthValue = texture(depthMap, TexCoords).r;
color = vec4(vec3(LinearizeDepth(depthValue) / far_plane), 1.0); // perspective
// color = vec4(vec3(depthValue), 1.0); // orthographic
}
渲染阴影
顶点着色器需要向片段着色器传递光源空间的顶点位置:
#version 330 core
layout (location = 0) in vec3 position;
layout (location = 1) in vec3 normal;
layout (location = 2) in vec2 texCoords;
out vec2 TexCoords;
out VS_OUT {
vec3 FragPos;
vec3 Normal;
vec2 TexCoords;
vec4 FragPosLightSpace;
} vs_out;
uniform mat4 projection;
uniform mat4 view;
uniform mat4 model;
uniform mat4 lightSpaceMatrix;
void main()
{
gl_Position = projection * view * model * vec4(position, 1.0f);
vs_out.FragPos = vec3(model * vec4(position, 1.0));
vs_out.Normal = transpose(inverse(mat3(model))) * normal;
vs_out.TexCoords = texCoords;
vs_out.FragPosLightSpace = lightSpaceMatrix * vec4(vs_out.FragPos, 1.0);
}
片段着色器计算光照结果和阴影,该例子使用布林-冯模型,shadow的地方不会有diffuse和specular,注意从阴影纹理采样时,需要把光源空间坐标转移到NDC,并调整为[0,1]范围:
#version 330 core
out vec4 FragColor;
in VS_OUT {
vec3 FragPos;
vec3 Normal;
vec2 TexCoords;
vec4 FragPosLightSpace;
} fs_in;
uniform sampler2D diffuseTexture;
uniform sampler2D shadowMap;
uniform vec3 lightPos;
uniform vec3 viewPos;
// Returns 1.0 when the fragment is in shadow, 0.0 otherwise, by comparing the
// fragment's light-space depth to the closest depth stored in shadowMap.
float ShadowCalculation(vec4 fragPosLightSpace)
{
// perspective divide: clip space -> NDC
vec3 projCoords = fragPosLightSpace.xyz/fragPosLightSpace.w;
// map NDC [-1,1] to the [0,1] texture/depth range
projCoords = projCoords * 0.5 + 0.5;
// closest depth seen from the light at this texel
float closestDepth = texture(shadowMap, projCoords.xy).r;
// depth of the current fragment from the light's point of view
float currentDepth = projCoords.z;
// shadowed if something closer to the light covers this fragment
// (no bias yet — shadow acne is handled further below in the notes)
float shadow = currentDepth > closestDepth ? 1.0 : 0.0;
// fragments beyond the light frustum's far plane stay lit
if(currentDepth > 1.0)
shadow = 0.0;
return shadow;
}
void main()
{
vec3 color = texture(diffuseTexture, fs_in.TexCoords).rgb;
vec3 normal = normalize(fs_in.Normal);
vec3 lightColor = vec3(1.0);
// Ambient
vec3 ambient = 0.15 * color;
// Diffuse
vec3 lightDir = normalize(lightPos - fs_in.FragPos);
float diff = max(dot(lightDir, normal), 0.0);
vec3 diffuse = diff * lightColor;
// Specular (Blinn-Phong half-vector)
vec3 viewDir = normalize(viewPos - fs_in.FragPos);
float spec = 0.0;
vec3 halfwayDir = normalize(lightDir + viewDir);
spec = pow(max(dot(normal, halfwayDir), 0.0), 64.0);
vec3 specular = spec * lightColor;
// Shadow attenuates only the diffuse and specular terms; ambient stays
float shadow = ShadowCalculation(fs_in.FragPosLightSpace);
vec3 lighting = (ambient + (1.0 - shadow) * (diffuse + specular)) * color;
FragColor = vec4(lighting, 1.0f);
}
解决阴影条纹问题:
根据光线和法线的方向调整,光线垂直时只有很少的偏差:
float bias = max(0.05 * (1.0 - dot(normal, lightDir)), 0.005);
float shadow = currentDepth - bias > closestDepth ? 1.0 : 0.0;
但是因为偏移量,看起来可能会有漂浮的感觉 ,解决办法是渲染纹理时使用正面剔除(只对内部不开放的物体适用):
glCullFace(GL_FRONT);
RenderSceneToDepthMap();
glCullFace(GL_BACK); // 不要忘记设回原先的culling face
阴影柔和
多个相邻片段会采样到同一个坐标的贴图深度值,几个片段得到同一个阴影,产生锯齿边。
解决办法一:可以提高阴影贴图的分辨率。
解决办法二:PCF均匀化。
使用textureSize(map, 0)返回纹理0级mipmap的宽和高,用1除以它得到每个纹素的大小(0-1),然后采样周围纹素平均化:
float shadow = 0.0;
vec2 texelSize = 1.0 / textureSize(shadowMap, 0);
for(int x = -1; x <= 1; ++x)
{
for(int y = -1; y <= 1; ++y)
{
float pcfDepth = texture(shadowMap, projCoords.xy + vec2(x, y) * texelSize).r;
shadow += currentDepth - bias > pcfDepth ? 1.0 : 0.0;
}
}
shadow /= 9.0;
点光源的阴影
点光源会向四周发射,因此一般使用CubeMap贴图:
GLuint depthCubemap;
glGenTextures(1, &depthCubemap);
glBindTexture(GL_TEXTURE_CUBE_MAP, depthCubemap);
for(unsigned int i=0; i<6; ++i)
{
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X+i, 0, GL_DEPTH_COMPONENT,
SHADOW_WIDTH, SHADOW_HEIGHT, 0, GL_DEPTH_COMPONENT, GL_FLOAT, NULL);
}
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
创建帧缓冲
GLuint depthMapFBO;
glGenFramebuffers(1, &depthMapFBO);
glBindFramebuffer(GL_FRAMEBUFFER, depthMapFBO);
glFramebufferTexture(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, depthCubemap, 0);
glDrawBuffer(GL_NONE);//不渲染到颜色缓冲
glReadBuffer(GL_NONE);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
光源的透视投影矩阵:
视野设置为90度,保证足够大的视野填充立方体贴图的一面
GLfloat aspect = (GLfloat)SHADOW_WIDTH/(GLfloat)SHADOW_HEIGHT;
GLfloat near = 1.0f;
GLfloat far = 25.0f;
glm::mat4 shadowProj = glm::perspective(glm::radians(90.0f), aspect, near, far);
光源的视角矩阵:
每个方向都有一个朝向,合成转换矩阵数组
std::vector<glm::mat4> shadowTransforms;
// One view-projection matrix per cubemap face; the "up" vectors follow the
// cubemap face orientation convention.
// FIX: every push_back below was missing its closing parenthesis.
shadowTransforms.push_back(shadowProj *
glm::lookAt(lightPos, lightPos + glm::vec3(1.0,0.0,0.0), glm::vec3(0.0,-1.0,0.0)));
shadowTransforms.push_back(shadowProj *
glm::lookAt(lightPos, lightPos + glm::vec3(-1.0,0.0,0.0), glm::vec3(0.0,-1.0,0.0)));
shadowTransforms.push_back(shadowProj *
glm::lookAt(lightPos, lightPos + glm::vec3(0.0,1.0,0.0), glm::vec3(0.0,0.0,1.0)));
shadowTransforms.push_back(shadowProj *
glm::lookAt(lightPos, lightPos + glm::vec3(0.0,-1.0,0.0), glm::vec3(0.0,0.0,-1.0)));
shadowTransforms.push_back(shadowProj *
glm::lookAt(lightPos, lightPos + glm::vec3(0.0,0.0,1.0), glm::vec3(0.0,-1.0,0.0)));
shadowTransforms.push_back(shadowProj *
glm::lookAt(lightPos, lightPos + glm::vec3(0.0,0.0,-1.0), glm::vec3(0.0,-1.0,0.0)));
渲染循环:
// 1. 渲染立方体深度贴图
glViewport(0, 0, SHADOW_WIDTH, SHADOW_HEIGHT);
glBindFramebuffer(GL_FRAMEBUFFER, depthMapFBO);
glClear(GL_DEPTH_BUFFER_BIT);
RenderScene();
glBindFramebuffer(GL_FRAMEBUFFER, 0);
// 2.使用立方体深度贴图作为阴影贴图
glViewport(0, 0, SCR_WIDTH, SCR_HEIGHT);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_CUBE_MAP, depthCubemap);
RenderScene();
深度着色器
顶点着色器把顶点转换到世界空间
几何着色器把顶点变换到光空间,使用gl_Layer指定输出CubeMap的哪个面,并且把FragPos传递给片元着色器:
#version 330 core
layout (triangles) in;
layout (triangle_strip, max_vertices=18) out;
uniform mat4 shadowMatrices[6];
out vec4 FragPos; // FragPos from GS (output per emitvertex)
void main()
{
for(int face = 0; face < 6; ++face)
{
gl_Layer = face; // 指定图形放到立方体贴图的哪个面
for(int i = 0; i < 3; ++i) // for each triangle's vertices
{
FragPos = gl_in[i].gl_Position;//输入的pos
gl_Position = shadowMatrices[face] * FragPos;
EmitVertex();
}
EndPrimitive();
}
}
片元着色器需要计算世界空间下的片元和灯的距离,从而计算衰减:
平行光使用空像素着色器,让OpenGL自动配置深度贴图的深度值,这次我们自己计算:
#version 450 core
in vec4 FragPos;
uniform vec3 lightPos;
uniform float far_plane;
void main()
{
float lightDis = length(FragPos.xyz - lightPos);
gl_FragDepth = lightDis/far_plane;
}
渲染场景的着色器
#version 330 core
layout (location = 0) in vec3 position;
layout (location = 1) in vec3 normal;
layout (location = 2) in vec2 texCoords;
out vec2 TexCoords;
out VS_OUT {
vec3 FragPos;
vec3 Normal;
vec2 TexCoords;
vec3 LightSpaceFragPos;
} vs_out;
uniform mat4 projection;
uniform mat4 view;
uniform mat4 model;
uniform mat4 lightSpaceMatrix;// world -> light-space transform
void main()
{
gl_Position = projection * view * model * vec4(position, 1.0f);
vs_out.FragPos = vec3(model * vec4(position, 1.0));
// inverse-transpose handles non-uniform scaling of normals
vs_out.Normal = transpose(inverse(mat3(model))) * normal;
// FIX: mat4 * vec3 does not type-check in GLSL; promote to vec4 first
vs_out.LightSpaceFragPos = vec3(lightSpaceMatrix * vec4(vs_out.FragPos, 1.0));
vs_out.TexCoords = texCoords;
}
-------------------------
#version 330 core
out vec4 FragColor;
in VS_OUT {
vec3 FragPos;
vec3 Normal;
vec2 TexCoords;
vec3 LightSpaceFragPos;
} fs_in;
uniform sampler2D diffuseTexture;
uniform samplerCube depthMap;
uniform vec3 lightPos;
uniform vec3 viewPos;
uniform float far_plane;
// FIX: removed the stray, incomplete "uniform matrxi4" declaration.
// Point-light shadow test: the cubemap is indexed by the direction from the
// light to the fragment and stores light-to-fragment distances normalized by
// far_plane.
float ShadowCalculation(vec3 fragPos)
{
// FIX: sample the cubemap with the light->fragment direction (world space),
// not with the fragment position itself
vec3 fragToLight = fragPos - lightPos;
float closestDepth = texture(depthMap, fragToLight).r;
closestDepth *= far_plane;// back to a world-space distance
float currentDepth = length(fragToLight);
float bias = 0.05;// avoid shadow acne
float shadow = currentDepth - bias > closestDepth ? 1.0 : 0.0;
return shadow;
}
void main()
{
vec3 color = texture(diffuseTexture, fs_in.TexCoords).rgb;
vec3 normal = normalize(fs_in.Normal);
vec3 lightColor = vec3(0.3);
// Ambient
vec3 ambient = 0.3 * color;
// Diffuse
vec3 lightDir = normalize(lightPos - fs_in.FragPos);
float diff = max(dot(lightDir, normal), 0.0);
vec3 diffuse = diff * lightColor;
// Specular (Blinn-Phong; removed the unused reflectDir local)
vec3 viewDir = normalize(viewPos - fs_in.FragPos);
float spec = 0.0;
vec3 halfwayDir = normalize(lightDir + viewDir);
spec = pow(max(dot(normal, halfwayDir), 0.0), 64.0);
vec3 specular = spec * lightColor;
// Calculate shadow.
// FIX: interface-block members must be qualified (fs_in.*), and the cubemap
// lookup needs the world-space fragment position
float shadow = ShadowCalculation(fs_in.FragPos);
vec3 lighting = (ambient + (1.0 - shadow) * (diffuse + specular)) * color;
FragColor = vec4(lighting, 1.0f);
}
视差贴图(Parallax Mapping)
图中A为原采样点,计算深度H(A),把视角方向延伸H(A)的长度到P,H(P)成为真正的采样点。(深度图由1-高度图得到)
顶点着色器
需要使用法线空间的视角方向和灯光方向:
#version 330 core
layout (location = 0) in vec3 position;
layout (location = 1) in vec3 normal;
layout (location = 2) in vec2 texCoords;
layout (location = 3) in vec3 tangent;
layout (location = 4) in vec3 bitangent;
out VS_OUT {
vec3 FragPos;
vec2 TexCoords;
vec3 TangentLightPos;
vec3 TangentViewPos;
vec3 TangentFragPos;
} vs_out;
uniform mat4 projection;
uniform mat4 view;
uniform mat4 model;
uniform vec3 lightPos;
uniform vec3 viewPos;
void main()
{
gl_Position = projection * view * model * vec4(position, 1.0f);
vs_out.FragPos = vec3(model * vec4(position, 1.0));
vs_out.TexCoords = texCoords;
vec3 T = normalize(mat3(model) * tangent);
vec3 B = normalize(mat3(model) * bitangent);
vec3 N = normalize(mat3(model) * normal);
mat3 TBN = transpose(mat3(T, B, N));
vs_out.TangentLightPos = TBN * lightPos;
vs_out.TangentViewPos = TBN * viewPos;
vs_out.TangentFragPos = TBN * vs_out.FragPos;
}
片元着色器
利用深度计算偏移坐标,使用偏移坐标采样法线和漫反射纹理:
偏移计算:view.xy/view.z * height *height_scale,其中view.xy/view.z模拟接近平行观察时偏差大,height_scale是调节参数一般0.1
#version 330 core
out vec4 FragColor;
in VS_OUT {
vec3 FragPos;
vec2 TexCoords;
vec3 TangentLightPos;
vec3 TangentViewPos;
vec3 TangentFragPos;
} fs_in;
uniform sampler2D diffuseMap;
uniform sampler2D normalMap;
uniform sampler2D depthMap;
uniform float height_scale;
// Offsets texCoords along the (tangent-space) view direction in proportion to
// the sampled depth, simulating surface relief.
vec2 ParallaxMapping(vec2 texCoords, vec3 viewDir)
{
// FIX: "texure" -> texture, and "view" -> viewDir (the parameter's name)
float height = texture(depthMap, texCoords).r;
// dividing by viewDir.z makes the offset grow at grazing angles
vec2 p = viewDir.xy / viewDir.z * height * height_scale;
return texCoords - p;
}
void main()
{
// Offset texture coordinates with Parallax Mapping
vec3 viewDir = normalize(fs_in.TangentViewPos - fs_in.TangentFragPos);
vec2 texCoords = ParallaxMapping(fs_in.TexCoords, viewDir);
// then sample textures with the offset texture coords
// FIX: texture() returns vec4; take .rgb before assigning to a vec3
vec3 diffuse = texture(diffuseMap, texCoords).rgb;
vec3 normal = texture(normalMap, texCoords).rgb;
normal = normalize(normal * 2.0 - 1.0);// map normals from [0,1] to [-1,1]
// proceed with lighting code
[...]
}
图中,我们的理想深度在0.4-0.6之间, 我们从0.0深度的层次开始向下遍历,每层比较当前层次的深度值和当前偏移坐标的深度值,直到偏移坐标的深度值小于层次的深度值,然后插值:
// Steep parallax mapping: march along the view direction in fixed depth
// layers until the layer depth passes the depth-map value, then interpolate
// between the layers before and after the intersection.
vec2 ParallaxMapping(vec2 texCoords, vec3 viewDir)
{
// number of depth layers
const float numLayers = 10;
// depth step per layer
float layerDepth = 1.0 / numLayers;
// depth of the current layer
float currentLayerDepth = 0.0;
// maximum texcoord offset (at depth == 1), distributed over the layers
vec2 P = viewDir.xy * height_scale;
vec2 deltaTexCoords = P/numLayers;
// current texture coordinate
vec2 currentTexCoords = texCoords;
// depth-map value at the current coordinate
float currentDepthMapValue = texture(depthMap, currentTexCoords).r;
while(currentLayerDepth < currentDepthMapValue)
{
// step the texture coordinate
currentTexCoords -= deltaTexCoords;
// depth at the stepped coordinate
currentDepthMapValue = texture(depthMap, currentTexCoords).r;
// next layer depth
currentLayerDepth += layerDepth;
}
// Interpolation:
// FIX: prevTexCoords is a texture coordinate and must be vec2, not float
vec2 prevTexCoords = currentTexCoords + deltaTexCoords;
float beforeDiff = texture(depthMap, prevTexCoords).r-(currentLayerDepth - layerDepth);
float currentDiff = currentLayerDepth - currentDepthMapValue;
// the smaller a side's depth error, the larger its weight
float weight = (1-currentDiff/(beforeDiff + currentDiff));
// interpolate between the two candidate coordinates
currentTexCoords = weight * currentTexCoords + (1-weight) * prevTexCoords;
return currentTexCoords;
}
可以动态调整层次数,在垂直的时候使用更少的样本,使用mix函数:
const float minLayers = 8;
const float maxLayers = 32;
float numLayers = mix(maxLayers, minLayers, abs(dot(vec3(0.0, 0.0, 1.0), viewDir)));
HDR
使用浮点缓冲作为帧缓冲的颜色附件,颜色缓冲的内部格式被设定成了GL_RGB16F
, GL_RGBA16F
, GL_RGB32F
或者GL_RGBA32F
时,这些帧缓冲被叫做浮点帧缓冲,注意内部格式使用GL_FLOAT:
glBindTexture(GL_TEXTURE_2D, colorBuffer);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB16F, SCR_WIDTH, SCR_HEIGHT, 0, GL_RGB, GL_FLOAT, NULL);
色调映射函数,把颜色从HDR映射回LDR而不丢失细节:
//均匀映射
void main()
{
const float gamma = 2.2;
vec3 hdrColor = texture(hdrBuffer, TexCoords).rgb;
// Reinhard色调映射
vec3 mapped = hdrColor / (hdrColor + vec3(1.0));
// Gamma校正
mapped = pow(mapped, vec3(1.0 / gamma));
color = vec4(mapped, 1.0);
}
=====================================================
//曝光
uniform float exposure;//曝光参数
void main()
{
const float gamma = 2.2;
vec3 hdrColor = texture(hdrBuffer, TexCoords).rgb;
// 曝光色调映射
vec3 mapped = vec3(1.0) - exp(-hdrColor * exposure);
// Gamma校正
mapped = pow(mapped, vec3(1.0 / gamma));
color = vec4(mapped, 1.0);
}
Bloom泛光
亮度超过一定阈值的灯光进行模糊。
我们需要附加两个颜色缓冲
GLuint hdrFBO;
// FIX: the gen/bind function names were swapped and misspelled
// (glGenFramebuffer / glBindFramebuffers do not exist)
glGenFramebuffers(1, &hdrFBO);
glBindFramebuffer(GL_FRAMEBUFFER, hdrFBO);
GLuint colorBuffers[2];
glGenTextures(2, colorBuffers);
for(GLuint i=0; i<2; ++i)
{
glBindTexture(GL_TEXTURE_2D, colorBuffers[i]);// FIX: was "glBindTexure"
// HDR color buffer: floating-point internal format so values can exceed 1.0
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB16F, SCR_WIDTH, SCR_HEIGHT, 0, GL_RGB, GL_FLOAT, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);// FIX: "GL_TETXURE_*"
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
// attach texture i to color attachment i of the HDR framebuffer
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0+i, GL_TEXTURE_2D, colorBuffers[i], 0);
}
通过glDrawBuffers显式告诉OpenGL 渲染到多个颜色缓冲附件:
GLuint attachments[2] = { GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1 };
glDrawBuffers(2, attachments);
片元着色器使用两个location,每个输出到一个颜色缓冲附件
layout(location = 0) out vec4 FragColor;
layout(location = 1) out vec4 BrightColor;
void main()
{
FragColor = vec4(lighting, 1.0);//正常计算光照
float brightness = dot(FragColor.rgb, vec3(0.2126, 0.7152, 0.0722));
if(brightness > 1.0)
BrightColor = vec4(FragColor.rgb, 1.0);
}
高斯模糊
需要三个帧缓冲,第一个有存储两个纹理,一个是场景,一个是捕捉场景亮部,另外两个帧缓冲用于交替进行高斯模糊。
分为水平和垂直方向降低计算量,每个像素与旁边的像素加权叠加:
#version 450 core
out vec4 FragColor;
in vec2 TexCoords;
uniform sampler2D image;
uniform bool horizontal;
// 1D Gaussian weights; the separable blur does a horizontal then a vertical
// pass, reducing N*N samples to N+N per pixel.
uniform float weight[5] = float[](0.227027, 0.1945946, 0.1216216, 0.054054, 0.016216);
void main()
{
vec2 offset = 1.0/textureSize(image, 0);// size of one texel
vec3 result = texture(image, TexCoords).rgb * weight[0];
if(horizontal)
{
for(int i=1; i<5; ++i)
{
result += texture(image, TexCoords + vec2(offset.x * i, 0.0)).rgb * weight[i];
result += texture(image, TexCoords - vec2(offset.x * i, 0.0)).rgb * weight[i];
}
}
else
{
for(int i=1; i<5; ++i)
{
// FIX: the vertical pass must offset along y; the original put the
// offset into the x component
result += texture(image, TexCoords + vec2(0.0, offset.y * i)).rgb * weight[i];
result += texture(image, TexCoords - vec2(0.0, offset.y * i)).rgb * weight[i];
}
}
FragColor = vec4(result, 1.0);
}
需要为模糊处理创建两个基本的帧缓冲,每个只有一个颜色附件:
GLuint pingpongFBO[2];
GLuint pingpongBuffer[2];
glGenFramebuffers(2, pingpongFBO);
glGenTextures(2, pingpongBuffer);
for (GLuint i = 0; i < 2; i++)
{
glBindFramebuffer(GL_FRAMEBUFFER, pingpongFBO[i]);
glBindTexture(GL_TEXTURE_2D, pingpongBuffer[i]);
glTexImage2D(
GL_TEXTURE_2D, 0, GL_RGB16F, SCR_WIDTH, SCR_HEIGHT, 0, GL_RGB, GL_FLOAT, NULL
);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glFramebufferTexture2D(
GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, pingpongBuffer[i], 0
);
}
使用第一个帧缓冲的第二个颜色附件进行第一次模糊,之后交替后两个帧缓冲进行模糊:
GLboolean horizontal = true, first_iteration = true;
GLuint amount = 10;
shader.use();
for(int i=0; i<amount; i++)
{
glBindFramebuffer(GL_FRAMEBUFFER, pingpongFBO[horizontal]);
glUniform1i(glGetUniformLocation(shader.ID, "horizontal"), horizontal);
glBindTexture(GL_TEXTURE2D, first_iteration ? colorBuffers[1] : pingpongBuffer[!horizontal]);
RenderQuad();
horizontal = !horizontal;
if(first_iteration)
first_iteration = false;
}
//最终使用horizontal纹理
glBindFramebuffer(GL_FRAMEBUFFER, 0);
sahderFinal.use();
glActiveTetxure(0);
glBindTexture(GL_TEXTURE2D, colorBuffers[0]);
glUniform1i(glGetUniformLocation(shader.ID, "scene"), colorBuffers[0]);
glActiveTetxure(1);
glBindTexture(GL_TEXTURE2D, pingpongBuffer[horizontal]);
glUniform1i(glGetUniformLocation(shader.ID, "blur"), pingpongBuffer[horizontal]);
Render();
混合两个纹理:
#version 450 core
out vec4 FragColor;// FIX: the output/input declarations were missing
in vec2 TexCoords;
uniform sampler2D scene;
uniform sampler2D blur;
uniform float exposure;// FIX: declared "explosure" but used as "exposure"
void main()
{
const float gamma = 2.2;
vec3 hdrColor = texture(scene, TexCoords).rgb;
// FIX: the sampler is declared "blur" but was sampled as "bloomBlur"
vec3 bloomColor = texture(blur, TexCoords).rgb;
hdrColor += bloomColor;// additive blending
// exposure tone mapping
vec3 result = vec3(1.0) - exp(-hdrColor * exposure);
// gamma correction
result = pow(result, vec3(1.0 / gamma));
FragColor = vec4(result, 1.0f);
}
延迟着色
分为几何阶段和光照计算阶段,初始化一个帧缓冲Gbuffer,存有三个颜色缓冲,分别存储是位置、法线、镜面+颜色,还有一个渲染缓冲,用于存储深度值:
GLuint gBuffer;
glGenFramebuffers(1, &gBuffer);
glBindFramebuffer(GL_FRAMEBUFFER, gBuffer);
GLuint gPosition, gNormal, gColorSpec;
// - 位置缓冲
glGenTextures(1, &gPosition);
glBindTexture(GL_TEXTURE_2D, gPosition);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB16F, SCR_WIDTH, SCR_HEIGHT, 0, GL_RGB, GL_FLOAT, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, gPosition, 0);
// - 颜色 + 镜面颜色缓冲
glGenTextures(1, &gAlbedoSpec);
glBindTexture(GL_TEXTURE_2D, gAlbedoSpec);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, SCR_WIDTH, SCR_HEIGHT, 0, GL_RGBA, GL_FLOAT, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT2, GL_TEXTURE_2D, gAlbedoSpec, 0);
//...
// - 告诉OpenGL我们将要使用(帧缓冲的)哪种颜色附件来进行渲染
GLuint attachments[3] = { GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1, GL_COLOR_ATTACHMENT2 };
glDrawBuffers(3, attachments);
//...
几何阶段的片元着色器渲染到缓冲
#version 330 core
layout (location = 0) out vec3 gPosition;
layout (location = 1) out vec3 gNormal;
layout (location = 2) out vec4 gAlbedoSpec;
in vec2 TexCoords;
in vec3 FragPos;
in vec3 Normal;
uniform sampler2D texture_diffuse1;
uniform sampler2D texture_specular1;
void main()
{
// 存储第一个G缓冲纹理中的片段位置向量
gPosition = FragPos;
// 同样存储对每个逐片段法线到G缓冲中
gNormal = normalize(Normal);
// 和漫反射对每个逐片段颜色
gAlbedoSpec.rgb = texture(texture_diffuse1, TexCoords).rgb;
// 存储镜面强度到gAlbedoSpec的alpha分量
gAlbedoSpec.a = texture(texture_specular1, TexCoords).r;
}
延迟光照处理
绑定GBuffer中的各个缓冲,发送观察位置,渲染屏幕:
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
shaderLightingPass.Use();
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, gPosition);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, gNormal);
glActiveTexture(GL_TEXTURE2);
glBindTexture(GL_TEXTURE_2D, gAlbedoSpec);
// 同样发送光照相关的uniform
SendAllLightUniformsToShader(shaderLightingPass);
glUniform3fv(glGetUniformLocation(shaderLightingPass.Program, "viewPos"), 1, &camera.Position[0]);
RenderQuad();
若要在延迟渲染之后再次进行正向渲染,则需要获取之前画面的深度值,使后面渲染得到正确结果,从gbuffer中读出深度值:
// FIX: glReadBuffer selects a color buffer of the bound FBO; to read from the
// G-buffer we must bind it as the READ framebuffer with glBindFramebuffer
glBindFramebuffer(GL_READ_FRAMEBUFFER, gBuffer);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
// Copy only the depth so the following forward pass depth-tests correctly
glBlitFramebuffer(
0, 0, SCR_WIDTH, SCR_HEIGHT, 0, 0, SCR_WIDTH, SCR_HEIGHT, GL_DEPTH_BUFFER_BIT, GL_NEAREST
);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
// 正常渲染即可
SSAO
需要用到每个片段的深度值。
几何着色器渲染到线性深度纹理
可以从gl_FragCoord.z提取深度值,再转换为线性:
#version 330 core
layout (location = 0) out vec4 gPositionDepth;
layout (location = 1) out vec3 gNormal;
layout (location = 2) out vec4 gAlbedoSpec;
in vec2 TexCoords;
in vec3 FragPos;
in vec3 Normal;
const float NEAR = 0.1; // 投影矩阵的近平面
const float FAR = 50.0f; // 投影矩阵的远平面
float LinearizeDepth(float depth)
{
float z = depth * 2.0 - 1.0; // 回到NDC
return (2.0 * NEAR * FAR) / (FAR + NEAR - z * (FAR - NEAR));
}
void main()
{
// 储存片段的位置矢量到第一个G缓冲纹理
gPositionDepth.xyz = FragPos;
// 储存线性深度到gPositionDepth的alpha分量
gPositionDepth.a = LinearizeDepth(gl_FragCoord.z);
// 储存法线信息到G缓冲
gNormal = normalize(Normal);
// 和漫反射颜色
gAlbedoSpec.rgb = vec3(0.95);
}
随机半球采样
//0-1的随机数
std::mt19937 rng(std::random_device{}());
std::uniform_real_distribution<GLfloat> rd(0.0, 1.0);
// Linear interpolation between a and b by factor f (f in [0,1]).
GLfloat lerp(GLfloat a, GLfloat b, GLfloat f)
{
return a + f * (b - a);
}
for (GLuint i = 0; i < 64; ++i)
{
// FIX: a distribution is invoked with the engine, i.e. rd(rng) — the
// original called rng(rd), which does not compile
glm::vec3 sample(
rd(rng) * 2.0 - 1.0,
rd(rng) * 2.0 - 1.0,
rd(rng)// z >= 0 only: samples lie in the upper hemisphere
);
sample = glm::normalize(sample);
sample *= rd(rng);// random length inside the unit hemisphere
// Bias sample distances toward the origin for more nearby occlusion detail
GLfloat scale = GLfloat(i) / 64.0;
scale = lerp(0.1f, 1.0f, scale * scale);
sample *= scale;
ssaoKernel.push_back(sample);
}
SSAO的完整过程
// 几何处理阶段: 渲染到G缓冲中
glBindFramebuffer(GL_FRAMEBUFFER, gBuffer);
[...]
glBindFramebuffer(GL_FRAMEBUFFER, 0);
// 使用G缓冲渲染SSAO纹理
glBindFramebuffer(GL_FRAMEBUFFER, ssaoFBO);
glClear(GL_COLOR_BUFFER_BIT);
shaderSSAO.Use();
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, gPositionDepth);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, gNormal);
glActiveTexture(GL_TEXTURE2);
glBindTexture(GL_TEXTURE_2D, noiseTexture);
SendKernelSamplesToShader();
glUniformMatrix4fv(projLocation, 1, GL_FALSE, glm::value_ptr(projection));
RenderQuad();
glBindFramebuffer(GL_FRAMEBUFFER, 0);
// 光照处理阶段: 渲染场景光照
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
shaderLightingPass.Use();
[...]
glActiveTexture(GL_TEXTURE3);
glBindTexture(GL_TEXTURE_2D, ssaoColorBuffer);
[...]
RenderQuad();
ssao着色器
#version 450 core
out float FragColor;
in vec2 TexCoords;
// FIX: "#verison" typo; missing ';' after gNormal; the samplers/matrix were
// declared under one name (gPos / proj) but used under another
// (gPositionDepth / projection) — names are now consistent.
uniform sampler2D gPositionDepth;
uniform sampler2D gNormal;
uniform sampler2D texNoise;
uniform vec3 samples[64];
uniform mat4 projection;
// FIX: kernelSize and radius were used but never declared
const int kernelSize = 64;
const float radius = 1.0;
// Tile the 4x4 noise texture over the screen: resolution / noise size
const vec2 noiseScale = vec2(800.0/4.0, 600.0/4.0);
void main()
{
vec3 fragPos = texture(gPositionDepth, TexCoords).xyz;
vec3 normal = texture(gNormal, TexCoords).rgb;
vec3 randomVec = texture(texNoise, TexCoords * noiseScale).xyz;
// Gram-Schmidt: build a TBN basis rotated by the random vector
vec3 tangent = normalize(randomVec - normal * dot(randomVec, normal));
vec3 bitangent = cross(normal, tangent);
mat3 TBN = mat3(tangent, bitangent, normal);
// Count samples that fail the depth test to derive the occlusion factor
float occlusion = 0.0;
for(int i = 0; i < kernelSize; ++i)
{
// FIX: "sample" is a reserved keyword in GLSL 4.x; renamed to samplePos
vec3 samplePos = TBN * samples[i]; // tangent -> view space
samplePos = fragPos + samplePos * radius;
vec4 offset = vec4(samplePos, 1.0);
offset = projection * offset; // view -> clip space
offset.xyz /= offset.w; // perspective divide
offset.xyz = offset.xyz * 0.5 + 0.5; // to the [0,1] texture range
// Linear depth stored at the sample's screen position (negative view z)
float sampleDepth = -texture(gPositionDepth, offset.xy).w;
// Fade out the contribution of samples far outside the radius
float rangeCheck = smoothstep(0.0, 1.0, radius / abs(fragPos.z - sampleDepth));
occlusion += (sampleDepth >= samplePos.z ? 1.0 : 0.0) * rangeCheck;
}
occlusion = 1.0 - (occlusion / kernelSize);
FragColor = occlusion;
}
应用ssao纹理
在环境光中乘以采样值即可:
#version 330 core
out vec4 FragColor;
in vec2 TexCoords;
uniform sampler2D gPositionDepth;
uniform sampler2D gNormal;
uniform sampler2D gAlbedo;
uniform sampler2D ssao;
struct Light {
vec3 Position;
vec3 Color;
float Linear;
float Quadratic;
};
uniform Light light;
void main()
{
// Retrieve data from g-buffer
vec3 FragPos = texture(gPositionDepth, TexCoords).rgb;
vec3 Normal = texture(gNormal, TexCoords).rgb;
vec3 Diffuse = texture(gAlbedo, TexCoords).rgb;
float AmbientOcclusion = texture(ssao, TexCoords).r;
// Then calculate lighting as usual
vec3 ambient = vec3(0.3 * AmbientOcclusion); // <-- this is where we use ambient occlusion
vec3 lighting = ambient;
vec3 viewDir = normalize(-FragPos); // Viewpos is (0.0.0)
// Diffuse
vec3 lightDir = normalize(light.Position - FragPos);
vec3 diffuse = max(dot(Normal, lightDir), 0.0) * Diffuse * light.Color;
// Specular
vec3 halfwayDir = normalize(lightDir + viewDir);
float spec = pow(max(dot(Normal, halfwayDir), 0.0), 8.0);
vec3 specular = light.Color * spec;
// Attenuation
float distance = length(light.Position - FragPos);
float attenuation = 1.0 / (1.0 + light.Linear * distance + light.Quadratic * distance * distance);
diffuse *= attenuation;
specular *= attenuation;
lighting += diffuse + specular;
FragColor = vec4(lighting, 1.0);
}
PBR模型
#version 330 core
out vec4 FragColor;
in vec2 TexCoords;
in vec3 WorldPos;
in vec3 Normal;

// Material parameters.
uniform vec3 albedo;
uniform float metallic;
uniform float roughness;
uniform float ao;

// Camera / light. The original referenced lightPos (and LightPos) without
// ever declaring it — declared here as a uniform.
uniform vec3 camPos;
uniform vec3 lightPos;
uniform vec3 lightColor;

// PI must be visible to DistributionGGX, so it lives at file scope.
// (The original declared it inside main(), after it had already been used.)
const float PI = 3.14159265359;

// ------------------------ F: Fresnel-Schlick approximation.
// Fixed from the original, which declared this `void` with a parameter
// named `cos` while the body used `cosTheta`.
vec3 fresnelSchlick(float cosTheta, vec3 F0)
{
    return F0 + (1.0 - F0) * pow(clamp(1.0 - cosTheta, 0.0, 1.0), 5.0);
}

// ------------------------ D: GGX / Trowbridge-Reitz normal distribution.
float DistributionGGX(vec3 N, vec3 H, float roughness)
{
    float a = roughness * roughness;
    float a2 = a * a;
    float NdotH = max(dot(N, H), 0.0);
    float NdotH2 = NdotH * NdotH;
    float num = a2;
    float denom = (NdotH2 * (a2 - 1.0) + 1.0);
    denom = PI * denom * denom;
    return num / denom;
}

// ------------------------ G: Schlick-GGX geometry term (direct-lighting k).
float GeometrySchlickGGX(float NdotV, float roughness)
{
    float r = (roughness + 1.0);
    float k = (r * r) / 8.0;
    float num = NdotV;
    float denom = NdotV * (1.0 - k) + k;
    return num / denom;
}

// Smith's method: combine geometry obstruction (view) and shadowing (light).
float GeometrySmith(vec3 N, vec3 V, vec3 L, float roughness)
{
    float NdotV = max(dot(N, V), 0.0);
    float NdotL = max(dot(N, L), 0.0);
    float ggx2 = GeometrySchlickGGX(NdotV, roughness);
    float ggx1 = GeometrySchlickGGX(NdotL, roughness);
    return ggx1 * ggx2;
}

void main()
{
    vec3 N = normalize(Normal);
    vec3 V = normalize(camPos - WorldPos);
    vec3 Lo = vec3(0.0);

    // Per-light radiance (single point light, inverse-square falloff).
    vec3 L = normalize(lightPos - WorldPos);
    vec3 H = normalize(V + L);
    float distance = length(lightPos - WorldPos);
    float attenuation = 1.0 / (distance * distance);
    vec3 radiance = lightColor * attenuation;

    // Base reflectivity: 0.04 for dielectrics (the original had 0.4),
    // tinted by albedo for metals.
    vec3 F0 = vec3(0.04);
    F0 = mix(F0, albedo, metallic);

    // Cook-Torrance BRDF.
    vec3 F = fresnelSchlick(max(dot(H, V), 0.0), F0);
    float NDF = DistributionGGX(N, H, roughness);
    float G = GeometrySmith(N, V, L, roughness);
    // +0.0001 guards against division by zero at grazing angles.
    vec3 specular = (NDF * G * F) /
                    (4.0 * max(dot(N, V), 0.0) * max(dot(N, L), 0.0) + 0.0001);

    // kS is the Fresnel reflectance; kD is the remaining refracted energy.
    // Metals do not refract light, so they contribute no diffuse term.
    // (These were mistyped as `float` in the original.)
    vec3 kS = F;
    vec3 kD = (vec3(1.0) - kS) * (1.0 - metallic);

    float NdotL = max(dot(N, L), 0.0);
    Lo += (kD * albedo / PI + specular) * radiance * NdotL;

    vec3 ambient = vec3(0.03) * albedo * ao;
    vec3 color = ambient + Lo;
    // Lo can exceed 1.0: Reinhard tone mapping, then gamma correction.
    color = color / (color + vec3(1.0));
    color = pow(color, vec3(1.0 / 2.2));
    FragColor = vec4(color, 1.0);
}
IBL(Image Based Lighting)
预计算漫反射部分,因为该部分是依赖于入射光方向wi的积分,可用卷积操作合成辐照度效果。
生成环境立方体贴图
将等距柱状投影图(equirectangular map)转换为立方体贴图,需要用到局部坐标
#version 330 core
layout (location = 0) in vec3 aPos;
out vec3 localPos;
uniform mat4 projection;
uniform mat4 view;
// Pass the cube's local-space position through unchanged so the fragment
// shader can treat it as a direction when sampling the equirectangular map.
void main()
{
    gl_Position = projection * view * vec4(aPos, 1.0);
    localPos = aPos;
}
---------------------
#version 330 core
out vec4 FragColor;
in vec3 localPos;
uniform sampler2D equirectangularMap;
// 1/(2*pi) and 1/pi: scale spherical angles into a [-0.5, 0.5] range.
const vec2 invAtan = vec2(0.1591, 0.3183);
// Map a unit direction to equirectangular UVs:
// longitude from atan(z, x), latitude from asin(y), remapped to [0, 1].
vec2 SampleSphericalMap(vec3 v)
{
    return vec2(atan(v.z, v.x), asin(v.y)) * invAtan + 0.5;
}
void main()
{
    // localPos must be normalized before use as a direction.
    vec2 uv = SampleSphericalMap(normalize(localPos));
    FragColor = vec4(texture(equirectangularMap, uv).rgb, 1.0);
}
渲染用6次不同视角矩阵的渲染,渲染到6个立方体贴图(省略了创建帧缓冲和绑定):
// 90-degree FOV with a 1:1 aspect ratio so each render exactly covers one
// cube face; near/far of 0.1/10 comfortably contains the unit capture cube.
glm::mat4 captureProjection = glm::perspective(glm::radians(90.0f), 1.0f, 0.1f, 10.0f);
// One view matrix per cubemap face (+X, -X, +Y, -Y, +Z, -Z), all looking out
// from the origin. NOTE(review): the up vectors are flipped/rotated to match
// cubemap face orientation conventions — verify against the target API.
glm::mat4 captureViews[] =
{
glm::lookAt(glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3( 1.0f, 0.0f, 0.0f), glm::vec3(0.0f, -1.0f, 0.0f)),
glm::lookAt(glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(-1.0f, 0.0f, 0.0f), glm::vec3(0.0f, -1.0f, 0.0f)),
glm::lookAt(glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3( 0.0f, 1.0f, 0.0f), glm::vec3(0.0f, 0.0f, 1.0f)),
glm::lookAt(glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3( 0.0f, -1.0f, 0.0f), glm::vec3(0.0f, 0.0f, -1.0f)),
glm::lookAt(glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3( 0.0f, 0.0f, 1.0f), glm::vec3(0.0f, -1.0f, 0.0f)),
glm::lookAt(glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3( 0.0f, 0.0f, -1.0f), glm::vec3(0.0f, -1.0f, 0.0f))
};
// convert HDR equirectangular environment map to cubemap equivalent
equirectangularToCubemapShader.use();
equirectangularToCubemapShader.setInt("equirectangularMap", 0); // sampler -> texture unit 0
equirectangularToCubemapShader.setMat4("projection", captureProjection);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, hdrTexture); // source equirectangular HDR texture
glViewport(0, 0, 512, 512); // don't forget to configure the viewport to the capture dimensions.
glBindFramebuffer(GL_FRAMEBUFFER, captureFBO);
// Render the scene six times, once per face, attaching the matching cubemap
// face as the framebuffer's color target each iteration.
for (unsigned int i = 0; i < 6; ++i)
{
equirectangularToCubemapShader.setMat4("view", captureViews[i]);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, envCubemap, 0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
renderCube(); // renders a 1x1 cube
}
// Restore the default framebuffer when done.
glBindFramebuffer(GL_FRAMEBUFFER, 0);
创建辐照度贴图
dω = sin(θ) dφ dθ
我们将采样的颜色值乘以系数 cos(θ) ,因为较大角度的光较弱,而系数 sin(θ) 则用于权衡较高半球区域的较小采样区域的贡献度
#version 330 core
out vec4 FragColor;
in vec3 localPos;
uniform samplerCube environmentMap;
const float PI = 3.14159265359;
void main()
{
    // The sample direction equals the hemisphere's orientation.
    vec3 normal = normalize(localPos);
    // Accumulated incoming radiance over the hemisphere.
    // (The original declared `irradiance` twice — a redeclaration error.)
    vec3 irradiance = vec3(0.0);
    // Build an orthonormal tangent basis around the normal.
    // (The original skipped the normalize calls, yielding a non-unit basis.)
    vec3 up = vec3(0.0, 1.0, 0.0);
    vec3 right = normalize(cross(up, normal));
    up = normalize(cross(normal, right));
    float sampleDelta = 0.025;
    float nrSamples = 0.0;
    for(float phi = 0.0; phi < 2.0 * PI; phi += sampleDelta)
    {
        for(float theta = 0.0; theta < 0.5 * PI; theta += sampleDelta)
        {
            // spherical to cartesian (in tangent space)
            vec3 tangentSample = vec3(sin(theta) * cos(phi), sin(theta) * sin(phi), cos(theta));
            // tangent space to world.
            // (The original used an undeclared `N` here instead of `normal`.)
            vec3 sampleVec = tangentSample.x * right + tangentSample.y * up + tangentSample.z * normal;
            // cos(theta) weights light by incidence angle; sin(theta) corrects
            // for the smaller solid angle of samples near the pole.
            irradiance += texture(environmentMap, sampleVec).rgb * cos(theta) * sin(theta);
            nrSamples++;
        }
    }
    // PI factors out of the Lambertian BRDF; average over the sample count.
    irradiance = PI * irradiance * (1.0 / float(nrSamples));
    FragColor = vec4(irradiance, 1.0);
}
与立方体贴图类似,我们需要在6个观察角度渲染到立方体纹理,此处不再重复,其中纹理使用低分辨率即可,因为已经进行了卷积操作:
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, 0, GL_RGB16F, 32, 32, 0, GL_RGB, GL_FLOAT, nullptr);
使用辐照度贴图
#version 450 core
in vec3 Normal;
uniform samplerCube irradianceMap;
// Earlier non-roughness version, kept for comparison (note: it should return
// vec3, not void, and its parameter is cosTheta):
//vec3 fresnelSchlick(float cosTheta, vec3 F0)
//{
//    return F0 + (1.0 - F0) * pow(clamp(1.0 - cosTheta, 0.0, 1.0), 5.0);
//}
// Fresnel with a roughness term: rougher surfaces get a weaker
// grazing-angle reflection boost.
vec3 fresnelSchlickRoughness(float cosTheta, vec3 F0, float roughness)
{
    return F0 + (max(vec3(1.0 - roughness), F0) - F0) * pow(1.0 - cosTheta, 5.0);
}
void main()
{
    ...
    vec3 kS = fresnelSchlickRoughness(max(dot(N, V), 0.0), F0, roughness);
    vec3 kD = 1.0 - kS;
    // The original read `vec3 = irradiance = ...` — a syntax error.
    vec3 irradiance = texture(irradianceMap, Normal).rgb;
    vec3 diffuse = irradiance * albedo;
    vec3 ambient = (kD * diffuse) * ao;
}
因为无法计算半程向量,使用法线进行采样。相较于之前的版本,使用粗糙度使得粗糙表面的边缘反射减弱。
使用textureLod(gsampler* tex, vec, float lod);对mipmap的贴图采样,lod是mip级别,越高越模糊。
可以根据粗糙度设置mipmap级别:
vec3 prefilteredColor = textureLod(prefilterMap, R, roughness * MAX_REFLECTION_LOD).rgb;
tips:glGenerateMipmap(GL_TEXTURE_CUBE_MAP);//让OpenGL生成mipmap
或手动设置:glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, prefilterMap, mip);
球体顶点
// Draws a unit sphere, lazily building its VAO/VBO/EBO on the first call.
// Relies on file-scope `sphereVAO` and `indexCount` (declared elsewhere).
// Vertex layout: position (3 floats), normal (3 floats), uv (2 floats).
void renderSphere()
{
if (sphereVAO == 0)
{
glGenVertexArrays(1, &sphereVAO);
unsigned int vbo, ebo;
glGenBuffers(1, &vbo);
glGenBuffers(1, &ebo);
std::vector<glm::vec3> positions;
std::vector<glm::vec2> uv;
std::vector<glm::vec3> normals;
std::vector<unsigned int> indices;
// Vertices: each ring is sampled counter-clockwise starting from the x axis;
// rings are generated from the top of the sphere down to the bottom.
const unsigned int X_SEGMENTS = 64;
const unsigned int Y_SEGMENTS = 64;
const float PI = 3.14159265359f;
// <= so the seam column/row is duplicated, letting uv wrap cleanly.
for (unsigned int x = 0; x <= X_SEGMENTS; ++x)
{
for (unsigned int y = 0; y <= Y_SEGMENTS; ++y)
{
float xSegment = (float)x / (float)X_SEGMENTS;
float ySegment = (float)y / (float)Y_SEGMENTS;
// Spherical-to-cartesian on the unit sphere.
float xPos = std::cos(xSegment * 2.0f * PI) * std::sin(ySegment * PI);
float yPos = std::cos(ySegment * PI);
float zPos = std::sin(xSegment * 2.0f * PI) * std::sin(ySegment * PI);
positions.push_back(glm::vec3(xPos, yPos, zPos));
uv.push_back(glm::vec2(xSegment, ySegment));
// On a unit sphere the position doubles as the outward normal.
normals.push_back(glm::vec3(xPos, yPos, zPos));
}
}
// Indices: build one long triangle strip, alternating the traversal
// direction on odd rows so consecutive rows chain together correctly.
bool oddRow = false;
for (unsigned int y = 0; y < Y_SEGMENTS; ++y)
{
if (!oddRow) // even rows: y == 0, y == 2; and so on
{
for (unsigned int x = 0; x <= X_SEGMENTS; ++x)
{
indices.push_back(y * (X_SEGMENTS + 1) + x);
indices.push_back((y + 1) * (X_SEGMENTS + 1) + x);
}
}
else
{
for (int x = X_SEGMENTS; x >= 0; --x)
{
indices.push_back((y + 1) * (X_SEGMENTS + 1) + x);
indices.push_back(y * (X_SEGMENTS + 1) + x);
}
}
oddRow = !oddRow;
}
indexCount = static_cast<unsigned int>(indices.size());
// Interleave position/normal/uv into one tightly packed float buffer.
std::vector<float> data;
for (unsigned int i = 0; i < positions.size(); ++i)
{
data.push_back(positions[i].x);
data.push_back(positions[i].y);
data.push_back(positions[i].z);
if (normals.size() > 0)
{
data.push_back(normals[i].x);
data.push_back(normals[i].y);
data.push_back(normals[i].z);
}
if (uv.size() > 0)
{
data.push_back(uv[i].x);
data.push_back(uv[i].y);
}
}
glBindVertexArray(sphereVAO);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, data.size() * sizeof(float), &data[0], GL_STATIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ebo);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices.size() * sizeof(unsigned int), &indices[0], GL_STATIC_DRAW);
// NOTE(review): the stride assumes both normals and uv were emitted; the
// `if (... > 0)` guards above are always true here, but if either vector
// were ever left empty this stride would be wrong — confirm intent.
unsigned int stride = (3 + 3 + 2) * sizeof(float);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, stride, (void*)0);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, stride, (void*)(3 * sizeof(float)));
glEnableVertexAttribArray(2);
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, stride, (void*)(6 * sizeof(float)));
}
glBindVertexArray(sphereVAO);
// Drawn as a single triangle strip (see index generation above).
glDrawElements(GL_TRIANGLE_STRIP, indexCount, GL_UNSIGNED_INT, 0);
}
四元数
在调试器中(x,y,z,w),w表示cos(angle/2),所以角度为2*acos(w)。
欧拉角转换:
glm::vec3 EulerAngles(90, 45, 0);//表示绕轴旋转的角度,x、y、z
glm::quat MyQuaternion = glm::quat(EulerAngles);
旋转角度坐标轴:
glm::quat MyQuaternion = glm::angleAxis(glm::radians(rotationAngle), rotationAxis);//注意:新版GLM的angleAxis接收弧度
glm::mat4 RotationMatrix = glm::toMat4(MyQuaternion);//需包含glm/gtx/quaternion.hpp
两个旋转的累计:
只需将两个四元数相乘即可。顺序和矩阵乘法一致。亦即逆序相乘:
glm::quat combined_rotation = second_rotation * first_rotation;