目录
任务
实现
rasterize_triangle
实现方法和上一个作业的插值一样,采用任意属性的透视矫正插值,将插值后的属性保存到片元即可。
作业框架中已经给出Vec2形式属性和Vec3形式属性的插值的函数interpolate,因此这里直接传入相关数据就可以直接得到插值的结果。
void rst::rasterizer::rasterize_triangle(const Triangle& t, const std::array<Eigen::Vector3f, 3>& view_pos)
{
auto v = t.toVector4();
int maxX,minX,maxY,minY;
maxX = (int)std::ceil(std::max(v[0].x(),std::max(v[1].x(),v[2].x())));
minX = (int)std::floor(std::min(v[0].x(),std::min(v[1].x(),v[2].x())));
maxY = (int)std::ceil(std::max(v[0].y(),std::max(v[1].y(),v[2].y())));
minY = (int)std::floor(std::min(v[0].y(),std::min(v[1].y(),v[2].y())));
for(int x =minX;x<maxX;x++){
for(int y=minY;y<maxY;y++){
if(insideTriangle(x+0.5,y+0.5,t.v)==true){
auto temp = computeBarycentric2D((float)x+0.5,(float)y+0.5,t.v);
const float alpha = std::get<0>(temp);
const float beta = std::get<1>(temp);
const float gamma = std::get<2>(temp);
const float Za = v[0].z();
const float Zb = v[1].z();
const float Zc = v[2].z();
float Z = 1.0 / (alpha / v[0].w() + beta / v[1].w() + gamma / v[2].w());
float zp = alpha * v[0].z() / v[0].w() + beta * v[1].z() / v[1].w() + gamma * v[2].z() / v[2].w();
zp *= Z;
//如果更靠近相机,更新片元
if(zp < depth_buf[get_index(x,y)]){
auto interpolated_color = interpolate(alpha,beta,gamma,t.color[0],t.color[1],t.color[2],1.0);
auto interpolated_normal = interpolate(alpha,beta,gamma,t.normal[0],t.normal[1],t.normal[2],1.0);
auto interpolated_texcoords = interpolate(alpha,beta,gamma,t.tex_coords[0],t.tex_coords[1],t.tex_coords[2],1);
auto interpolated_shadingcoords = interpolate(alpha,beta,gamma,view_pos[0],view_pos[1],view_pos[2],1.0);
Eigen::Vector3f color = t.color[0];
depth_buf[get_index(x,y)]=zp;
fragment_shader_payload payload( interpolated_color, interpolated_normal.normalized(), interpolated_texcoords, texture ? &*texture : nullptr);
payload.view_pos = interpolated_shadingcoords;
auto pixel_color = fragment_shader(payload);
frame_buf[get_index(x,y)] = pixel_color;
}
}
}
}
}
get_projection_matrix
原来图像是倒着的,这里对透视矩阵的x和y都加上了负号使其变成正的,而且我的程序将图放大了两倍,光照效果可能略有不同。
// Build the perspective projection matrix as (ortho * persp_to_ortho).
// The x and y scales are negated so the rendered image comes out upright in
// this framework's screen-space convention (see the write-up above).
// NOTE(review): eye_fov is fed to sin/cos directly — if callers pass the fov
// in degrees (as earlier assignments do), it should be converted to radians
// first; confirm against the caller.
Eigen::Matrix4f get_projection_matrix(float eye_fov, float aspect_ratio, float zNear, float zFar)
{
    Eigen::Matrix4f persp_to_ortho;
    Eigen::Matrix4f ortho;

    // Full frustum height at the near plane: 2 * near * tan(fov/2).
    float height = zNear / std::cos(eye_fov / 2) * std::sin(eye_fov / 2) * 2;
    float width = height * aspect_ratio;

    // "Squish" the frustum into a cuboid.
    persp_to_ortho << zNear, 0, 0, 0,
                      0, zNear, 0, 0,
                      0, 0, zNear + zFar, -zNear * zFar,
                      0, 0, 1, 0;

    // Scale the cuboid into the canonical [-1, 1]^3 cube (the frustum is
    // centered in x/y, so no translation is needed there).
    ortho << -2.0 / width, 0, 0, 0,
             0, -2.0 / height, 0, 0,
             0, 0, 2.0 / (zNear - zFar), -(zNear + zFar) / (zNear - zFar),
             0, 0, 0, 1;

    // BUG FIX: the original computed `ortho * persp_to_ortho * projection`
    // with `projection` still uninitialized, multiplying by garbage values.
    Eigen::Matrix4f projection = ortho * persp_to_ortho;
    return projection;
}
phong_fragment_shader
根据Blinn-Phong模型的公式直接写代码即可。
// Blinn-Phong shading of one fragment: ambient + diffuse + specular summed
// over all lights, with inverse-square falloff on the light intensity.
Eigen::Vector3f phong_fragment_shader(const fragment_shader_payload& payload)
{
    // Material coefficients.
    Eigen::Vector3f ka = Eigen::Vector3f(0.005, 0.005, 0.005);     // ambient
    Eigen::Vector3f kd = payload.color;                            // diffuse: the fragment's own color
    Eigen::Vector3f ks = Eigen::Vector3f(0.7937, 0.7937, 0.7937);  // specular

    // Two point lights: {position, intensity}.
    auto l1 = light{{20, 20, 20}, {500, 500, 500}};
    auto l2 = light{{-20, 20, 0}, {500, 500, 500}};
    std::vector<light> lights = {l1, l2};
    Eigen::Vector3f amb_light_intensity{10, 10, 10};  // ambient light intensity
    Eigen::Vector3f eye_pos{0, 0, 10};                // camera position
    float p = 150;                                    // specular exponent

    // Fragment attributes.
    Eigen::Vector3f point = payload.view_pos;
    Eigen::Vector3f normal = payload.normal;

    // Ambient term is light-independent; hoisted out of the loop.
    // NOTE(review): it is still *added* once per light (so twice in total),
    // matching the original behavior.
    const Eigen::Vector3f La = amb_light_intensity.cwiseProduct(ka);

    Eigen::Vector3f result_color = {0, 0, 0};
    for (auto& light : lights)
    {
        Eigen::Vector3f lightVec  = (light.position - point).normalized();  // direction to the light
        Eigen::Vector3f cameraVec = (eye_pos - point).normalized();         // direction to the camera
        Eigen::Vector3f halfVec   = (lightVec + cameraVec).normalized();    // half vector
        // Squared distance to the light, for the 1/r^2 attenuation.
        float lightDistance = (light.position - point).squaredNorm();
        Eigen::Vector3f Ld = (std::max(lightVec.dot(normal), 0.0f) * (light.intensity / lightDistance)).cwiseProduct(kd);
        Eigen::Vector3f Ls = (std::pow(std::max(halfVec.dot(normal), 0.0f), p) * (light.intensity / lightDistance)).cwiseProduct(ks);
        result_color += La + Ld + Ls;
    }
    // NOTE(review): no clamping — components above 1 will exceed 255 here.
    return result_color * 255.f;
}
texture_fragment_shader
实现起来和上面的phong_shader差不多,只不过原来片元的颜色从纹理上获取而已。
// Same as phong_fragment_shader, except the diffuse color is sampled from
// the bound texture instead of using the fragment's vertex color.
Eigen::Vector3f texture_fragment_shader(const fragment_shader_payload& payload)
{
    // Sample the texture at this fragment's uv; black when no texture is bound.
    Eigen::Vector3f texture_color = {0, 0, 0};
    if (payload.texture)
    {
        texture_color = payload.texture->getColor(payload.tex_coords.x(), payload.tex_coords.y());
    }

    // Material coefficients.
    Eigen::Vector3f ka = Eigen::Vector3f(0.005, 0.005, 0.005);
    Eigen::Vector3f kd = texture_color / 255.f;  // texture is 0..255, kd wants 0..1
    Eigen::Vector3f ks = Eigen::Vector3f(0.7937, 0.7937, 0.7937);

    // Two point lights: {position, intensity}.
    auto l1 = light{{20, 20, 20}, {500, 500, 500}};
    auto l2 = light{{-20, 20, 0}, {500, 500, 500}};
    std::vector<light> lights = {l1, l2};
    Eigen::Vector3f amb_light_intensity{10, 10, 10};
    Eigen::Vector3f eye_pos{0, 0, 10};
    float p = 150;  // specular exponent

    Eigen::Vector3f point = payload.view_pos;
    Eigen::Vector3f normal = payload.normal;

    // Light-independent ambient term, hoisted out of the loop (still added
    // once per light, matching the original behavior).
    const Eigen::Vector3f La = amb_light_intensity.cwiseProduct(ka);

    Eigen::Vector3f result_color = {0, 0, 0};
    for (auto& light : lights)
    {
        Eigen::Vector3f lightVec  = (light.position - point).normalized();
        Eigen::Vector3f cameraVec = (eye_pos - point).normalized();
        Eigen::Vector3f halfVec   = (lightVec + cameraVec).normalized();
        // Squared distance to the light, for the 1/r^2 attenuation.
        float lightDistance = (light.position - point).squaredNorm();
        Eigen::Vector3f Ld = (std::max(lightVec.dot(normal), 0.0f) * (light.intensity / lightDistance)).cwiseProduct(kd);
        Eigen::Vector3f Ls = (std::pow(std::max(halfVec.dot(normal), 0.0f), p) * (light.intensity / lightDistance)).cwiseProduct(ks);
        result_color += La + Ld + Ls;
    }
    return result_color * 255.f;
}
bump_fragment_shader(最难部分)
切线空间:法线贴图中的法线向量定义在切线空间中,在切线空间中,三角形的法线永远指着正z方向。
这里的N是三角形的法线,永远向上,T是切线,B是N叉乘T得到的副切线。这个作业中所给的切线代码可能存在一些问题。比如当法向量N的x,y,z坐标都是正数时,算出来的切向量T的x,y,z也都是正的,和法向量不垂直。但是代码框架这样子,就先顺着这么写下去了。
切向量T和副切向量B不仅仅只和法向量N有关,还和uv有关,如果只和N有关的话,那么切向量其实我们可以指定任意方向,这样算出来的结果法向量重新映射回坐标系就有无数种可能。因此还需要uv来确定最终的坐标。具体的推导可以看LearnOpenGL--法线贴图的推导过程。
E是两个向量位置的差,ΔU和ΔV是纹理坐标的差
当我们得到了三个向量后,回想一下之前相机的变换时是怎么将相机坐标系变到世界坐标系的。
这里的TBN矩阵也是这个原理,只不过这边只涉及方向,不涉及位移,因此不需要考虑位移部分,只需要考虑旋转部分。
知道如何将求得的法向量映射回原空间后,接下来是如何根据法向量贴图求法向量。
二维情况下对p求导能得到切线方向,将其旋转90°后就能得到法向量方向。三维的情况也是如此
法向量贴图蓝色的原因:法向量(0,0,1)对应到RGB空间就是蓝色。
根据相对高度的变化求导,得到法向量方向,将其归一化得到在TBN坐标下的方向,在根据上面的TBN矩阵,将其映射回原坐标就能得到最终结果。
// Bump mapping: perturb the interpolated normal using the height texture and
// output the perturbed normal as a color — no lighting is applied, so all of
// the Blinn-Phong setup the original declared here was dead code and has
// been removed.
Eigen::Vector3f bump_fragment_shader(const fragment_shader_payload& payload)
{
    // Bump scale factors from the assignment framework.
    float kh = 0.2, kn = 0.1;

    Eigen::Vector3f normal = payload.normal;

    // Build the TBN frame around the interpolated normal. This is the
    // framework's formula: t is not exactly perpendicular to the normal (see
    // the write-up above), but it is kept for consistency with the reference.
    float x = normal.x(), y = normal.y(), z = normal.z();
    // NOTE(review): sqrtXZ is 0 when the normal is exactly +/-Y, which would
    // divide by zero below — confirm whether that can occur for this model.
    float sqrtXZ = std::sqrt(x * x + z * z);
    Eigen::Vector3f t = {x * y / sqrtXZ, sqrtXZ, z * y / sqrtXZ};
    Eigen::Vector3f b = normal.cross(t);
    Eigen::Matrix3f TBN;
    TBN << t.x(), b.x(), normal.x(),
           t.y(), b.y(), normal.y(),
           t.z(), b.z(), normal.z();

    // Finite differences of the height map give the slope along u and v.
    float u = payload.tex_coords.x();
    float v = payload.tex_coords.y();
    float w = payload.texture->width;
    float h = payload.texture->height;
    float dU = kh * kn * (payload.texture->getColorBilinear(u + 1.0 / w, v).norm() - payload.texture->getColorBilinear(u, v).norm());
    float dV = kh * kn * (payload.texture->getColorBilinear(u, v + 1.0 / h).norm() - payload.texture->getColorBilinear(u, v).norm());

    // Perturbed normal in tangent space, mapped back through TBN.
    Eigen::Vector3f ln = {-dU, -dV, 1};
    normal = (TBN * ln).normalized();

    // Visualize the normal directly as the fragment color.
    return normal * 255.f;
}
displacement_fragment_shader
在上面的法向量的基础上再进行Blinn-Phong着色即可。
// Displacement mapping: displace the shading point along the normal by the
// height-map value, perturb the normal as in bump mapping, then run full
// Blinn-Phong shading on the displaced point.
Eigen::Vector3f displacement_fragment_shader(const fragment_shader_payload& payload)
{
    // Material coefficients.
    Eigen::Vector3f ka = Eigen::Vector3f(0.005, 0.005, 0.005);
    Eigen::Vector3f kd = payload.color;
    Eigen::Vector3f ks = Eigen::Vector3f(0.7937, 0.7937, 0.7937);

    // Two point lights: {position, intensity}.
    auto l1 = light{{20, 20, 20}, {500, 500, 500}};
    auto l2 = light{{-20, 20, 0}, {500, 500, 500}};
    std::vector<light> lights = {l1, l2};
    Eigen::Vector3f amb_light_intensity{10, 10, 10};
    Eigen::Vector3f eye_pos{0, 0, 10};
    float p = 150;  // specular exponent

    Eigen::Vector3f point = payload.view_pos;
    Eigen::Vector3f normal = payload.normal;

    // Displacement/bump scale factors from the assignment framework.
    float kh = 0.2, kn = 0.1;

    // TBN frame around the interpolated normal (framework formula; see the
    // bump shader for the caveats).
    float x = normal.x(), y = normal.y(), z = normal.z();
    float sqrtXZ = std::sqrt(x * x + z * z);
    Eigen::Vector3f t = {x * y / sqrtXZ, sqrtXZ, z * y / sqrtXZ};
    Eigen::Vector3f b = normal.cross(t);
    Eigen::Matrix3f TBN;
    TBN << t.x(), b.x(), normal.x(),
           t.y(), b.y(), normal.y(),
           t.z(), b.z(), normal.z();

    // Finite differences of the height map give the slope along u and v.
    float u = payload.tex_coords.x();
    float v = payload.tex_coords.y();
    float w = payload.texture->width;
    float h = payload.texture->height;
    float dU = kh * kn * (payload.texture->getColorBilinear(u + 1.0 / w, v).norm() - payload.texture->getColorBilinear(u, v).norm());
    float dV = kh * kn * (payload.texture->getColorBilinear(u, v + 1.0 / h).norm() - payload.texture->getColorBilinear(u, v).norm());
    Eigen::Vector3f ln = {-dU, -dV, 1};

    // Displace the shading point along the *original* normal, then replace
    // the normal with the perturbed one for shading.
    point = point + kn * normal * payload.texture->getColorBilinear(u, v).norm();
    normal = (TBN * ln).normalized();

    // Light-independent ambient term, hoisted out of the loop (still added
    // once per light, matching the original behavior).
    const Eigen::Vector3f La = amb_light_intensity.cwiseProduct(ka);

    Eigen::Vector3f result_color = {0, 0, 0};
    for (auto& light : lights)
    {
        Eigen::Vector3f lightVec  = (light.position - point).normalized();
        Eigen::Vector3f cameraVec = (eye_pos - point).normalized();
        Eigen::Vector3f halfVec   = (lightVec + cameraVec).normalized();
        // Squared distance to the light, for the 1/r^2 attenuation.
        float lightDistance = (light.position - point).squaredNorm();
        Eigen::Vector3f Ld = (std::max(lightVec.dot(normal), 0.0f) * (light.intensity / lightDistance)).cwiseProduct(kd);
        Eigen::Vector3f Ls = (std::pow(std::max(halfVec.dot(normal), 0.0f), p) * (light.intensity / lightDistance)).cwiseProduct(ks);
        result_color += La + Ld + Ls;
    }
    return result_color * 255.f;
}
提高
双线性插值
Texture.hpp
// Bilinearly interpolate the texture at normalized coordinates (u, v):
// blend the four texels surrounding the sample point.
//
// Fixes vs the original:
//  - the guard `(int)u != 1 || (int)u != 0 || ...` was tautologically true,
//    so the else-branch was unreachable dead code;
//  - `v_img4 = u_img1 + 1` was a typo (should use v_img1), so the fourth
//    corner sampled the wrong texel;
//  - neighbor texel coordinates are now clamped so we never index past the
//    last row/column of the image.
Eigen::Vector3f getColorBilinear(float u, float v){
    // Clamp uv into [0, 1].
    u = std::fmin(1.0, std::fmax(u, 0));
    v = std::fmin(1.0, std::fmax(v, 0));

    // Continuous texel coordinates of the sample point.
    float u_img = u * width;
    float v_img = v * height;

    // Lower-left corner texel and its right/upper neighbors, clamped to the
    // last valid texel.
    float u0 = std::fmin(std::floor(u_img), (float)width - 1);
    float v0 = std::fmin(std::floor(v_img), (float)height - 1);
    float u1 = std::fmin(u0 + 1, (float)width - 1);
    float v1 = std::fmin(v0 + 1, (float)height - 1);

    // Fetch the four corner texels (getColor takes normalized coordinates).
    auto c00 = getColor(u0 / width, v0 / height);
    auto c10 = getColor(u1 / width, v0 / height);
    auto c01 = getColor(u0 / width, v1 / height);
    auto c11 = getColor(u1 / width, v1 / height);

    // Fractional position of the sample inside the texel cell.
    float s = u_img - u0;
    float t = v_img - v0;

    // Lerp horizontally along both rows, then vertically between them.
    // NOTE(review): like the original, this samples at texel corners rather
    // than centers (no -0.5 offset).
    auto bottom = c00 + (c10 - c00) * s;
    auto top    = c01 + (c11 - c01) * s;
    return bottom + (top - bottom) * t;
}
导入其他模型
修改main函数中对应的路径即可。导入石头时可能会有问题,将eye_pos的z从10改为20即可,原因是z为10的时候石头超出了屏幕范围。
结果
normal方式着色
Blinn-Phong着色
texture着色
法向量贴图着色(这里同时展示双线性插值的变化)
不采用双线性插值
采用双线性插值
可以很明显看出采用双线性插值后过渡更加平滑
displacement着色
导入其他模型