Software Renderer, Part 6: Perspective Texture Mapping

Part 5 of my software renderer blog series only got as far as drawing pixels with Win32, and this sixth part goes straight to perspective texture mapping; it may well be the last post in the series. The whole 3D renderer comes to a little over 2,500 lines of code. Before reading this post, I assume you already have the following background:

       (1) Triangle rasterization; if this is unfamiliar, see section 9.4 of Tricks of the 3D Game Programming Gurus (《3D游戏编程大师技巧》, volume 2).

       (2) UV interpolation and affine texture mapping; see section 9.6 of the same book (a small sketch contrasting affine and perspective-correct interpolation follows this list).
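To make that difference concrete, here is a tiny standalone sketch of my own (not code from the renderer; the function names are made up) comparing affine UV interpolation with perspective-correct interpolation along one scan line:

#include <cstdio>

// Affine interpolation: u is interpolated directly in screen space.
// This is only correct when camera-space z is constant across the span.
float AffineU(float u0, float u1, float t)
{
	return u0 + (u1 - u0) * t;
}

// Perspective-correct interpolation: u/z and 1/z are linear in screen space,
// so interpolate those instead and divide at the very end.
float PerspectiveU(float u0, float z0, float u1, float z1, float t)
{
	float uOverZ   = u0 / z0 + (u1 / z1 - u0 / z0) * t;
	float oneOverZ = 1.0f / z0 + (1.0f / z1 - 1.0f / z0) * t;
	return uOverZ / oneOverZ;
}

int main()
{
	// Two endpoints of a scan line with the same u range but very different depths.
	float u0 = 0.0f, z0 = 1.0f;
	float u1 = 1.0f, z1 = 10.0f;
	for (float t = 0.0f; t <= 1.0f; t += 0.25f)
		printf("t=%.2f  affine u=%.3f  perspective u=%.3f\n",
		       t, AffineU(u0, u1, t), PerspectiveU(u0, z0, u1, z1, t));
	return 0;
}

At t = 0.5 the affine result is 0.5 while the perspective-correct result is about 0.09; that gap is exactly the texture "swimming" that affine mapping produces on triangles spanning a large depth range.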

 

 

OK, let me also pass along two technical posts on perspective texture mapping: 深入探索透视纹理映射(上) and 深入探索透视纹理映射(下) (An In-Depth Look at Perspective Texture Mapping, parts 1 and 2).

 

The material above is actually already enough to implement perspective texture mapping in full, so what problems are left?

Those two posts lay out the complete derivation of perspective texture mapping, but a few follow-up questions come out of it. I will go through them one by one and give my solutions.

 

 

1. After projection, the vertex's camera-space Z value is lost.

 

2. How do we compute a pixel's depth-buffer value (its Z in screen space)?
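Both questions reduce to one relation. With A = SCREEN_FAR / (SCREEN_FAR - SCREEN_NEAR) and B = -(SCREEN_FAR * SCREEN_NEAR) / (SCREEN_FAR - SCREEN_NEAR), the projection used here maps camera-space Z to viewport/NDC Z as z_ndc = A + B / z_camera. So 1/z_camera = (z_ndc - A) / B recovers the lost camera-space Z (question 1), and zBuffer = (1/z_camera) * B + A turns the interpolated 1/z_camera back into the pixel's depth value (question 2); the rasterization code below does exactly this with Z1_Camera_Rec and zBuffer. A minimal standalone round-trip sketch (the near/far values are placeholders picked only for illustration):

#include <cstdio>

int main()
{
	// Placeholder near/far values chosen only for this illustration.
	const float SCREEN_NEAR = 1.0f;
	const float SCREEN_FAR  = 1000.0f;

	float A = SCREEN_FAR / (SCREEN_FAR - SCREEN_NEAR);
	float B = -(SCREEN_FAR * SCREEN_NEAR) / (SCREEN_FAR - SCREEN_NEAR);

	float zCamera    = 25.0f;                // some camera-space depth
	float zNDC       = A + B / zCamera;      // what the projection stores in the vertex
	float invZCamera = (zNDC - A) / B;       // camera-space 1/Z recovered from the NDC Z (question 1)
	float zBuffer    = invZCamera * B + A;   // and back to the depth value, as done per pixel (question 2)

	printf("zCamera=%.3f  zNDC=%.6f  recovered zCamera=%.3f  zBuffer=%.6f\n",
	       zCamera, zNDC, 1.0f / invZCamera, zBuffer);
	return 0;
}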

 

 

 

 

Here is our rasterization code:

 

 

Code for rasterizing a flat-top triangle (perspective texture mapping):

 

// Draw a flat-top triangle with the scan-line method, y2 = y3
// X and Y step by 1.0 along a scan line, while ZU, ZV and Z each step by their own interpolated increment.
// The viewport/NDC-space Z value is linear in the reciprocal of the camera-space Z value.
// Supports diffuse and ambient lighting
void DrawTopTriangleInTexture(Vertex* p1, Vertex* p2, Vertex* p3)
{

	float A = SCREEN_FAR / (SCREEN_FAR - SCREEN_NEAR);
	float B = -(SCREEN_FAR* SCREEN_NEAR) / (SCREEN_FAR - SCREEN_NEAR);

	int x1 = p1->x;
	int y1 = p1->y;
	float u1 = p1->u;
	float v1 = p1->v;
	float z1 = p1->z;
	float nx1 = p1->nx;
	float ny1 = p1->ny;
	float nz1 = p1->nz;
	int  x2;
	int  y2;
	float u2;
	float v2;
	float z2;
	float nx2;
	float ny2;
	float nz2;
	int  x3;
	int  y3;
	float u3;
	float v3;
	float z3;
	float nx3;
	float ny3;
	float nz3;

	// Swap if needed so that p2 holds the left vertex and p3 the right vertex
	if (p2->x > p3->x)
	{
		x2 = p3->x;
		y2 = p3->y;
		z2 = p3->z;
		u2 = p3->u;
		v2 = p3->v;
		nx2 = p3->nx;
		ny2 = p3->ny;
		nz2 = p3->nz;

		x3 = p2->x;
		y3 = p2->y;
		z3 = p2->z;
		u3 = p2->u;
		v3 = p2->v;
		nx3 = p2->nx;
		ny3 = p2->ny;
		nz3 = p2->nz;
	}
	else
	{
		x2 = p2->x;
		y2 = p2->y;
		u2 = p2->u;
		v2 = p2->v;
		nx2 = p2->nx;
		ny2 = p2->ny;
		nz2 = p2->nz;

		z2 = p2->z;
		x3 = p3->x;
		y3 = p3->y;
		u3 = p3->u;
		v3 = p3->v;
		z3 = p3->z;
		nx3 = p3->nx;
		ny3 = p3->ny;
		nz3 = p3->nz;
	}

	// Values at the left and right endpoints of the scan line
	int  left_x;
	int  right_x;
	float left_zu;
	float right_zu;
	float left_zv;
	float right_zv;
	float left_z;
	float right_z;
	float left_znx;
	float right_znx;
	float left_zny;
	float right_zny;
	float left_znz;
	float right_znz;

	// Per-step increment of each variable along the scan line
	float ZVOneStep;
	float ZUOneStep;
	float ZOneStep;
	float ZNXOneStep;
	float ZNYOneStep;
	float ZNZOneStep;


	float ZU;
	float ZV;
	float Z;
	int x, y;
	float ZNX;
	float ZNY;
	float ZNZ;


	// Reciprocal of each vertex's camera-space Z value
	float Z1_Camera_Rec = 1.0 / (B / (z1 - A));
	float Z2_Camera_Rec = 1.0 / (B / (z2 - A));
	float Z3_Camera_Rec = 1.0 / (B / (z3 - A));

	// The two-point form used in the loop must not divide by zero
	if (y2 == y1 || y3 == y1)
	{
		return;
	}

	// Note y >= 0 && y <= screen_h; no clipping against the view planes is done here
	for (y = y1; y >= 0 && y <= screen_h&&y >= y2; --y)
	{
		
		// X of the left/right edge points; the right X is padded so the right edge is covered and no black seam appears
		left_x = (y - y1) *(x2 - x1) / (y2 - y1) + x1 ;
		right_x = (y - y1) *(x3 - x1) / (y3 - y1) + x1+2;

		left_zu = (float)(y - y1) *(Z2_Camera_Rec*u2 - Z1_Camera_Rec*u1) / (float)(y2 - y1) + Z1_Camera_Rec*u1;
		right_zu = (float)(y - y1) *(Z3_Camera_Rec*u3 - Z1_Camera_Rec*u1) / (float)(y3 - y1) + Z1_Camera_Rec*u1;

		// (1/Z)*V at the left/right edge points
		left_zv = (float)(y - y1) *(Z2_Camera_Rec*v2 - Z1_Camera_Rec*v1) / (float)(y2 - y1) + Z1_Camera_Rec*v1;
		right_zv = (float)(y - y1) *(Z3_Camera_Rec*v3 - Z1_Camera_Rec*v1) / (float)(y3 - y1) + Z1_Camera_Rec*v1;


		// 1/Z at the left/right edge points
		left_z = (float)(y - y1) *(Z2_Camera_Rec - Z1_Camera_Rec) / (float)(y2 - y1) + Z1_Camera_Rec;
		right_z = (float)(y - y1) *(Z3_Camera_Rec - Z1_Camera_Rec) / (float)(y3 - y1) + Z1_Camera_Rec;

		// (1/Z)*nx at the left/right edge points
		left_znx = (float)(y - y1) *(Z2_Camera_Rec*nx2 - Z1_Camera_Rec*nx1) / (float)(y2 - y1) + Z1_Camera_Rec*nx1;
		right_znx = (float)(y - y1) *(Z3_Camera_Rec*nx3 - Z1_Camera_Rec*nx1) / (float)(y3 - y1) + Z1_Camera_Rec*nx1;

		// (1/Z)*ny at the left/right edge points
		left_zny = (float)(y - y1) *(Z2_Camera_Rec*ny2 - Z1_Camera_Rec*ny1) / (float)(y2 - y1) + Z1_Camera_Rec*ny1;
		right_zny = (float)(y - y1) *(Z3_Camera_Rec*ny3 - Z1_Camera_Rec*ny1) / (float)(y3 - y1) + Z1_Camera_Rec*ny1;

		// (1/Z)*nz at the left/right edge points
		left_znz = (float)(y - y1) *(Z2_Camera_Rec*nz2 - Z1_Camera_Rec*nz1) / (float)(y2 - y1) + Z1_Camera_Rec*nz1;
		right_znz = (float)(y - y1) *(Z3_Camera_Rec*nz3 - Z1_Camera_Rec*nz1) / (float)(y3 - y1) + Z1_Camera_Rec*nz1;

		// Horizontal length of the scan line; note the zero check below
		float XLength = (right_x - left_x);
	
	
		if (XLength != 0.0f)
		{

			// Increments of ZU, ZV, Z and the normal terms per unit step in X
			ZUOneStep = (right_zu - left_zu) / XLength;
			ZVOneStep = (right_zv - left_zv) / XLength;
			ZOneStep = (right_z - left_z) / XLength;
			ZNXOneStep = (right_znx - left_znx) / XLength;
			ZNYOneStep = (right_zny - left_zny) / XLength;
			ZNZOneStep = (right_znz - left_znz) / XLength;

			// Interpolate ZU, ZV, Z, ZNX, ZNY, ZNZ for every pixel on the scan line
			// Note x >= 0 && x <= screen_w
			for (x = left_x + 1, ZU = left_zu, ZV = left_zv, Z = left_z, ZNX = left_znx, ZNY = left_zny, ZNZ = left_znz; x <= right_x&&x >= 0 && x <= screen_w; ++x, ZU += ZUOneStep, ZV += ZVOneStep, Z += ZOneStep, ZNX += ZNXOneStep, ZNY += ZNYOneStep, ZNZ += ZNZOneStep)
			{
				// Depth value of the pixel, 0.0 <= zBuffer <= 1.0
				float zBuffer = Z*B + A;

				// Depth (Z-buffer) test
				if (zBuffer >= DepthBuffer[x][y])
				{
					continue;
				}
				// Divide by 1/Z to recover the pixel's texture coordinates and normal
				float u = ZU / Z;
				float v = ZV / Z;
		
				float nx = ZNX / Z;
				float ny = ZNY / Z;
				float nz = ZNZ / Z;
				Vector NormalVec = BuildVector(nx, ny, nz);

				float texPosX = u*texture_w;
				float texPosY = v*texture_h;
				int texPosX_Int = int(texPosX);
				int texPosY_Int = int(texPosY);
				float texPosX_Frac = texPosX - texPosX_Int;
				float texPosY_Frac = texPosY - texPosY_Int;
				byte r0, g0, b0;
				byte r1, g1, b1;
				byte r2, g2, b2;
				byte r3, g3, b3;
				byte r, g, b;
				float LightR, LightG, LightB;
				Vector DiffuseColor;



				// Guard against reading outside the texture buffer
				if (texPosX_Int == texture_w || texPosY_Int == texture_h)
					continue;
				// Away from the texture edge, use bilinear interpolation
				else if ((texPosX_Int + 1) < texture_w && (texPosY_Int + 1) < texture_h)
				{

					// pixel P0
					b0 = TextureBuffer[texPosY_Int * texture_w * 3 + (texPosX_Int + 1) * 3 - 1];
					g0 = TextureBuffer[texPosY_Int* texture_w * 3 + (texPosX_Int + 1) * 3 - 2];
					r0 = TextureBuffer[texPosY_Int* texture_w * 3 + (texPosX_Int + 1) * 3 - 3];

					// pixel P1
					b1 = TextureBuffer[texPosY_Int * texture_w * 3 + (texPosX_Int + 2) * 3 - 1];
					g1 = TextureBuffer[texPosY_Int * texture_w * 3 + (texPosX_Int + 2) * 3 - 2];
					r1 = TextureBuffer[texPosY_Int * texture_w * 3 + (texPosX_Int + 2) * 3 - 3];

					// pixel P2
					b2 = TextureBuffer[(texPosY_Int + 1)* texture_w * 3 + (texPosX_Int + 2) * 3 - 1];
					g2 = TextureBuffer[(texPosY_Int + 1)* texture_w * 3 + (texPosX_Int + 2) * 3 - 2];
					r2 = TextureBuffer[(texPosY_Int + 1)* texture_w * 3 + (texPosX_Int + 2) * 3 - 3];

					// pixel P3
					b3 = TextureBuffer[(texPosY_Int + 1) * texture_w * 3 + (texPosX_Int + 1) * 3 - 1];
					g3 = TextureBuffer[(texPosY_Int + 1)* texture_w * 3 + (texPosX_Int + 1) * 3 - 2];
					r3 = TextureBuffer[(texPosY_Int + 1)* texture_w * 3 + (texPosX_Int + 1) * 3 - 3];

					b = b0*(1.0f - texPosX_Frac)*(1 - texPosY_Frac) + b1* texPosX_Frac*(1 - texPosY_Frac) + b2*texPosX_Frac*texPosY_Frac + b3*(1.0f - texPosX_Frac)*texPosY_Frac;
					g = g0*(1.0f - texPosX_Frac)*(1 - texPosY_Frac) + g1* texPosX_Frac*(1 - texPosY_Frac) + g2*texPosX_Frac*texPosY_Frac + g3*(1.0f - texPosX_Frac)*texPosY_Frac;
					r = r0*(1.0f - texPosX_Frac)*(1 - texPosY_Frac) + r1* texPosX_Frac*(1 - texPosY_Frac) + r2*texPosX_Frac*texPosY_Frac + r3*(1.0f - texPosX_Frac)*texPosY_Frac;
				}

				// At the texture edge, fall back to nearest-point sampling
				else
				{
					b = TextureBuffer[texPosY_Int * texture_w * 3 + (texPosX_Int + 1) * 3 - 1];
					g = TextureBuffer[texPosY_Int* texture_w * 3 + (texPosX_Int + 1) * 3 - 2];
					r = TextureBuffer[texPosY_Int* texture_w * 3 + (texPosX_Int + 1) * 3 - 3];
				}

				// Normalize the normal vector
				VectorNormalize(&NormalVec);

				// Dot the unit normal with the reversed diffuse light direction
				float DiffuseFactor = VectorDotProduct(&NormalVec, &DiffuseDirRevse);

				// Start with the ambient light
				LightR = AmvientLight.x;
				LightG = AmvientLight.y;
				LightB = AmvientLight.z;

				// Diffuse light contribution of this pixel
				DiffuseColor.x = Clamp(DiffuseFactor*DiffuseLight.x, 0, 255.0f);
				DiffuseColor.y = Clamp(DiffuseFactor*DiffuseLight.y, 0, 255.0f);
				DiffuseColor.z = Clamp(DiffuseFactor*DiffuseLight.z, 0, 255.0f);

				// Total light intensity for the pixel (diffuse plus ambient)
				LightR = Clamp(DiffuseColor.x + LightR, 0, 255.0f);
				LightG = Clamp(DiffuseColor.y + LightG, 0, 255.0f);
				LightB = Clamp(DiffuseColor.z + LightB, 0, 255.0f);

				// Scale the light factors down by 255 into [0, 1]
				LightR /= 255.0f;
				LightG /= 255.0f;
				LightB /= 255.0f;

				// Finally modulate the texture color by the light intensity
				b = b*LightB;
				g = g*LightG;
				r = r*LightR;

				// Write the color into the back buffer
				BackBuffer[int(y) *  screen_w * 3 + (int(x) + 1) * 3 - 1] = b;
				BackBuffer[int(y) *  screen_w * 3 + (int(x) + 1) * 3 - 2] = g;
				BackBuffer[int(y) *  screen_w * 3 + (int(x) + 1) * 3 - 3] = r;

				// Update the depth buffer
				DepthBuffer[x][y] = zBuffer;
			}
		}
	}
}

 

 

 

Code for rasterizing a flat-bottom triangle (perspective texture mapping):

 

// Draw a flat-bottom triangle with the scan-line method, y2 = y3
void DrawBottomTriangleInTexture(Vertex* p1, Vertex* p2, Vertex* p3)
{

	float A = SCREEN_FAR / (SCREEN_FAR - SCREEN_NEAR);
	float B = -(SCREEN_FAR* SCREEN_NEAR) / (SCREEN_FAR - SCREEN_NEAR);

	int x1 = p1->x;
	int y1 = p1->y;
	float u1 = p1->u;
	float v1 = p1->v;
	float z1 = p1->z;
	float nx1 = p1->nx;
	float ny1 = p1->ny;
	float nz1 = p1->nz;
	int  x2;
	int  y2;
	float u2;
	float v2;
	float z2;
	float nx2;
	float ny2;
	float nz2;
	int  x3;
	int  y3;
	float u3;
	float v3;
	float z3;
	float nx3;
	float ny3;
	float nz3;

	// Swap if needed so that p2 holds the left vertex and p3 the right vertex
	if (p2->x > p3->x)
	{
		x2 = p3->x;
		y2 = p3->y;
		z2 = p3->z;
		u2 = p3->u;
		v2 = p3->v;
		nx2 = p3->nx;
		ny2 = p3->ny;
		nz2 = p3->nz;

		x3 = p2->x;
		y3 = p2->y;
		z3 = p2->z;
		u3 = p2->u;
		v3 = p2->v;
		nx3 = p2->nx;
		ny3 = p2->ny;
		nz3 = p2->nz;
	}
	else
	{
		x2 = p2->x;
		y2 = p2->y;
		u2 = p2->u;
		v2 = p2->v;
		nx2 = p2->nx;
		ny2 = p2->ny;
		nz2 = p2->nz;

		z2 = p2->z;
		x3 = p3->x;
		y3 = p3->y;
		u3 = p3->u;
		v3 = p3->v;
		z3 = p3->z;
		nx3 = p3->nx;
		ny3 = p3->ny;
		nz3 = p3->nz;
	}

	// Values at the left and right endpoints of the scan line
	int  left_x;
	int  right_x;
	float left_zu;
	float right_zu;
	float left_zv;
	float right_zv;
	float left_z;
	float right_z;
	float left_znx;
	float right_znx;
	float left_zny;
	float right_zny;
	float left_znz;
	float right_znz;

	// Per-step increment of each variable along the scan line
	float ZVOneStep;
	float ZUOneStep;
	float ZOneStep;
	float ZNXOneStep;
	float ZNYOneStep;
	float ZNZOneStep;


	float ZU;
	float ZV;
	float Z;
    int x, y;
	float ZNX;
	float ZNY;
	float ZNZ;


	// Reciprocal of each vertex's camera-space Z value
	float Z1_Camera_Rec = 1.0 / (B / (z1 - A));
	float Z2_Camera_Rec = 1.0 / (B / (z2 - A));
	float Z3_Camera_Rec = 1.0 / (B / (z3 - A));

	// The two-point form used in the loop must not divide by zero
	if (y2 == y1 || y3 == y1)
	{
		return;
	}

	// The loop condition must keep the "=" in the comparisons
	// Note y >= 0 && y <= screen_h
	for (y = y1; y >= 0 && y <= screen_h&&y <= y2; ++y)
	{
		// X of the left/right edge points; the right X is padded to remove the black seam, because I found the right edge of flat-top/flat-bottom triangles was otherwise never drawn, so the right X value is enlarged
		left_x = (y - y1) *(x2 - x1) / (y2 - y1) + x1;
		right_x = (y - y1) *(x3 - x1) / (y3 - y1) + x1 + 2;

		// (1/Z)*U at the left/right edge points
		left_zu = (float)(y - y1) *(Z2_Camera_Rec*u2 - Z1_Camera_Rec*u1) / (float)(y2 - y1) + Z1_Camera_Rec*u1;
		right_zu = (float)(y - y1) *(Z3_Camera_Rec*u3 - Z1_Camera_Rec*u1) / (float)(y3 - y1) + Z1_Camera_Rec*u1;

		// (1/Z)*V at the left/right edge points
		left_zv = (float)(y - y1) *(Z2_Camera_Rec*v2 - Z1_Camera_Rec*v1) / (float)(y2 - y1) + Z1_Camera_Rec*v1;
		right_zv = (float)(y - y1) *(Z3_Camera_Rec*v3 - Z1_Camera_Rec*v1) / (float)(y3 - y1) + Z1_Camera_Rec*v1;


		// 1/Z at the left/right edge points
		left_z = (float)(y - y1) *(Z2_Camera_Rec- Z1_Camera_Rec) / (float)(y2 - y1) + Z1_Camera_Rec;
		right_z = (float)(y - y1) *(Z3_Camera_Rec - Z1_Camera_Rec) / (float)(y3 - y1) + Z1_Camera_Rec;

		// (1/Z)*nx at the left/right edge points
		left_znx = (float)(y - y1) *(Z2_Camera_Rec*nx2 - Z1_Camera_Rec*nx1) / (float)(y2 - y1) + Z1_Camera_Rec*nx1;
		right_znx = (float)(y - y1) *(Z3_Camera_Rec*nx3 - Z1_Camera_Rec*nx1) / (float)(y3 - y1) + Z1_Camera_Rec*nx1;

		// (1/Z)*ny at the left/right edge points
		left_zny = (float)(y - y1) *(Z2_Camera_Rec*ny2 - Z1_Camera_Rec*ny1) / (float)(y2 - y1) + Z1_Camera_Rec*ny1;
		right_zny = (float)(y - y1) *(Z3_Camera_Rec*ny3 - Z1_Camera_Rec*ny1) / (float)(y3 - y1) + Z1_Camera_Rec*ny1;

		// (1/Z)*nz at the left/right edge points
		left_znz = (float)(y - y1) *(Z2_Camera_Rec*nz2 - Z1_Camera_Rec*nz1) / (float)(y2 - y1) + Z1_Camera_Rec*nz1;
		right_znz = (float)(y - y1) *(Z3_Camera_Rec*nz3 - Z1_Camera_Rec*nz1) / (float)(y3 - y1) + Z1_Camera_Rec*nz1;

		// Horizontal length of the scan line (may be zero, hence the check below)
		float XLength = right_x - left_x;

		if (XLength != 0.0f)
		{
			// Increments of ZU, ZV, Z, ZNX, ZNY, ZNZ per unit step in X
			ZUOneStep = (right_zu - left_zu) / XLength;
			ZVOneStep = (right_zv - left_zv) / XLength;
			ZOneStep = (right_z - left_z) / XLength;
			ZNXOneStep = (right_znx - left_znx) / XLength;
			ZNYOneStep = (right_zny - left_zny) / XLength;
		    ZNZOneStep = (right_znz - left_znz) / XLength;

			// Interpolate ZU, ZV, Z, ZNX, ZNY, ZNZ for every pixel on the scan line
			// Note x >= 0 && x <= screen_w
			for (x = left_x + 1, ZU = left_zu, ZV = left_zv, Z = left_z, ZNX = left_znx, ZNY = left_zny, ZNZ = left_znz; x <= right_x&&x >= 0 && x <= screen_w; ++x, ZU += ZUOneStep, ZV += ZVOneStep, Z += ZOneStep, ZNX += ZNXOneStep, ZNY += ZNYOneStep,ZNZ+=ZNZOneStep)
			{
				// Depth value of the pixel, 0.0 <= zBuffer <= 1.0
				float zBuffer = Z*B + A;

				// Depth (Z-buffer) test
				if (zBuffer >= DepthBuffer[x][y])
				{
					continue;
				}

				// Divide by 1/Z to recover the pixel's texture coordinates and normal
				float u = ZU / Z;
				float v = ZV / Z;
				float nx = ZNX / Z;
				float ny = ZNY/ Z;
				float nz = ZNZ / Z;
				Vector NormalVec = BuildVector(nx, ny, nz);

				float texPosX = u*texture_w;
				float texPosY = v*texture_h;
				int texPosX_Int = int(texPosX);
				int texPosY_Int = int(texPosY);
				float texPosX_Frac = texPosX - texPosX_Int;
				float texPosY_Frac = texPosY - texPosY_Int;
				byte r0, g0, b0;
				byte r1, g1, b1;
				byte r2, g2, b2;
				byte r3, g3, b3;
				byte r, g, b;
				float LightR, LightG, LightB;
				Vector DiffuseColor;



				// Guard against reading outside the texture buffer
				if (texPosX_Int == texture_w || texPosY_Int == texture_h)
					continue;
				// Away from the texture edge, use bilinear interpolation
				else if ((texPosX_Int + 1) < texture_w && (texPosY_Int + 1) < texture_h)
				{

					// pixel P0
					b0 = TextureBuffer[texPosY_Int * texture_w * 3 + (texPosX_Int + 1) * 3 - 1];
					g0 = TextureBuffer[texPosY_Int* texture_w * 3 + (texPosX_Int + 1) * 3 - 2];
					r0 = TextureBuffer[texPosY_Int* texture_w * 3 + (texPosX_Int + 1) * 3 - 3];

					// pixel P1
					b1 = TextureBuffer[texPosY_Int * texture_w * 3 + (texPosX_Int + 2) * 3 - 1];
					g1 = TextureBuffer[texPosY_Int * texture_w * 3 + (texPosX_Int + 2) * 3 - 2];
					r1 = TextureBuffer[texPosY_Int * texture_w * 3 + (texPosX_Int + 2) * 3 - 3];

					// pixel P2
					b2 = TextureBuffer[(texPosY_Int + 1)* texture_w * 3 + (texPosX_Int + 2) * 3 - 1];
					g2 = TextureBuffer[(texPosY_Int + 1)* texture_w * 3 + (texPosX_Int + 2) * 3 - 2];
					r2 = TextureBuffer[(texPosY_Int + 1)* texture_w * 3 + (texPosX_Int + 2) * 3 - 3];

					// pixel P3
					b3 = TextureBuffer[(texPosY_Int + 1) * texture_w * 3 + (texPosX_Int + 1) * 3 - 1];
					g3 = TextureBuffer[(texPosY_Int + 1)* texture_w * 3 + (texPosX_Int + 1) * 3 - 2];
					r3 = TextureBuffer[(texPosY_Int + 1)* texture_w * 3 + (texPosX_Int + 1) * 3 - 3];

					b = b0*(1.0f - texPosX_Frac)*(1 - texPosY_Frac) + b1* texPosX_Frac*(1 - texPosY_Frac) + b2*texPosX_Frac*texPosY_Frac + b3*(1.0f - texPosX_Frac)*texPosY_Frac;
					g = g0*(1.0f - texPosX_Frac)*(1 - texPosY_Frac) + g1* texPosX_Frac*(1 - texPosY_Frac) + g2*texPosX_Frac*texPosY_Frac + g3*(1.0f - texPosX_Frac)*texPosY_Frac;
					r = r0*(1.0f - texPosX_Frac)*(1 - texPosY_Frac) + r1* texPosX_Frac*(1 - texPosY_Frac) + r2*texPosX_Frac*texPosY_Frac + r3*(1.0f - texPosX_Frac)*texPosY_Frac;
				}

				// At the texture edge, fall back to nearest-point sampling
				else
				{
					b = TextureBuffer[texPosY_Int * texture_w * 3 + (texPosX_Int + 1) * 3 - 1];
					g = TextureBuffer[texPosY_Int* texture_w * 3 + (texPosX_Int + 1) * 3 - 2];
					r = TextureBuffer[texPosY_Int* texture_w * 3 + (texPosX_Int + 1) * 3 - 3];
				}

				// Normalize the normal vector
				VectorNormalize(&NormalVec);

				// Dot the unit normal with the reversed diffuse light direction
				float DiffuseFactor = VectorDotProduct(&NormalVec, &DiffuseDirRevse);

				// Start with the ambient light
				LightR = AmvientLight.x;
				LightG = AmvientLight.y;
				LightB = AmvientLight.z;

				// Diffuse light contribution of this pixel
				DiffuseColor.x = Clamp(DiffuseFactor*DiffuseLight.x,0,255.0f);
				DiffuseColor.y = Clamp(DiffuseFactor*DiffuseLight.y, 0, 255.0f);
				DiffuseColor.z = Clamp(DiffuseFactor*DiffuseLight.z, 0, 255.0f);

				// Total light intensity for the pixel (diffuse plus ambient)
				LightR = Clamp(DiffuseColor.x + LightR, 0, 255.0f);
				LightG = Clamp(DiffuseColor.y + LightG, 0, 255.0f);
				LightB = Clamp(DiffuseColor.z + LightB, 0, 255.0f);

				// Scale the light factors down by 255 into [0, 1]
				LightR /= 255.0f;
				LightG /= 255.0f;
				LightB /= 255.0f;

				// Finally modulate the texture color by the light intensity
				b = b*LightB;
				g = g*LightG;
				r = r*LightR;

				// Write the color into the back buffer
				BackBuffer[int(y) *  screen_w * 3 + (int(x) + 1) * 3 - 1] = b;
				BackBuffer[int(y) *  screen_w * 3 + (int(x) + 1) * 3 - 2] = g;
				BackBuffer[int(y) *  screen_w * 3 + (int(x) + 1) * 3 - 3] = r;

				// Update the depth buffer
				DepthBuffer[x][y] = zBuffer;

			}
		}
	}
}

 

 

 

3. Handling a general (oblique) triangle.

When rasterizing a general triangle, we split it into one flat-bottom triangle and one flat-top triangle, as shown in the figure below:

 

 

 

 

The general triangle S1S2S3 is split into the flat-bottom triangle s1s2s4 and the flat-top triangle s3s2s4. So how do we compute the texture coordinates, Z value, normal and so on of the newly introduced point s4? They also have to be computed the perspective-correct way:

 

Implementation:

 

	// Compute s4's X, Y, Z, U, V values and normal
		// (1/Zcamera)*U, (1/Zcamera)*V, 1/Zcamera, (1/Zcamera)*nx, (1/Zcamera)*ny, (1/Zcamera)*nz are linear in X and Y
		// First recover the camera-space Z from the depth (NDC Z) value
		Vertex s4;
		s4.y = s2.y;
		s4.x = (s4.y - s1.y) *(s3.x - s1.x) / (s3.y - s1.y) + s1.x;

		// Projection constants A and B, needed to recover the camera-space Z of S1 and S3
		float A = SCREEN_FAR / (SCREEN_FAR - SCREEN_NEAR);
		float B = -(SCREEN_FAR* SCREEN_NEAR) / (SCREEN_FAR - SCREEN_NEAR);


		// Reciprocal of camera-space Z for S1 and S3
		float ZCamera_S3_Rec = 1.0f / (B / (s3.z - A));
		float ZCamera_S1_Rec = 1.0f / (B / (s1.z - A));

		// Compute U/Zcamera, V/Zcamera, 1/Zcamera, nx/Zcamera, ny/Zcamera, nz/Zcamera for S1 and S3
		float ZU_S3 = s3.u * ZCamera_S3_Rec;
		float ZU_S1 = s1.u * ZCamera_S1_Rec;
		float ZV_S3 = s3.v * ZCamera_S3_Rec;
		float ZV_S1 = s1.v * ZCamera_S1_Rec;
		float ZNX_S3 = s3.nx * ZCamera_S3_Rec;
		float ZNX_S1 = s1.nx * ZCamera_S1_Rec;
		float ZNY_S3 = s3.ny * ZCamera_S3_Rec;
		float ZNY_S1 = s1.ny * ZCamera_S1_Rec;
		float ZNZ_S3 = s3.nz * ZCamera_S3_Rec;
		float ZNZ_S1 = s1.nz * ZCamera_S1_Rec;
	

		// Linearly interpolate (1/Zcamera)*U, (1/Zcamera)*V, 1/Zcamera and the normal terms in Y
		float Z_S4= (s4.y - s1.y) *(ZCamera_S3_Rec - ZCamera_S1_Rec) / (s3.y - s1.y) + ZCamera_S1_Rec;
		float ZU_S4 = (s4.y - s1.y) *(ZU_S3 - ZU_S1) / (s3.y - s1.y) + ZU_S1;
		float ZV_S4 = (s4.y - s1.y) *(ZV_S3 - ZV_S1) / (s3.y - s1.y) + ZV_S1;
		float ZNX_S4 = (s4.y - s1.y) *(ZNX_S3 - ZNX_S1) / (s3.y - s1.y) + ZNX_S1;
		float ZNY_S4 = (s4.y - s1.y) *(ZNY_S3 - ZNY_S1) / (s3.y - s1.y) + ZNY_S1;
		float ZNZ_S4 = (s4.y - s1.y) *(ZNZ_S3 - ZNZ_S1) / (s3.y - s1.y) + ZNZ_S1;

		// Recover s4's U, V and normal by dividing by 1/Zcamera
		s4.u = ZU_S4 / Z_S4;
		s4.v = ZV_S4 / Z_S4;
		s4.nx= ZNX_S4/ Z_S4;
		s4.ny = ZNY_S4 / Z_S4;
		s4.nz = ZNZ_S4 / Z_S4;
		

		// s4's NDC Z can be computed back from the reciprocal of its camera-space Z
		s4.z = Z_S4*B + A;

		// s1 s2 s4 is the flat-bottom triangle
		DrawBottomTriangleInTexture(&s1, &s2, &s4);

		// s3 s2 s4 is the flat-top triangle
		DrawTopTriangleInTexture(&s3, &s2, &s4);
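For reference, here is a minimal sketch of how the split could be driven end to end. This is my own outline rather than the renderer's actual code; it assumes the Vertex fields used above and reuses the s4 computation from the fragment:

#include <utility>  // std::swap

// Hypothetical driver: sort the three vertices by screen-space y,
// handle the already-flat cases, otherwise split at s4 and draw both halves.
void DrawTriangleInTexture(Vertex v1, Vertex v2, Vertex v3)
{
	// Sort so that v1.y <= v2.y <= v3.y (y grows downward on screen)
	if (v1.y > v2.y) std::swap(v1, v2);
	if (v2.y > v3.y) std::swap(v2, v3);
	if (v1.y > v2.y) std::swap(v1, v2);

	if (v1.y == v2.y)
	{
		// Already flat-top: the apex is v3, the flat edge is v1-v2
		DrawTopTriangleInTexture(&v3, &v1, &v2);
	}
	else if (v2.y == v3.y)
	{
		// Already flat-bottom: the apex is v1, the flat edge is v2-v3
		DrawBottomTriangleInTexture(&v1, &v2, &v3);
	}
	else
	{
		// General case: split on the long edge v1-v3 at v2's height
		// Vertex s1 = v1, s2 = v2, s3 = v3, s4;
		// ... compute s4 exactly as in the fragment above, then:
		// DrawBottomTriangleInTexture(&s1, &s2, &s4);
		// DrawTopTriangleInTexture(&s3, &s2, &s4);
	}
}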

 

4. Which space do the rasterized pixel attributes live in?

From the two posts 深入探索透视纹理映射(上/下) and from implementing a software rasterizer myself, I learned that rasterized pixel attributes live in whatever space the triangle's vertex attributes were given in. For example, if the vertex normals of a triangle are in world space, the interpolated per-pixel normals are in world space as well; if the vertex normals are in local (object) space, the per-pixel normals are in local space.

This also explains why, in D3D11, the lighting result is correct as long as the light direction and the pixel normal are expressed in the same space, no matter whether that is local space, world space, tangent space, or even camera space.
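A small self-contained illustration of that point (the rotation here is just a stand-in for any rigid change of space): applying the same transform to both the normal and the light direction leaves their dot product, and therefore the diffuse factor, unchanged:

#include <cmath>
#include <cstdio>

struct Vec3 { float x, y, z; };

float Dot(const Vec3& a, const Vec3& b)
{
	return a.x * b.x + a.y * b.y + a.z * b.z;
}

// Any rigid change of space; a rotation about Z is enough for the demonstration.
Vec3 RotateZ(const Vec3& v, float angle)
{
	float c = std::cos(angle), s = std::sin(angle);
	return Vec3{ c * v.x - s * v.y, s * v.x + c * v.y, v.z };
}

int main()
{
	Vec3 normal   = { 0.0f, 1.0f, 0.0f };   // per-pixel normal expressed in "space A"
	Vec3 lightDir = { 0.6f, 0.8f, 0.0f };   // light direction expressed in the same space

	float factorA = Dot(normal, lightDir);  // diffuse factor computed in space A

	// Move both vectors into another space with the same transform and recompute
	float factorB = Dot(RotateZ(normal, 0.7f), RotateZ(lightDir, 0.7f));

	printf("space A: %.6f   space B: %.6f\n", factorA, factorB);  // equal up to float error
	return 0;
}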

 

 

Finally, some screenshots of the program:

 

 

Press "1" for wireframe mode:

 

 

 

 

 

Press "2" for Gouraud (per-vertex) shading mode:

 

 

 

 

 

Press "3" for Phong (per-fragment) shading mode:

 

Supports perspective-correct texture mapping

Supports diffuse and ambient lighting

Supports bilinear texture filtering

Supports Z-buffer testing

 

 

 

 

 

 

 

 

 

Finally, here are the links to the full project source code:

https://github.com/2047241149/SoftRender

http://download.csdn.net/detail/qq_29523119/9698519

 

 

 

 

 

 
