Note: UE5 Source Code Analysis - Digital Human Rendering

1. Skin

Key files

  1. Engine\Shaders\Private\SeparableSSS.ush: implements the SSSS (Separable Subsurface Scattering) shader; it runs as a post-process over the screen-space result
  2. Engine\Source\Runtime\Engine\Private\Rendering\BurleyNormalizedSSS.cpp: computes the scattering kernel weights
  3. Engine\Source\Runtime\Engine\Private\Rendering\SubsurfaceProfile.cpp: preprocesses the subsurface profile texture

Papers:

  1. Approximate Reflectance Profiles for Efficient Subsurface Scattering https://graphics.pixar.com/library/ApproxBSSRDF/paper.pdf
  2. Separable Subsurface Scattering http://iryoku.com/separable-sss/
  3. Real-Time Realistic Skin Translucency http://www.iryoku.com/translucency/downloads/Real-Time-Realistic-Skin-Translucency.pdf

Engine\Source\Runtime\Engine\Private\Rendering\BurleyNormalizedSSS.cpp

There is also a standalone open-source C++ implementation of this technique: https://github.com/iryoku/separable-sss
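For reference, the profile that Burley_ScatteringProfile below evaluates is the normalized diffusion profile from the Pixar paper (its Eq. (2), with the searchlight fit of Eq. (8) giving the scaling factor used by GetSearchLightDiffuseScalingFactor). Here A is the surface albedo, l the diffuse mean free path, and s the scaling factor:

$$R(r) \;=\; A\,s\,\frac{e^{-s r/\ell} + e^{-s r/(3\ell)}}{8\pi\,\ell\,r},\qquad s \;=\; 3.5 + 100\,(A-0.33)^4$$

In the code, D = 1/S and R = r/L, so the returned value is effectively r·R(r); the 1/r factor cancels, which is convenient for the kernel weighting below.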

//Approximates the diffusion profile with the sum of two exponentials divided by the distance r; see the paper Approximate Reflectance Profiles for Efficient Subsurface Scattering
inline float Burley_ScatteringProfile(float r, float A,float S, float L)
{   //2PIR(r)r
	float D = 1 / S;
	float R = r / L;
	const float Inv8Pi = 1.0 / (8 * PI);
	float NegRbyD = -R / D;
	float RrDotR = A*FMath::Max((exp(NegRbyD) + exp(NegRbyD / 3.0)) / (D*L)*Inv8Pi, 0.0);
	return RrDotR;
}
//Fitted curve for the scaling factor
float GetSearchLightDiffuseScalingFactor(float SurfaceAlbedo)
{
	return 3.5 + 100 * FMath::Pow(SurfaceAlbedo - 0.33, 4);
}
void ComputeMirroredBSSSKernel(FLinearColor* TargetBuffer, uint32 TargetBufferSize,
	FLinearColor SurfaceAlbedo, FLinearColor DiffuseMeanFreePath, float ScatterRadius)
{
	check(TargetBuffer);
	check(TargetBufferSize > 0);

	uint32 nNonMirroredSamples = TargetBufferSize;
	int32 nTotalSamples = nNonMirroredSamples * 2 - 1;

	//This computation follows Eq. (8) of the paper Approximate Reflectance Profiles for Efficient Subsurface Scattering
	FVector ScalingFactor = GetSearchLightDiffuseScalingFactor(SurfaceAlbedo);
	
	// we could generate Out directly but the original code from SeparableSSS wasn't done like that so we convert it later
	// .a is in mm
	check(nTotalSamples < 64);

	FLinearColor kernel[64];
	{
		const float Range = (nTotalSamples > 20 ? 3.0f : 2.0f);
		// tweak constant
		const float Exponent = 2.0f;

		// Calculate the offsets:
		float step = 2.0f * Range / (nTotalSamples - 1);
		for (int i = 0; i < nTotalSamples; i++)
		{
			float o = -Range + float(i) * step;
			float sign = o < 0.0f ? -1.0f : 1.0f;
			kernel[i].A = Range * sign * FMath::Abs(FMath::Pow(o, Exponent)) / FMath::Pow(Range, Exponent);
		}
		// Center sample should always be zero, but might not be due to potential roundoff error.
		kernel[nTotalSamples / 2].A = 0.0f;

		//Scale the profile sampling radius. This scale enables the sampling between [-3*SpaceScale,+3*SpaceScale] instead of 
		//the default [-3,3] range when fetching kernel parameters.
		const float SpaceScale = ScatterRadius * 10.0f;// from cm to mm

		// Calculate the weights:
		for (int32 i = 0; i < nTotalSamples; i++)
		{
			float w0 = i > 0 ? FMath::Abs(kernel[i].A - kernel[i - 1].A) : 0.0f;
			float w1 = i < nTotalSamples - 1 ? FMath::Abs(kernel[i].A - kernel[i + 1].A) : 0.0f;
			float area = (w0 + w1) / 2.0f;
			
			//This computation follows Eq. (2) of the paper Approximate Reflectance Profiles for Efficient Subsurface Scattering
			FVector t = area * Burley_ScatteringProfile(FMath::Abs(kernel[i].A)*SpaceScale, SurfaceAlbedo, ScalingFactor,DiffuseMeanFreePath);
			kernel[i].R = t.X;
			kernel[i].G = t.Y;
			kernel[i].B = t.Z;
		}

		// We still need to do a small tweak to get the radius to visually match. Multiplying by 4.0 seems to fix it.
		const float StepScale = 4.0f;
		for (int32 i = 0; i < nTotalSamples; i++)
		{
			kernel[i].A *= StepScale;
		}

		// We want the offset 0.0 to come first:
		FLinearColor t = kernel[nTotalSamples / 2];

		for (int i = nTotalSamples / 2; i > 0; i--)
		{
			kernel[i] = kernel[i - 1];
		}
		kernel[0] = t;

		// Normalize the weights in RGB
		{
			FVector sum = FVector(0, 0, 0);

			for (int i = 0; i < nTotalSamples; i++)
			{
				sum.X += kernel[i].R;
				sum.Y += kernel[i].G;
				sum.Z += kernel[i].B;
			}

			for (int i = 0; i < nTotalSamples; i++)
			{
				kernel[i].R /= sum.X;
				kernel[i].G /= sum.Y;
				kernel[i].B /= sum.Z;
			}
		}

		/* we do that in the shader for better quality with half res

		// Tweak them using the desired strength. The first one is:
		//     lerp(1.0, kernel[0].rgb, strength)
		kernel[0].R = FMath::Lerp(1.0f, kernel[0].R, SubsurfaceColor.R);
		kernel[0].G = FMath::Lerp(1.0f, kernel[0].G, SubsurfaceColor.G);
		kernel[0].B = FMath::Lerp(1.0f, kernel[0].B, SubsurfaceColor.B);

		for (int i = 1; i < nTotalSamples; i++)
		{
			kernel[i].R *= SubsurfaceColor.R;
			kernel[i].G *= SubsurfaceColor.G;
			kernel[i].B *= SubsurfaceColor.B;
		}*/
	}

	// generate output (remove negative samples)
	{
		// center sample
		TargetBuffer[0] = kernel[0];

		// all positive samples
		for (uint32 i = 0; i < nNonMirroredSamples - 1; i++)
		{
			TargetBuffer[i + 1] = kernel[nNonMirroredSamples + i];
		}
	}
}
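A minimal usage sketch (not engine code; the engine builds the profile texture from this kernel in SubsurfaceProfile.cpp, and the parameter values below are illustrative placeholders only):

// Illustrative call only: 13 half-kernel entries, i.e. 25 total samples, well under check(nTotalSamples < 64)
FLinearColor Kernel[13];
const FLinearColor SurfaceAlbedo(0.85f, 0.65f, 0.55f);     // example albedo, not an engine default
const FLinearColor DiffuseMeanFreePath(1.2f, 0.6f, 0.3f);  // example values, not an engine default
const float ScatterRadius = 1.2f;                           // in cm; converted to mm inside

ComputeMirroredBSSSKernel(Kernel, 13, SurfaceAlbedo, DiffuseMeanFreePath, ScatterRadius);

// Kernel[0] is the center sample, Kernel[1..12] the positive offsets.
// .R/.G/.B hold the normalized per-channel weights and .A the sample offset (in mm);
// the shader reuses each positive weight on both sides of the center.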

Engine\Shaders\Private\SeparableSSS.ush (Separable Subsurface Scattering)
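The name refers to the key approximation in the shader below: the radially symmetric 2D diffusion profile is replaced by a separable product of two 1D kernels, so the expensive 2D screen-space convolution collapses into a horizontal pass followed by a vertical pass (the dir parameter). A sketch of the approximation, with E the irradiance and a the 1D kernel computed above:

$$\iint R_d\!\left(\sqrt{x'^2+y'^2}\right)E(x-x',\,y-y')\,dx'\,dy' \;\approx\; \int a(x')\left[\int a(y')\,E(x-x',\,y-y')\,dy'\right]dx'$$

This reduces the per-pixel cost from O(N^2) samples to O(2N).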

//-----------------------------------------------------------------------------
// Separable SSS Reflectance Pixel Shader

// @param texcoord The usual quad texture coordinates.
// @param dir Direction of the blur: First pass:   float2(1.0, 0.0), Second pass:  float2(0.0, 1.0), i.e. the horizontal and then the vertical direction
// @param initStencil indicates whether the stencil buffer should be initialized. Should be set to 'true' for the first pass if not previously initialized, to enable optimization of the second pass.
float4 SSSSBlurPS(uint2 BufferPos, float2 BufferUV, float2 dir, bool initStencil, float2 Extent)
{
    // Fetch color of current pixel:
    float4 colorM = SSSSSampleSceneColorPoint(BufferUV);

	// we store the depth in alpha
	float OutDepth = colorM.a;

	colorM.a = GetMaskFromDepthInAlpha(colorM.a);

	// we don't need to process pixels that are not part of the subsurface scattering (optimization, also prevents divide by zero later on)
	BRANCH if(!colorM.a)
	{
		// todo: need to check for proper clear
//		discard;
		return 0.0f;
	}

	// 0..1
	float SSSStrength = GetSubsurfaceStrength(BufferUV);

#if !SSSS_COMPUTESHADER
	// Initialize the stencil buffer in case it was not already available:
    if (initStencil) // (Checked in compile time, it's optimized away)
        if (SSSStrength < 1 / 256.0f) discard;
#endif
	float SSSScaleX = SubsurfaceParams.x;

	float scale = SSSScaleX / OutDepth;

    // Calculate the final step to fetch the surrounding pixels:
    float2 finalStep = scale * dir;

	// ideally this comes from a half res buffer as well - there are some minor artifacts
	finalStep *= SSSStrength; // Modulate it using the opacity (0..1 range)
    
	FSeparableFilterParameters SeparableFilterParameters;

#if STRATA_ENABLED
	const FStrataSubsurfaceData SSSData = LoadStataSSSData(BufferUV);
	// SSS_PROFILE_ID_PERPIXEL is managed through the Burley passes
	const uint SubsurfaceProfileInt = SSSData.bIsProfile ? SSSData.ProfileId : SSS_PROFILE_ID_PERPIXEL;
	SeparableFilterParameters.SubsurfaceProfileInt = SubsurfaceProfileInt;
	FillBurleyParameters(SSSData, SeparableFilterParameters);
#else
	const FGBufferData GBufferData = GetGBufferData(BufferUV);
	// 0..255, which SubSurface profile to pick
	// ideally this comes from a half res buffer as well - there are some minor artifacts
	const uint SubsurfaceProfileInt = ExtractSubsurfaceProfileInt(GBufferData);
	SeparableFilterParameters.SubsurfaceProfileInt = SubsurfaceProfileInt;
#endif

    // Accumulate the center sample:
    float3 colorAccum = 0;
	// >0 to avoid division by 0; not 100% correct but the error is not visible
	float3 colorInvDiv = 0.00001f;

	// center sample
	half3  CentralKernelWeight = GetSubsurfaceProfileKernel(0, SeparableFilterParameters).rgb;

	colorInvDiv += CentralKernelWeight;
	colorAccum = colorM.rgb * CentralKernelWeight;
	float3 BoundaryColorBleed = GetSubsurfaceProfileBoundaryColorBleed(SubsurfaceProfileInt);

	// Accumulate the other samples:
    SSSS_UNROLL
	for (int i = 1; i < SSSS_N_KERNELWEIGHTCOUNT; i++) 
	{
		// Kernel.a = 0..SUBSURFACE_KERNEL_SIZE (radius)
		half4 Kernel = GetSubsurfaceProfileKernel(i, SeparableFilterParameters);

		float4 LocalAccum = 0;

		float2 UVOffset = Kernel.a * finalStep;
		// The kernel is symmetrical, we want to use that property.
		// Half the GetSubsurfaceProfileKernel() calls (more expensive if done by texture)
		// Half the weighting computations (saves 3mul per lookup sample)
		SSSS_UNROLL
		for (int Side = -1; Side <= 1; Side += 2)
		{
			// Fetch color and depth for current sample:
			float2 LocalUV = BufferUV + UVOffset * Side;
			float4 color = SSSSSampleSceneColor(LocalUV);
			uint LocalSubsurfaceProfileInt = SSSSSampleProfileId(LocalUV);
			float3 ColorTint = LocalSubsurfaceProfileInt == SubsurfaceProfileInt ? 1.0f : BoundaryColorBleed;


			float LocalDepth = color.a;
			color.a = GetMaskFromDepthInAlpha(color.a);

#if SSSS_FOLLOW_SURFACE == 1
			// If the difference in depth is huge, we weight the sample less or not at all
			float s = saturate(12000.0f / 400000 * SubsurfaceParams.y *
	//        float s = saturate(300.0f/400000 * SubsurfaceParams.y *
				abs(OutDepth - LocalDepth));

			color.a *= 1 - s;
#endif
			// approximation, ideally we would reconstruct the mask with GetMaskFromDepthInAlpha() and do manual bilinear filter
			// needed?
			color.rgb *= color.a * ColorTint;

			// Accumulate left and right 
			LocalAccum += color;
		}

		// Accumulate to final value (left and right sample with the same weight)
		colorAccum += Kernel.rgb * LocalAccum.rgb;
		colorInvDiv += Kernel.rgb * LocalAccum.a;
	}

	// normalize (some samples are rejected because of depth or the other material is no SSS, compensate for that)
	// done for each color channel to avoid color shift
	float3 OutColor = colorAccum / colorInvDiv; 

	// alpha stored the SceneDepth (0 if there is no subsurface scattering)
    return float4(OutColor, OutDepth);
}


Rendering flow: BSDF (scattering) = BRDF (reflection) + BTDF (transmission)

1. Preprocess the profile texture using the computed kernel

2. After shading, filter the screen-space pixels with the separable blur (see the sketch below)
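As a conceptual CPU-side sketch of step 2 (not the engine shader; SSSSBlurPS above additionally scales the step by depth and subsurface strength, weights samples by depth differences, and tints samples from other profiles), a separable blur with the mirrored kernel could look like this. The Color struct and the pixelsPerMM conversion factor are assumptions for the illustration:

#include <algorithm>
#include <cmath>
#include <vector>

struct Color { float r, g, b, a; };   // for kernel entries, a stores the offset in mm

static Color Fetch(const std::vector<Color>& img, int w, int h, int x, int y)
{
	x = std::clamp(x, 0, w - 1);
	y = std::clamp(y, 0, h - 1);
	return img[y * w + x];
}

// One 1D pass. dirX/dirY = (1,0) for the horizontal pass, (0,1) for the vertical pass.
void BlurPass(const std::vector<Color>& in, std::vector<Color>& out, int w, int h,
              const std::vector<Color>& kernel, float pixelsPerMM, int dirX, int dirY)
{
	for (int y = 0; y < h; ++y)
	for (int x = 0; x < w; ++x)
	{
		const Color c = Fetch(in, w, h, x, y);
		float accR = c.r * kernel[0].r;            // center sample
		float accG = c.g * kernel[0].g;
		float accB = c.b * kernel[0].b;

		for (size_t i = 1; i < kernel.size(); ++i) // positive offsets, reused on both sides
		{
			const int step = int(std::lround(kernel[i].a * pixelsPerMM));
			for (int side = -1; side <= 1; side += 2)
			{
				const Color s = Fetch(in, w, h, x + dirX * step * side, y + dirY * step * side);
				accR += s.r * kernel[i].r;
				accG += s.g * kernel[i].g;
				accB += s.b * kernel[i].b;
			}
		}
		out[y * w + x] = { accR, accG, accB, c.a };
	}
}

// Usage: BlurPass(scene, tmp, W, H, kernel, PixelsPerMM, 1, 0);  // horizontal
//        BlurPass(tmp, result, W, H, kernel, PixelsPerMM, 0, 1); // vertical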

2. Hair

Key files:

  1. Physics simulation code: Engine\Plugins\Runtime\HairStrands
  2. Rendering code: Engine\Shaders\Private\HairStrands and Engine\Source\Runtime\Renderer\Private\HairStrands

Papers:

  1. Light Scattering from Human Hair Fibers http://graphics.stanford.edu/papers/hair/hair-sg03final.pdf
  2. A Data-Driven Light Scattering Model for Hair https://graphics.pixar.com/library/DataDrivenHairScattering/paper.pdf
  3. Dual Scattering Approximation for Fast Multiple Scattering in Hair http://www.cemyuksel.com/research/dualscattering/dualscattering.pdf

Engine\Shaders\Private\HairBsdf.ush

float Hair_g(float B, float Theta)
{
	return exp(-0.5 * Pow2(Theta) / (B * B)) / (sqrt(2 * PI) * B);
}

float Hair_F(float CosTheta)
{
	const float n = 1.55;
	const float F0 = Pow2((1 - n) / (1 + n));
	return F0 + (1 - F0) * Pow5(1 - CosTheta);
}
///
// Hair BSDF
// Approximation to HairShadingRef using concepts from the following papers:
// [Marschner et al. 2003, "Light Scattering from Human Hair Fibers"]
// [Pekelis et al. 2015, "A Data-Driven Light Scattering Model for Hair"]

float3 HairShading( FGBufferData GBuffer, float3 L, float3 V, half3 N, float Shadow, FHairTransmittanceData HairTransmittance, float InBacklit, float Area, uint2 Random )
{
	// to prevent NaN with decals
	// OR-18489 HERO: IGGY: RMB on E ability causes blinding hair effect
	// OR-17578 HERO: HAMMER: E causes blinding light on heroes with hair
	float ClampedRoughness = clamp(GBuffer.Roughness, 1/255.0f, 1.0f);

	//const float3 DiffuseN	= OctahedronToUnitVector( GBuffer.CustomData.xy * 2 - 1 );
	const float Backlit	= min(InBacklit, HairTransmittance.bUseBacklit ? GBuffer.CustomData.z : 1);

#if HAIR_REFERENCE
	// todo: ClampedRoughness is missing for this code path
	float3 S = HairShadingRef( GBuffer, L, V, N, Random );
	//float3 S = HairShadingMarschner( GBuffer, L, V, N );
#else
	// N is the vector parallel to hair pointing toward root
	
	//Empirical constants from the paper Light Scattering from Human Hair Fibers, used below for the R, TT and TRT computations
	
	const float VoL       = dot(V,L);                                                      
	const float SinThetaL = clamp(dot(N,L), -1.f, 1.f);
	const float SinThetaV = clamp(dot(N,V), -1.f, 1.f);
	float CosThetaD = cos( 0.5 * abs( asinFast( SinThetaV ) - asinFast( SinThetaL ) ) );

	//CosThetaD = abs( CosThetaD ) < 0.01 ? 0.01 : CosThetaD;

	const float3 Lp = L - SinThetaL * N;
	const float3 Vp = V - SinThetaV * N;
	const float CosPhi = dot(Lp,Vp) * rsqrt( dot(Lp,Lp) * dot(Vp,Vp) + 1e-4 );
	const float CosHalfPhi = sqrt( saturate( 0.5 + 0.5 * CosPhi ) );
	//const float Phi = acosFast( CosPhi );
	
	float n = 1.55;
	//float n_prime = sqrt( n*n - 1 + Pow2( CosThetaD ) ) / CosThetaD;
	float n_prime = 1.19 / CosThetaD + 0.36 * CosThetaD;

	float Shift = 0.035;
	float Alpha[] =
	{
		-Shift * 2,
		Shift,
		Shift * 4,
	};	
	float B[] =
	{
		Area + Pow2(ClampedRoughness),
		Area + Pow2(ClampedRoughness) / 2,
		Area + Pow2(ClampedRoughness) * 2,
	};

	float3 S = 0;
	//Mp is the longitudinal scattering term, Np the azimuthal scattering term, Fp the Fresnel term, and Tp the absorption term
	//R
	if (HairTransmittance.ScatteringComponent & HAIR_COMPONENT_R)
	{
		const float sa = sin(Alpha[0]);
		const float ca = cos(Alpha[0]);
		float Shift = 2 * sa * (ca * CosHalfPhi * sqrt(1 - SinThetaV * SinThetaV) + sa * SinThetaV);
		float BScale = HairTransmittance.bUseSeparableR ? sqrt(2.0) * CosHalfPhi : 1;
		float Mp = Hair_g(B[0] * BScale, SinThetaL + SinThetaV - Shift);
		float Np = 0.25 * CosHalfPhi;
		float Fp = Hair_F(sqrt(saturate(0.5 + 0.5 * VoL)));
		S += Mp * Np * Fp * (GBuffer.Specular * 2) * lerp(1, Backlit, saturate(-VoL));
	}

	// TT
	if (HairTransmittance.ScatteringComponent & HAIR_COMPONENT_TT)
	{
		float Mp = Hair_g( B[1], SinThetaL + SinThetaV - Alpha[1] );

		float a = 1 / n_prime;
		//float h = CosHalfPhi * rsqrt( 1 + a*a - 2*a * sqrt( 0.5 - 0.5 * CosPhi ) );
		//float h = CosHalfPhi * ( ( 1 - Pow2( CosHalfPhi ) ) * a + 1 );
		float h = CosHalfPhi * ( 1 + a * ( 0.6 - 0.8 * CosPhi ) );
		//float h = 0.4;
		//float yi = asinFast(h);
		//float yt = asinFast(h / n_prime);
		
		float f = Hair_F( CosThetaD * sqrt( saturate( 1 - h*h ) ) );
		float Fp = Pow2(1 - f);
		//float3 Tp = pow( GBuffer.BaseColor, 0.5 * ( 1 + cos(2*yt) ) / CosThetaD );
		//float3 Tp = pow( GBuffer.BaseColor, 0.5 * cos(yt) / CosThetaD );
		float3 Tp = 0;
		if (HairTransmittance.bUseLegacyAbsorption)
		{
			Tp = pow(abs(GBuffer.BaseColor), 0.5 * sqrt(1 - Pow2(h * a)) / CosThetaD);
		}
		else
		{
			// Compute absorption color which would match user intent after multiple scattering
			const float3 AbsorptionColor = HairColorToAbsorption(GBuffer.BaseColor);
			Tp = exp(-AbsorptionColor * 2 * abs(1 - Pow2(h * a) / CosThetaD));
		}

		//float t = asin( 1 / n_prime );
		//float d = ( sqrt(2) - t ) / ( 1 - t );
		//float s = -0.5 * PI * (1 - 1 / n_prime) * log( 2*d - 1 - 2 * sqrt( d * (d - 1) ) );
		//float s = 0.35;
		//float Np = exp( (Phi - PI) / s ) / ( s * Pow2( 1 + exp( (Phi - PI) / s ) ) );
		//float Np = 0.71 * exp( -1.65 * Pow2(Phi - PI) );
		float Np = exp( -3.65 * CosPhi - 3.98 );

		S += Mp * Np * Fp * Tp * Backlit;
	}

	// TRT
	if (HairTransmittance.ScatteringComponent & HAIR_COMPONENT_TRT)
	{
		float Mp = Hair_g( B[2], SinThetaL + SinThetaV - Alpha[2] );
		
		//float h = 0.75;
		float f = Hair_F( CosThetaD * 0.5 );
		float Fp = Pow2(1 - f) * f;
		//float3 Tp = pow( GBuffer.BaseColor, 1.6 / CosThetaD );
		float3 Tp = pow(abs(GBuffer.BaseColor), 0.8 / CosThetaD );

		//float s = 0.15;
		//float Np = 0.75 * exp( Phi / s ) / ( s * Pow2( 1 + exp( Phi / s ) ) );
		float Np = exp( 17 * CosPhi - 16.78 );

		S += Mp * Np * Fp * Tp;
	}
#endif

	if (HairTransmittance.ScatteringComponent & HAIR_COMPONENT_MULTISCATTER)
	{
		S  = EvaluateHairMultipleScattering(HairTransmittance, ClampedRoughness, S);
		S += KajiyaKayDiffuseAttenuation(GBuffer, L, V, N, Shadow);
	}

	S = -min(-S, 0.0);
	return S;
}
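Written out, the two helpers at the top of the file are a normalized Gaussian lobe (used as the longitudinal term Mp) and a Schlick Fresnel approximation with a fixed hair index of refraction of 1.55:

$$g(B,\theta)=\frac{1}{\sqrt{2\pi}\,B}\,e^{-\theta^{2}/(2B^{2})},\qquad F(\cos\theta)=F_{0}+(1-F_{0})(1-\cos\theta)^{5},\quad F_{0}=\left(\frac{1-n}{1+n}\right)^{2},\; n=1.55$$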


HairStrands (Marschner model): S = reflection (R) + transmission (TT) + internal reflection (TRT)
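In this factorization each lobe is the product of a longitudinal term, an azimuthal term and an attenuation term; in the shader above the attenuation is split into a Fresnel factor and an absorption factor, matching the S += Mp * Np * Fp * Tp accumulation:

$$S \;=\; \sum_{p\,\in\,\{R,\,TT,\,TRT\}} M_{p}\,N_{p}\,A_{p},\qquad A_{p}\approx F_{p}\,T_{p}$$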

References:

  1. "GPU Gems 3": A Summary of Realistic Skin Rendering Techniques (in Chinese) https://zhuanlan.zhihu.com/p/42433792

  2. Approximate Reflectance Profiles for Efficient Subsurface Scattering https://graphics.pixar.com/library/ApproxBSSRDF/paper.pdf

  3. Separable Subsurface Scattering http://iryoku.com/separable-sss/

  4. Real-Time Realistic Skin Translucency http://www.iryoku.com/translucency/downloads/Real-Time-Realistic-Skin-Translucency.pdf

  5. A Brief Analysis of UE4 Hair Strands (in Chinese) https://zhuanlan.zhihu.com/p/128669105

  6. Light Scattering from Human Hair Fibers http://graphics.stanford.edu/papers/hair/hair-sg03final.pdf

  7. A Data-Driven Light Scattering Model for Hair https://graphics.pixar.com/library/DataDrivenHairScattering/paper.pdf

  8. Dual Scattering Approximation for Fast Multiple Scattering in Hair http://www.cemyuksel.com/research/dualscattering/dualscattering.pdf
