上次的Demo做了一个单独的体绘制效果,稍微改动下就可以做一些很基础的三维标量数据可视化了。我们在渲染海洋标量场的时候,有时候需要加上个地形之类的东西,这个地形是不透明的,有时会穿插在数据区域中,比如一些岛屿。
这次我们在场景中加一点不透明的物体。
其关键是要在最后的颜色输出处加上一个opaqueVisiblity * opaqueColor
return min(volumetricColor, 1.0f) + opaqueVisiblity * opaqueColor;
opaqueVisiblity是不透明物体的可见度,如果这个不透明物体在数据区域后面,它反射的光线会被volume不断吸收。opaqueColor是其反射的颜色,也可以让它自发光,那样就更简单一点。
还记得单独绘制volume的步骤吗?
1.确定光线发射点和发射方向
2.找到光线与volume的交点,也就是光线在Volume上的入射点
3.从Volume上的入射点开始RayMarching,直到离开数据体
这次我们需要在1和2中间加一步,确定光线与不透明物体的交点,这样我们在第二步中就可以先判断一下,如果光线先碰到不透明物体,那么就不需要做第三步的RayMarching了,直接用phong或其他光照模型做个简单的渲染即可。如果光线先碰到了volume,则在RayMarching的时候需要多计算一个不透明物体的可见度opaqueVisiblity,它会随着光线步进不断衰减。
这样,我们的思路就很清晰了:
1.确定光线发射点和发射方向
2.找到光线与不透明物体的交点
3.找到光线与volume的交点
4.1若光线先与不透明物体相交,则直接Phong
4.2若光线先与Volume相交,则RayMarching,计算Volume的颜色,同时计算不透明物体的可见度opaqueVisiblity,然后用Phong画不透明物体,最后叠加。
接下来就是代码了
先看Render(…)函数,在RayMarching之前加一个
float opaqueVisiblity = 1.0f;
在Volume中进行Ray Marching的时候加一句
opaqueVisiblity *= BeerLambert(ABSORPTION_COEFFICIENT * value, marchSize);
衰减程度只与该点的密度也就是数据值和步进长度有关,其实还应考虑吸收的光,毕竟有衰减就有吸收,吸收的量应当加在volume的自发光里?但这里就不考虑那么多了,先按简单的来,看看效果。
当然,只计算opaqueVisiblity是不行的,还需要计算opaqueColor。
opaqueColor的计算分两步走,先计算相交点以及物体在此处的法线。
//Ray-sphere intersection via the quadratic formula; returns the nearest hit
//depth t (or -1.0 on a miss) and writes the outward surface normal at the hit.
float IntersectSphere(in vec3 rayOrigin, in vec3 rayDirection, out vec3 normal)
{
normal = vec3(0, 0, 0);
vec3 eMinusC = rayOrigin - mSphere.Position;
float dDotD = dot(rayDirection, rayDirection);
//Discriminant of the quadratic |origin + t*dir - center|^2 = r^2
float discriminant = dot(rayDirection, (eMinusC)) * dot(rayDirection, (eMinusC))
- dDotD * (dot(eMinusC, eMinusC) - mSphere.Radius * mSphere.Radius);
if (discriminant < 0.0)
return -1.0;
//Smaller root = the intersection closer to the ray origin
float firstIntersect = (dot(-rayDirection, eMinusC) - sqrt(discriminant))
/ dDotD;
float t = firstIntersect;
normal = normalize(rayOrigin + rayDirection * t - mSphere.Position);
return t;
}
还是因为没有顶点数据,这里在PixelShader中虚构了一个球。有顶点数据的话不需要如此复杂。
然后在RayMarching结束后计算光照:
//https://www.shadertoy.com/view/wssBR8
//Lambertian diffuse term: N.L scaled by the diffuse albedo, clamped to [0,1].
vec3 Diffuse(in vec3 normal, in vec3 lightVec, in vec3 diffuse)
{
float nDotL = dot(normal, lightVec);
return clamp(nDotL * diffuse, 0.0, 1.0);
}
//Accumulates specular + diffuse + ambient contributions for the opaque surface into 'color'.
void CalculateLighting(vec3 position, vec3 normal, vec3 reflectionDirection, inout vec3 color)
{
vec3 lightDirection = vec3(-1.0,1.0,-1.0);//directional light
float lightDistance = length(lightDirection);
lightDirection /= lightDistance;//normalize
vec3 lightColor = vec3(1.0,.0,.0);//red light
color += lightColor * pow(max(dot(reflectionDirection, lightDirection), 0.0), 4.0);//specular lobe, shininess 4
color += lightColor * Diffuse(normal, lightDirection, vec3(1.0));
color += GetAmbientLight() * vec3(1.0);
}
if( opaqueDepth>.0 && opaqueVisiblity > ABSORPTION_CUTOFF)//碰到了不透明物体且反射光线没被完全吸收
{
vec3 position = rayOrigin + opaqueDepth * rayDirection;
vec3 reflectionDirection = reflect( rayDirection, normal);
CalculateLighting(position, normal, reflectionDirection, opaqueColor);
}
一定要注意的是,在RayMarching过程中还应加上一句:
if(volumeDepth>MaxDepth)//在离开Volume之前先碰到了不透明物体,则停止计算
continue;
如果提前碰到了不透明物体,需要停止RayMarching。
以下是简化后的全部代码,也可在VolumeRendering + OpaqueSphere查看完整的效果。
#define LARGE_NUMBER 1e20
#define MAX_VOLUME_MARCH_STEPS 160//maximum number of ray-march steps inside the volume
#define MAX_SDF_DETECT_STEPS 15//maximum probe steps used to locate the volume boundary
#define MARCH_STRIDE 0.4//fixed ray-march step length (inside the volume)
#define ABSORPTION_COEFFICIENT 0.5//Beer-Lambert absorption coefficient
#define ABSORPTION_CUTOFF 0.25//below this visibility the opaque object counts as fully hidden
//Opaque sphere embedded in the scene.
struct Sphere{
vec3 Position;//center
float Radius;
};
Sphere mSphere = Sphere(
vec3(0,0,0),
8.0
);
//Axis-aligned box bounding the volume data.
struct Box{
vec3 Position;//center
//NOTE(review): sdBox treats its size argument as half-extents, while
//GetVolumeValue treats EdgeLength as a full edge length — confirm which
//convention is intended.
vec3 EdgeLength;
};
Box mBox = Box(
vec3(0,0,0),
vec3(16.0)
);
//Simple pinhole camera description.
struct Camera
{
vec3 Position;//base position (only .y is read while CameraOrbit drives the eye point)
vec3 LookAt;//point the camera aims at
float ImageHeight;//image-plane height//width follows from the screen aspect ratio
float FocalDistance;//focal length
};
Camera mCamera = Camera(
vec3(120, 20, -165),
vec3(0, 0, 0),
2.0,
7.0
);
//Ray-sphere intersection via the quadratic formula.
//Returns the depth t of the nearest intersection in front of the ray origin,
//or a negative value when the ray misses (callers treat t<=0 as "no hit").
//normal: outward surface normal at the hit point (zero vector on miss).
float IntersectSphere(in vec3 rayOrigin, in vec3 rayDirection, out vec3 normal)
{
    normal = vec3(0, 0, 0);
    vec3 eMinusC = rayOrigin - mSphere.Position;
    float dDotD = dot(rayDirection, rayDirection);
    //Discriminant of the quadratic |origin + t*dir - center|^2 = r^2
    float discriminant = dot(rayDirection, (eMinusC)) * dot(rayDirection, (eMinusC))
        - dDotD * (dot(eMinusC, eMinusC) - mSphere.Radius * mSphere.Radius);
    if (discriminant < 0.0)
        return -1.0;
    float sqrtDisc = sqrt(discriminant);
    //Near root = intersection closest to the ray origin
    float t = (dot(-rayDirection, eMinusC) - sqrtDisc) / dDotD;
    //Fix: if the near root lies behind the origin (camera inside the sphere),
    //fall back to the far root so interior views still register a hit.
    //Exterior rays are unaffected.
    if (t < 0.0)
        t = (dot(-rayDirection, eMinusC) + sqrtDisc) / dDotD;
    normal = normalize(rayOrigin + rayDirection * t - mSphere.Position);
    return t;
}
//https://iquilezles.org/www/articles/distfunctions/distfunctions.htm
//Exact signed distance from point p (expressed relative to the box center)
//to an axis-aligned box with half-extents b: negative inside, positive outside.
float sdBox( vec3 p, vec3 b )
{
    vec3 d = abs(p) - b;
    float outsideDist = length(max(d, 0.0));
    float insideDist = min(max(d.x, max(d.y, d.z)), 0.0);
    return outsideDist + insideDist;
}
//Signed distance from 'pos' to the volume boundary (negative inside the volume).
float QueryVolumetricDistanceField( in vec3 pos)
{
//NOTE(review): sdBox interprets its second argument as half-extents, so the
//box rendered here spans +/-EdgeLength around the center, while GetVolumeValue
//computes its sampling corner as Position - EdgeLength/2 — the two disagree;
//confirm the intended box size (EdgeLength vs EdgeLength*0.5).
float sdfValue= sdBox( pos- mBox.Position, mBox.EdgeLength);
return sdfValue;
}
//Sphere-traces the volume SDF to find where the ray first reaches its surface.
//Returns the hit depth (distance from the ray origin), or -1.0 when the ray
//gets past maxD first (occluded by an opaque object, or a complete miss).
float IntersectRayMarch(in vec3 rayOrigin, in vec3 rayDirection, float maxD)
{
    //Too coarse a threshold makes the boundary test imprecise and produces
    //visible curved seam lines on the volume surface.
    const float threshold = MARCH_STRIDE;
    float depth = 0.0f;
    for (int i = 0; i < MAX_SDF_DETECT_STEPS; ++i)
    {
        float d = QueryVolumetricDistanceField(rayOrigin + rayDirection * depth);
        if (d < threshold || depth > maxD)
            break;
        depth += d;//safe to advance by the SDF value: nothing is closer than this
    }
    return depth >= maxD ? -1.0 : depth;
}
//Moves the camera eye point along a circular orbit around the scene origin.
//speedRatio scales the angular velocity driven by iTime.
vec3 CameraOrbit(float speedRatio)
{
    float angle = iTime * speedRatio;
    const float orbitRadius = 165.0;
    float height = mCamera.Position.y + 50.0 /** sin(angle*4.0)*/;
    return vec3(orbitRadius * cos(angle), height, orbitRadius * sin(angle));
}
//Builds the primary ray for one pixel of a simple pinhole camera.
//_uv: pixel coordinate in [0,1]^2; _aspectRatio: viewport width/height.
//_rayOrigin/_rayDirection: resulting ray (direction is normalized).
void SetCamera(in vec2 _uv, in float _aspectRatio, out vec3 _rayOrigin, out vec3 _rayDirection)
{
    float ImageWidth = mCamera.ImageHeight * _aspectRatio;
    //vec3 ImagePosition = mCamera.Position;
    vec3 ImagePosition = CameraOrbit(0.3);
    vec3 CameraView = mCamera.LookAt - ImagePosition;
    float ViewLength = length(CameraView);
    vec3 CameraViewDir = CameraView / ViewLength;
    //Fix: the cross product of two non-perpendicular unit vectors is not unit
    //length (the view direction has a y component here), which scaled the
    //camera basis and stretched the image — normalize both basis vectors.
    vec3 CameraRight = normalize(cross(CameraViewDir, vec3(0, 1, 0)));
    vec3 CameraUp = normalize(cross(CameraRight, CameraViewDir));
    vec3 focalPoint = ImagePosition - mCamera.FocalDistance * CameraViewDir;//focal point sits behind the image plane
    vec3 ImagePoint = ImagePosition;//start from the image-plane center
    //Offset the image point according to the uv coordinate
    ImagePoint += CameraRight * (_uv.x * 2.0 - 1.0) * ImageWidth *.5;
    ImagePoint += CameraUp * (_uv.y * 2.0 - 1.0) * mCamera.ImageHeight *.5;
    _rayOrigin = focalPoint;
    _rayDirection = normalize(ImagePoint - focalPoint);
}
//Constant ambient term, used both as the volume's emission tint and as the
//ambient contribution in opaque shading.
vec3 GetAmbientLight()
{
    const float ambientIntensity = 0.03;
    return vec3(ambientIntensity);
}
//https://www.shadertoy.com/view/wssBR8
//Beer-Lambert law: fraction of light transmitted through 'dist' units of a
//medium with the given absorption coefficient.
float BeerLambert(float absorption, float dist)
{
    float opticalDepth = absorption * dist;
    return exp(-opticalDepth);
}
//Samples the scalar field at 'pos' from the 3D texture and keeps only a thin,
//time-varying band of values; everything outside the band reads as empty (0).
float GetVolumeValue(vec3 pos)
{
    const float scale = 96.0;//32.0*(2.0+sin(iTime));
    vec3 corner = mBox.Position - mBox.EdgeLength / 2.0;
    //Sample a sub-region of the 3D texture
    float value = texture(iChannel0, (pos - corner) / scale).x;
    float bandLow = 0.5 + 0.2 * sin(iTime);//band sweeps with time
    bool insideBand = (value >= bandLow) && (value <= bandLow + 0.1);
    return insideBand ? value : 0.0;
}
//https://www.shadertoy.com/view/wssBR8
//Lambertian diffuse term: N.L scaled by the diffuse albedo, clamped to [0,1].
vec3 Diffuse(in vec3 normal, in vec3 lightVec, in vec3 diffuse)
{
    return clamp(dot(normal, lightVec) * diffuse, 0.0, 1.0);
}
//Accumulates a simple Phong-style result into 'color' for an opaque surface:
//a specular lobe, a Lambert diffuse term, and a constant ambient term.
void CalculateLighting(vec3 position, vec3 normal, vec3 reflectionDirection, inout vec3 color)
{
    //Directional light
    vec3 lightDirection = vec3(-1.0,1.0,-1.0);
    lightDirection /= length(lightDirection);//normalize
    vec3 lightColor = vec3(1.0,.0,.0);
    //Specular: reflected view direction against the light, shininess 4
    float specular = pow(max(dot(reflectionDirection, lightDirection), 0.0), 4.0);
    color += lightColor * specular;
    //Diffuse
    color += lightColor * Diffuse(normal, lightDirection, vec3(1.0));
    //Ambient
    color += GetAmbientLight() * vec3(1.0);
}
//Traces one primary ray: intersects the opaque sphere and the volume box,
//ray-marches through the volume accumulating emission while attenuating the
//visibility of the opaque object behind it, then composites the two as
//min(volumeColor,1) + visibility * opaqueColor.
vec3 Render( in vec3 rayOrigin, in vec3 rayDirection)
{
    vec3 normal;
    float opaqueDepth = IntersectSphere(rayOrigin, rayDirection, normal);
    //Depth of the nearest opaque surface; marching must stop there
    float MaxDepth = opaqueDepth<.0?LARGE_NUMBER:opaqueDepth;
    //Find the volume boundary (with a mesh supplied from outside, the depth
    //could be read directly; this box is generated in the shader, so we
    //locate its boundary by ray-marching the SDF)
    float volumeDepth = IntersectRayMarch(rayOrigin, rayDirection, MaxDepth);
    vec3 volumetricColor = vec3(0.0f);
    vec3 opaqueColor = vec3(0.0f);
    float opaqueVisiblity = 1.0f;//transmittance toward the opaque object
    //Ray-march starting from the volume boundary
    if(volumeDepth > 0.0)//the ray reaches the volume (not occluded, not a miss)
    {
        float signedDistance = .0;
        for(int i = 0; i < MAX_VOLUME_MARCH_STEPS; i++)
        {
            volumeDepth += max(MARCH_STRIDE, signedDistance);//outside the volume, skip ahead by the SDF value
            if(volumeDepth>MaxDepth)
                break;//fix: the opaque surface is reached first — 'continue' only burned the remaining iterations, since depth can never decrease
            vec3 position = rayOrigin + volumeDepth * rayDirection;
            signedDistance = QueryVolumetricDistanceField(position);
            if(signedDistance < 0.0f)//inside the volume
            {
                float value = GetVolumeValue(position);
                //Beer-Lambert attenuation of whatever lies behind this sample
                opaqueVisiblity *= BeerLambert(ABSORPTION_COEFFICIENT * value, MARCH_STRIDE);
                volumetricColor += value * GetAmbientLight();
            }
        }
    }
    if( opaqueDepth>.0 && opaqueVisiblity > ABSORPTION_CUTOFF)//hit an opaque object whose reflected light is not fully absorbed
    {
        vec3 position = rayOrigin + opaqueDepth*rayDirection;
        vec3 reflectionDirection = reflect( rayDirection, normal);
        CalculateLighting(position, normal, reflectionDirection, opaqueColor);
    }
    return min(volumetricColor, 1.0f) + opaqueVisiblity * opaqueColor;
}
//Shadertoy entry point: build this pixel's camera ray and shade it.
void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
    vec2 uv = fragCoord.xy / iResolution.xy;
    float aspectRatio = iResolution.x / iResolution.y;
    vec3 rayOrigin;
    vec3 rayDirection;
    //Inputs: uv + aspect ratio; outputs: the primary ray
    SetCamera(uv, aspectRatio, rayOrigin, rayDirection);
    fragColor = vec4(Render(rayOrigin, rayDirection), 1.0);
}