/*
 * 开发笔记 (dev notes):
 * 积雪效果在后处理阶段不能使用偏导数 (dFdx/dFdy)。
 * 如果你问 GPT 不用偏导数如何计算当前像素的法线，它会告诉你：采样周围像素的坐标来重建法线。
 * 这个思路和坡度/坡向分析差不多，所以就按这个方式实现了。
 * 目前的效果基本正确，但仍有小瑕疵——在快速缩放时能看出来。
 * 深度值的计算方式是反复测试后选定的，目前这种是最好的结果。
 */
/**
 * @description 积雪覆盖后处理效果 (snow-cover post-process stage effect).
 * Screen-space snow: reconstructs per-pixel normals from neighboring depth
 * samples (no derivatives available in a post-process stage) and blends
 * white onto upward-facing surfaces.
 */
import * as Cesium from "cesium";
export default class SnowCoverStageEffect {
  /**
   * Screen-space snow-cover post-process effect.
   * @param {Cesium.Viewer} viewer - The Cesium viewer to attach the stage to.
   * @param {Object} [options] - Optional settings.
   * @param {number} [options.alpha=1.0] - Initial snow blend strength (0..1).
   * @throws {Error} If no viewer is supplied.
   */
  constructor(viewer, options) {
    if (!viewer) throw new Error("no viewer object!");
    options = options || {};
    this.viewer = viewer;
    // Honor a caller-supplied initial alpha; default to fully visible snow.
    this.alpha = options.alpha ?? 1.0;
    this.init();
  }

  /**
   * Creates the post-process stage and adds it to the scene.
   * The alpha uniform is a callback so later changes to `this.alpha`
   * are picked up every frame without touching the stage directly.
   */
  init() {
    this.snowcoverStage = new Cesium.PostProcessStage({
      name: "czm_snowcover",
      fragmentShader: this.snowcover(),
      uniforms: {
        alpha: () => {
          return this.alpha;
        },
      },
    });
    this.viewer.scene.postProcessStages.add(this.snowcoverStage);
  }

  /**
   * Removes the stage from the scene and releases the reference.
   * Note: PostProcessStageCollection.remove() already destroys the stage,
   * so calling Cesium.destroyObject afterwards would be a double destroy.
   */
  destroy() {
    if (!this.viewer || !this.snowcoverStage) return;
    this.viewer.scene.postProcessStages.remove(this.snowcoverStage);
    // Clear the stale reference so show()/changeAlpha() become no-ops.
    this.snowcoverStage = undefined;
  }

  /**
   * Toggles the snow effect on or off.
   * @param {boolean} visible - Whether the stage should be enabled.
   */
  show(visible) {
    // Guard: the stage may already have been destroyed.
    if (this.snowcoverStage) {
      this.snowcoverStage.enabled = visible;
    }
  }

  /**
   * Updates the snow blend strength.
   * Sets `this.alpha` (read by the uniform callback each frame) instead of
   * overwriting the uniform with a raw number, which would orphan the
   * callback and leave `this.alpha` stale.
   * @param {number} value - New blend strength (0..1).
   */
  changeAlpha(value) {
    this.alpha = value;
  }

  /**
   * Returns the GLSL fragment shader for the snow-cover stage.
   * Normals are reconstructed from four neighboring depth samples
   * (derivatives are unavailable in a post-process stage), then snow is
   * blended where the surface normal points away from the globe center.
   * @return {string} GLSL source.
   */
  snowcover() {
    return `
      uniform float alpha;
      uniform sampler2D colorTexture;
      uniform sampler2D depthTexture;
      in vec2 v_textureCoordinates;
      vec4 toEye(in vec2 uv,in float depth){
        vec2 xy = vec2((uv.x*2.-1.),(uv.y*2.-1.));
        vec4 posInCamera = czm_inverseProjection * vec4(xy,depth,1.);
        posInCamera = posInCamera / posInCamera.w;
        return posInCamera;
      }
      float getDepth(in vec4 depth){
        float z_window = czm_unpackDepth(depth);
        z_window = czm_reverseLogDepth(z_window);
        float n_range = czm_depthRange.near;
        float f_range = czm_depthRange.far;
        return(2.*z_window-n_range-f_range)/(f_range-n_range);
      }
      void main(void)
      {
        vec4 white = vec4(1.0,1.0,1.0,1.0);
        vec4 color = texture(colorTexture, v_textureCoordinates);
        vec4 originDepth = texture(depthTexture, v_textureCoordinates);
        float depth = czm_unpackDepth(originDepth);
        vec4 positionEC = toEye(v_textureCoordinates,depth);
        vec4 positionWC = czm_inverseView * positionEC;
        float padx = czm_pixelRatio / czm_viewport.z;
        float pady = czm_pixelRatio / czm_viewport.w;
        float depth2 = getDepth(texture(depthTexture, v_textureCoordinates + vec2(-padx,0.)));
        vec4 eyeCoordinates2 = toEye(v_textureCoordinates + vec2(-padx,0.),depth2);
        float depth3 = getDepth(texture(depthTexture, v_textureCoordinates + vec2(padx,0.)));
        vec4 eyeCoordinates3 = toEye(v_textureCoordinates + vec2(padx,0.),depth3);
        float depth4 = getDepth(texture(depthTexture, v_textureCoordinates + vec2(0.,-pady)));
        vec4 eyeCoordinates4 = toEye(v_textureCoordinates + vec2(0.,-pady),depth4);
        float depth5 = getDepth(texture(depthTexture, v_textureCoordinates + vec2(0.,pady)));
        vec4 eyeCoordinates5 = toEye(v_textureCoordinates + vec2(0.,pady),depth5);
        vec3 dx = eyeCoordinates3.xyz - eyeCoordinates2.xyz;
        vec3 dy = eyeCoordinates4.xyz - eyeCoordinates5.xyz;
        vec3 normalEC = normalize(cross(dy,dx));
        vec3 normalWC = czm_inverseViewRotation * normalEC;
        vec3 upWC = normalize(positionWC.xyz);
        float angle = dot(upWC,normalWC);
        if(depth>=1.0){
          out_FragColor = color;
        }else{
          // out_FragColor = mix(color,white,(angle<=0.1?0.1:angle)*alpha);
          out_FragColor = mix(color,white,clamp(angle,0.,1.*alpha));
        }
      }`;
  }
}