随缘记录开发的部分自己较为喜欢的效果:
1.RGB + 抖动 + 缩放
Shader实现流程(主要步骤)
1.色散效果
色彩的本质其实就是rgb三个通道的组合,因此实现色散效果,将RGB通道分别按照要求进行偏移即可
varying vec2 vTexCoord;
uniform sampler2D uTexture0;
uniform float textureWidth;
uniform float textureHeight;

// Chromatic aberration: sample the R/G/B channels at slightly shifted
// texture coordinates; the shift grows cubically away from the centre.
void main() {
    vec2 resolution = vec2(textureWidth, textureHeight);
    // Aberration strength in texels (0.5/10 + 15 = 15.05).
    float aberration = 0.5 / 10.0 + 15.0;
    vec2 texelSize = 1.0 / resolution;
    // Remap UV from [0,1] to [-1,1] around the image centre.
    vec2 centered = (vTexCoord.xy - 0.5) * 2.0;
    // |centered|^2 * centered gives a cubic falloff from the centre.
    vec2 offset = dot(centered, centered) * centered * aberration;
    vec2 uvR = vTexCoord.xy; //+ 0.2 * texelSize * offset;
    vec2 uvB = vTexCoord.xy + texelSize * offset;
    vec4 outColor;
    outColor.r = texture2D(uTexture0, uvR).r;
    outColor.g = texture2D(uTexture0, vTexCoord.xy).g;
    outColor.b = texture2D(uTexture0, uvB).b;
    outColor.a = 1.0;
    gl_FragColor = outColor;
}
2.缩放
缩放和旋转类似,只要按比例改变传入的四个顶点坐标的值即可
-- Scale the 2x2 matrix in place: x multiplies elements 1 and 3,
-- y multiplies elements 2 and 4 (m is 1-indexed).
function Mat2f:scale(x, y)
    local m = self.m
    m[1], m[2] = m[1] * x, m[2] * y
    m[3], m[4] = m[3] * x, m[4] * y
end
3.径向模糊
什么是径向模糊?
径向模糊,是一种从中心向外呈辐射状逐渐模糊的效果。径向模糊的特点是以某个像素为中心向外辐射状扩散,因此需要采样的像素在原像素和中心点像素的连线上,不同连线上的点不会相互影响。简单地说,就是沿该像素与中心点之间的连线进行采样,然后将这些采样点颜色的加权平均作为该像素的颜色。
设计思路:
- 第一步:确定径向模糊的中心点,通常取图像的正中心点。
- 第二步:计算采样像素与中心点的距离,根据距离确定偏移程度,即离中心点越远,偏移量越大。
- 第三步:将采样点的颜色值做加权求和,本例使用平均求和。
- 第四步:将前面的结果与原图像做一个 lerp 插值合成
Shader主要实现方式
uniform sampler2D uTexture0;
varying vec2 vTexCoord;
uniform float blurSize;//模糊尺寸
uniform vec2 anchor;//模糊锚点
uniform int Samples;
// Cheap deterministic pseudo-random value in [0, 1) derived from seed.
float hash(highp float seed) {
    highp float s = sin(seed) * 43758.5453;
    return s - floor(s); // identical to fract(s)
}
// True when both components of uv lie in [0, 1), i.e. the sample
// position falls inside the texture.
bool isUvValid(vec2 uv) {
    return all(greaterThanEqual(uv, vec2(0.0))) && all(lessThan(uv, vec2(1.0)));
}
// Radial blur: average Samples taps taken along the line through the
// anchor and the current fragment, jittered to hide banding.
void main() {
    // No need to pre-scale to a true circle here: the scale factors are
    // applied along dir and cancel out when mapping back.
    vec2 dir = vTexCoord - anchor;
    // Scales span [1/(r+1) - 1, (r+1) - 1] so the blur reaches both
    // toward and away from the anchor (r = blurSize).
    float nearScale = -blurSize / (blurSize + 1.);
    float farScale = blurSize;
    // max(..., 1.0) guards Samples == 1 against a division by zero.
    float deltaScale = (farScale - nearScale) / max(float(Samples - 1), 1.0);
    // Random per-pixel offset within one step: turns banding into noise.
    float rand = hash(length(dir) + 1.);
    vec4 color = vec4(0.);
    int count = 0;
    for (int i = 0; i < Samples; i++) {
        vec2 uv = vTexCoord + dir * (nearScale + (float(i) + rand) * deltaScale);
        if (isUvValid(uv)) {
            color += texture2D(uTexture0, uv);
            count++;
        }
    }
    // If every tap fell outside the texture, fall back to the source
    // pixel instead of dividing by zero (undefined output).
    if (count > 0) {
        gl_FragColor = color / float(count);
    } else {
        gl_FragColor = texture2D(uTexture0, vTexCoord);
    }
}
2.像素拉伸:
Shader实现流程(主要步骤)
1.像素排序
比较当前传入的像素点前后临近的像素点的rgb值,根据规则进行覆盖(利用两个buffer,B每次偏移一个像素点,A在B的基础上每次进行像素偏移,从而达到分离的效果)
SortA
uniform int count;//current frame counter
uniform float textureWidth;
uniform float textureHeight;
uniform sampler2D u_texture1;
varying vec2 v_texCoord;//was used below but never declared: required to compile

// Pixel-sort pass A: compare each pixel with its horizontal neighbours
// and overwrite it according to the odd/even-column rules below.
void main()
{
    //pixel-space resolution and coordinate
    vec2 iResolution = vec2(textureWidth, textureHeight);
    vec2 uv = v_texCoord * iResolution;//current pixel coordinate
    float du = 1.0 / iResolution.x;//one texel step in u
    vec3 y = texture2D(u_texture1, v_texCoord).rgb;//current pixel rgb
    //left/right neighbours of the current pixel
    vec3 y_l = texture2D(u_texture1, v_texCoord - vec2(du, 0.)).rgb;
    vec3 y_r = texture2D(u_texture1, v_texCoord + vec2(du, 0.)).rgb;
    //odd columns: if the left neighbour is brighter, it overwrites us
    if (fract(float(uv.x)/2.0) > 0.5)
    {
        if (length(y_l) > length(y))
        {
            y = y_l;
        }
    }
    //even columns: if the right neighbour is darker, it overwrites us
    //(shifts even columns left)
    if (fract(float(uv.x)/2.0) < 0.5)
    {
        if (length(y) > length(y_r))
        {
            y = y_r;
        }
    }
    gl_FragColor = vec4(y, 1.0);
    //first few frames: pass the input through untouched.
    //NOTE: original sampled undeclared "u_texture"; the only sampler
    //bound in this pass is u_texture1.
    if (count < 4)
    {
        vec4 color = texture2D(u_texture1, v_texCoord);
        gl_FragColor = color;
    }
}
SortB
uniform sampler2D u_texture;
varying vec2 v_texCoord;
uniform float textureWidth;
uniform float textureHeight;

// Pixel-sort pass B: on even columns, replace the pixel with its left
// neighbour when that neighbour is brighter. (The odd-column / right-
// neighbour variant was disabled in the original.)
void main()
{
    vec2 res = vec2(textureWidth, textureHeight);
    vec2 pixelPos = v_texCoord * res;   // pixel-space coordinate
    float du = 1.0 / res.x;             // one texel step in u
    float dv = 1.0 / res.y;             // kept for parity; unused
    vec3 cur = texture2D(u_texture, v_texCoord).rgb;
    // Horizontal neighbours (right fetched but currently unused).
    vec3 left  = texture2D(u_texture, v_texCoord - vec2(du, 0.)).rgb;
    vec3 right = texture2D(u_texture, v_texCoord + vec2(du, 0.)).rgb;
    // Even column (fract of half-coordinate below 0.5) and a brighter
    // left neighbour: the neighbour wins.
    bool evenColumn = fract(float(pixelPos.x) / 2.0) < 0.5;
    if (evenColumn && length(left) > length(cur)) {
        cur = left;
    }
    gl_FragColor = vec4(cur, 1.0);
}
2.高光
提取高光
uniform sampler2D u_texture;
varying vec2 v_texCoord;
uniform float textureWidth;
uniform float textureHeight;
// Convert an RGB colour (components in [0,1]) to HSL, every output
// channel normalised to [0,1].
vec3 RGB2HSL(vec3 src)
{
    float maxc = max(max(src.r, src.g), src.b);
    float minc = min(min(src.r, src.g), src.b);
    float L = (maxc + minc) / 2.0;
    // Achromatic colour: hue and saturation are zero.
    if (maxc == minc)
        return vec3(0.0, 0.0, L);
    float H, S;
    // Optimized, branch-free saturation.
    // NOTE: at low precision N - (A+B) != N - A - B.
    float temp1 = maxc - minc;
    S = mix(temp1 / (2.0 - maxc - minc), temp1 / (maxc + minc), step(L, 0.5));
    // Optimized, branch-free hue: comp is a one-hot selector of the
    // maximum channel (r takes priority over g over b on ties).
    vec3 comp;
    comp.xy = vec2(equal(src.xy, vec2(maxc)));
    float comp_neg = 1.0 - comp.x;
    comp.y *= comp_neg;
    comp.z = (1.0 - comp.y) * comp_neg;
    // temp1 == maxc - minc, reused instead of recomputing it.
    vec3 result = comp * vec3((src.g - src.b) / temp1,
        2.0 + (src.b - src.r) / temp1,
        4.0 + (src.r - src.g) / temp1);
    H = result.x + result.y + result.z;
    H *= 60.0;
    // Branchless wrap of non-positive hue into range:
    // equivalent to if(H < 0.0) H += 360.0;
    H += step(H, 0.0) * 360.0;
    return vec3(H / 360.0, S, L); // H(0~1), S(0~1), L(0~1)
}
//hsl转rgb
// Convert an HSL colour (each component in [0,1]) back to RGB.
vec3 HSL2RGB(vec3 src) // H, S, L
{
    float q = (src.z < 0.5) ? src.z * (1.0 + src.y) : (src.z + src.y - (src.y * src.z));
    float p = 2.0 * src.z - q;
    // Per-channel hue offsets; 0.333 approximates 1/3 (kept as-is to
    // preserve the original output exactly).
    vec3 dst = vec3(src.x + 0.333, src.x, src.x - 0.333);
    // Optimized: wrap each channel hue into [0,1).
    dst = fract(dst);
    // Optimized, branch-free piecewise hue->rgb ramp ("Plan B"):
    // weight/weight2/weight3/weight4 are one-hot selectors for the
    // segments [0,1/6), [1/6,1/2), [1/2,2/3), [2/3,1).
    vec3 weight = step(dst, vec3(1.0 / 6.0));
    vec3 weight_neg = 1.0 - weight;
    vec3 weight2 = weight_neg * step(dst, vec3(0.5));
    vec3 weight2_neg = weight_neg * (1.0 - weight2);
    vec3 weight3 = weight2_neg * step(dst, vec3(2.0 / 3.0));
    vec3 weight4 = (1.0 - weight3) * weight2_neg;
    float q_p = q - p;
    dst = mix(dst, p + q_p * 6.0 * dst, weight);
    dst = mix(dst, vec3(q), weight2);
    dst = mix(dst, p + q_p * ((2.0 / 3.0) - dst) * 6.0, weight3);
    dst = mix(dst, vec3(p), weight4);
    return dst;
}