本文主要讲述Vue与WebGL结合并实现逐片元光照。文中部分代码源自《WebGL编程指南》
代码如下
<template>
<div>
<canvas ref="myglCanvas" width="400" height="400"></canvas>
</div>
</template>
<script>
import Tools from "../lib/tools";
export default {
name: "glCanvas",
// Component state: GLSL source for both shaders plus the WebGL context
// (filled in by setGL() from mounted()).
data() {
return {
// Vertex shader: projects each vertex with the MVP matrix, and passes
// the per-vertex color, the normal transformed by the inverse-transpose
// of the model matrix, and the world-space position on to the fragment
// shader for per-fragment lighting.
VSHEADER_SOURCE: `attribute vec4 a_Position;
attribute vec4 a_Normal;
uniform mat4 u_MvpMatrix;
uniform mat4 u_ModelMatrix;
//逆转置矩阵
uniform mat4 u_NormalMatrix;
varying vec4 v_Color;
varying vec3 v_Normal;
varying vec3 v_Position;
void main(){
vec4 color = vec4(1.0, 1.0, 1.0, 1.0);
gl_Position = u_MvpMatrix* a_Position;
v_Color = color;
//计算顶点经过模型矩阵变换后的法向量
v_Normal = normalize(vec3(u_NormalMatrix * a_Normal));
//计算顶点经过模型矩阵变换后的位置
v_Position = vec3(u_ModelMatrix * a_Position);
}`,
// Fragment shader: diffuse (point light at u_LightPosition) plus ambient
// lighting, evaluated per fragment with the interpolated normal/position.
FSHEADER_SOURCE: `#ifdef GL_ES
precision mediump float;
#endif
//入射光颜色
uniform vec3 u_LightColor;
//光源位置
uniform vec3 u_LightPosition;
//环境光
uniform vec3 u_AmbientLight;
varying vec3 v_Normal;
varying vec3 v_Position;
varying vec4 v_Color;
void main(){
//对法线归一化,因为内插后可能长度不为1
vec3 normal = normalize(v_Normal);
//计算光线方向,并归一化
vec3 lightDirection = normalize(u_LightPosition - v_Position);
//计算光线方向与法向量的点积
float nDotL = max(dot(lightDirection, normal), 0.0);
//漫反射光
vec3 diffuse = u_LightColor * v_Color.rgb * nDotL;
//环境光
vec3 ambient = u_AmbientLight * v_Color.rgb;
//最终颜色
gl_FragColor = vec4(diffuse + ambient, v_Color.a);
}`,
// WebGL rendering context; null until mounted() calls setGL().
gl: null
};
},
methods: {
setGL: function() {
this.gl = this.$refs.myglCanvas.getContext("webgl");
},
initVertexBuffers: function(gl) {
var SPHERE_DIV = 13;
var i, ai, si, ci;
var j, aj, sj, cj;
var p1, p2;
var positions = [];
var indices = [];
// Generate coordinates
for (j = 0; j <= SPHERE_DIV; j++) {
aj = (j * Math.PI) / SPHERE_DIV;
sj = Math.sin(aj);
cj = Math.cos(aj);
for (i = 0; i <= SPHERE_DIV; i++) {
ai = (i * 2 * Math.PI) / SPHERE_DIV;
si = Math.sin(ai);
ci = Math.cos(ai);
positions.push(si * sj); // X
positions.push(cj); // Y
positions.push(ci * sj); // Z
}
}
// Generate indices
for (j = 0; j < SPHERE_DIV; j++) {
for (i = 0; i < SPHERE_DIV; i++) {
p1 = j * (SPHERE_DIV + 1) + i;
p2 = p1 + (SPHERE_DIV + 1);
indices.push(p1);
indices.push(p2);
indices.push(p1 + 1);
indices.push(p1 + 1);
indices.push(p2);
indices.push(p2 + 1);
}
}
if (!this.initArrayBuffer(gl,"a_Position",new Float32Array(positions),gl.FLOAT,3))
return -1;
if (!this.initArrayBuffer(gl,"a_Normal",new Float32Array(positions),gl.FLOAT,3))
return -1;
// Unbind the buffer object
gl.bindBuffer(gl.ARRAY_BUFFER, null);
// Write the indices to the buffer object
var indexBuffer = gl.createBuffer();
if (!indexBuffer) {
console.log("Failed to create the buffer object");
return -1;
}
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, indexBuffer);
gl.bufferData(
gl.ELEMENT_ARRAY_BUFFER,
new Uint16Array(indices),
gl.STATIC_DRAW
);
return indices.length;
},
initArrayBuffer: function(gl, attribute, data, type, num) {
// Create a buffer object
var buffer = gl.createBuffer();
if (!buffer) {
console.log("Failed to create the buffer object");
return false;
}
// Write date into the buffer object
gl.bindBuffer(gl.ARRAY_BUFFER, buffer);
gl.bufferData(gl.ARRAY_BUFFER, data, gl.STATIC_DRAW);
// Assign the buffer object to the attribute variable
var a_attribute = gl.getAttribLocation(gl.program, attribute);
if (a_attribute < 0) {
console.log("Failed to get the storage location of " + attribute);
return false;
}
gl.vertexAttribPointer(a_attribute, num, type, false, 0, 0);
// Enable the assignment of the buffer object to the attribute variable
gl.enableVertexAttribArray(a_attribute);
return true;
},
// Upload every uniform the shaders need: light color/position, ambient
// light, and the model / MVP / normal matrices.
// NOTE(review): `mat4` is never imported in this file — it appears to rely
// on the gl-matrix library being available globally; verify.
setMatrix:function(gl){
// Get the storage locations of uniform variables
let u_ModelMatrix = gl.getUniformLocation(gl.program, 'u_ModelMatrix');
let u_MvpMatrix = gl.getUniformLocation(gl.program, 'u_MvpMatrix');
let u_NormalMatrix = gl.getUniformLocation(gl.program, 'u_NormalMatrix');
let u_LightColor = gl.getUniformLocation(gl.program, 'u_LightColor');
let u_LightPosition = gl.getUniformLocation(gl.program, 'u_LightPosition');
let u_AmbientLight = gl.getUniformLocation(gl.program, 'u_AmbientLight');
if (!u_ModelMatrix || !u_MvpMatrix || !u_NormalMatrix || !u_LightColor || !u_LightPosition || !u_AmbientLight) {
console.log('Failed to get the storage location');
return;
}
// Set the light color (0.8 grey — slightly dimmer than pure white)
gl.uniform3f(u_LightColor, 0.8, 0.8, 0.8);
// Set the point-light POSITION (in world coordinates), not a direction
gl.uniform3f(u_LightPosition, 5.0, 8.0, 7.0);
// Set the ambient light (red)
gl.uniform3f(u_AmbientLight, 1, 0, 0);
let mvpMatrix = mat4.create(); // Model view projection matrix
let normalMatrix = mat4.create(); // Transformation matrix for normals
// Create the three matrices that compose the MVP matrix
let modelMatrix = mat4.create(); // model matrix
let viewMatrix = mat4.create(); // view matrix
let projMatrix = mat4.create(); // projection matrix
// Calculate the model matrix
mat4.fromYRotation(modelMatrix, Math.PI/2.0); // Rotate around the y-axis
mat4.lookAt(viewMatrix, [0,0,6], [0,0,0], [0,1,0]);
// Read the canvas size to compute the aspect ratio
let canvasWidth = this.$refs.myglCanvas.width;
let canvasHeight = this.$refs.myglCanvas.height;
// Convert the 30° field of view from degrees to radians
const angleRad =30 * Math.PI / 180;
// Set the projection matrix
mat4.perspective(projMatrix, angleRad, canvasWidth/canvasHeight, 1, 100);
// MVP matrix: projection * view * model
let mv_Matrix = mat4.create();
mat4.mul(mv_Matrix, viewMatrix,modelMatrix);
mat4.mul(mvpMatrix, projMatrix,mv_Matrix);
// Normal matrix = transpose(inverse(model)) so normals stay perpendicular
mat4.invert(normalMatrix, modelMatrix);
mat4.transpose(normalMatrix, normalMatrix);
// Pass the model matrix to u_ModelMatrix
gl.uniformMatrix4fv(u_ModelMatrix, false, modelMatrix);
// Pass the model view projection matrix to u_mvpMatrix
gl.uniformMatrix4fv(u_MvpMatrix, false, mvpMatrix);
// Pass the transformation matrix for normals to u_NormalMatrix
gl.uniformMatrix4fv(u_NormalMatrix, false, normalMatrix);
}
},
mounted() {
this.setGL();
Tools.initShaders(this.gl, this.VSHEADER_SOURCE, this.FSHEADER_SOURCE);
let n = this.initVertexBuffers(this.gl);
if (n < 0) {
console.log('Failed to set the vertex information');
return;
}
// Set the clear color and enable the depth test
this.gl.clearColor(0.0, 0.0, 0.0, 1.0);
this.gl.enable(this.gl.DEPTH_TEST);
this.setMatrix(this.gl);
// Clear color and depth buffer
this.gl.clear(this.gl.COLOR_BUFFER_BIT | this.gl.DEPTH_BUFFER_BIT);
// Draw the cube
this.gl.drawElements(this.gl.TRIANGLES, n,this. gl.UNSIGNED_SHORT, 0);
}
};
</script>
<style scoped>
</style>
第30行 计算顶点经过模型矩阵变换后的法向量。计算方法是将变换之前的法向量乘以模型矩阵的逆转置矩阵,即逆矩阵的转置。代码中第205、206行实现模型矩阵的逆转置矩阵。
第51-55行 计算漫反射光:
漫反射光颜色 = 入射光颜色 × 表面基底色 × cosθ
如果光线向量与法向量均为单位向量,那么
cosθ = 光线向量 · 法向量
因此可以得出:
漫反射光颜色 = 入射光颜色 × 表面基底色 ×(光线向量 · 法向量)
第59行 计算表面的反射光颜色:
表面反射光颜色 = 漫反射光颜色 + 环境反射光颜色
运行程序,效果如下
更多内容请扫码关注我的微信公众号,或者在微信里搜索公众号webgis学习,我会不定期更新自己的web方面的学习心得。