<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Test</title>
</head>
<body>
<!-- canvas:用来展示WebGPU渲染的结果 -->
<canvas id="webgpu" width="500" height="500"></canvas>
<script type="module">
//chrome setting:
//"C:\Program Files\Google\Chrome\Application\chrome.exe" --disable-web-security --user-data-dir="D:\angular\webgpu\n" --disable-site-isolation-trials
//
//1,增加一个uniform buffer object(简称为ubo),
//用于传输“model矩阵 乘以 view矩阵 乘以 projection矩阵”的结果矩阵(简称为mvp矩阵),并在每帧被更新
//2.设置顶点
//3.开启面剔除
//4.开启深度测试
// 与“rotatingCube”示例相比,该示例增加了以下的内容:
//一个ubo保存两个立方体的mvp矩阵
//每帧更新两个mvp矩阵数据
//draw两次,分别设置对应的uniformBindGroup
// 配置WebGPU上下文
// ---- WebGPU context setup -------------------------------------------------
// Acquire the GPU adapter and logical device, then configure the canvas
// context to use the platform's preferred swap-chain texture format.
//
// Robustness fix: requestAdapter() resolves to null when WebGPU is
// unavailable; the original would then crash later with an opaque
// TypeError on adapter.requestDevice(). Fail fast with a clear message.
const adapter = await navigator.gpu?.requestAdapter();
if (!adapter) {
throw new Error('WebGPU is not supported in this browser: no GPUAdapter available.');
}
const device = await adapter.requestDevice();
const canvas = document.getElementById('webgpu');
const context = canvas.getContext('webgpu');
const format = navigator.gpu.getPreferredCanvasFormat();
context.configure({
device: device,
format: format,
});
// Cube mesh: 36 vertices (6 faces x 2 triangles x 3 vertices each).
// Interleaved layout, 10 floats per vertex, matching the pipeline's
// vertex buffer description further below.
const cubeVertexArray2 = new Float32Array([
// float4 position, float4 color, float2 uv,
// bottom face (y = -1)
1, -1, 1, 1, 1, 0, 1, 1, 1, 1,
-1, -1, 1, 1, 0, 0, 1, 1, 0, 1,
-1, -1, -1, 1, 0, 0, 0, 1, 0, 0,
1, -1, -1, 1, 1, 0, 0, 1, 1, 0,
1, -1, 1, 1, 1, 0, 1, 1, 1, 1,
-1, -1, -1, 1, 0, 0, 0, 1, 0, 0,
// right face (x = +1)
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, -1, 1, 1, 1, 0, 1, 1, 0, 1,
1, -1, -1, 1, 1, 0, 0, 1, 0, 0,
1, 1, -1, 1, 1, 1, 0, 1, 1, 0,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, -1, -1, 1, 1, 0, 0, 1, 0, 0,
// top face (y = +1)
-1, 1, 1, 1, 0, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 0, 1,
1, 1, -1, 1, 1, 1, 0, 1, 0, 0,
-1, 1, -1, 1, 0, 1, 0, 1, 1, 0,
-1, 1, 1, 1, 0, 1, 1, 1, 1, 1,
1, 1, -1, 1, 1, 1, 0, 1, 0, 0,
// left face (x = -1)
-1, -1, 1, 1, 0, 0, 1, 1, 1, 1,
-1, 1, 1, 1, 0, 1, 1, 1, 0, 1,
-1, 1, -1, 1, 0, 1, 0, 1, 0, 0,
-1, -1, -1, 1, 0, 0, 0, 1, 1, 0,
-1, -1, 1, 1, 0, 0, 1, 1, 1, 1,
-1, 1, -1, 1, 0, 1, 0, 1, 0, 0,
// front face (z = +1)
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
-1, 1, 1, 1, 0, 1, 1, 1, 0, 1,
-1, -1, 1, 1, 0, 0, 1, 1, 0, 0,
-1, -1, 1, 1, 0, 0, 1, 1, 0, 0,
1, -1, 1, 1, 1, 0, 1, 1, 1, 0,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
// back face (z = -1)
1, -1, -1, 1, 1, 0, 0, 1, 1, 1,
-1, -1, -1, 1, 0, 0, 0, 1, 0, 1,
-1, 1, -1, 1, 0, 1, 0, 1, 0, 0,
1, 1, -1, 1, 1, 1, 0, 1, 1, 0,
1, -1, -1, 1, 1, 0, 0, 1, 1, 1,
-1, 1, -1, 1, 0, 1, 0, 1, 0, 0,
]);
//------- Vertex setup --------------------------
// Import the gl-matrix library. ES-module imports are hoisted, so this
// statement works even though it appears mid-script.
import * as glMatrix from './dist/esm/index.js';
// Vertex buffer: GPU-side storage for the cube's interleaved vertex data.
const vertexBuffer = device.createBuffer({
size: cubeVertexArray2.byteLength, // byte length of the vertex data
// usage: serves as a vertex buffer and as a copy destination (writable)
usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST,
});
// Upload the vertex data into the GPU buffer.
device.queue.writeBuffer(vertexBuffer, 0, cubeVertexArray2);
//------------ WGSL shader code -------------------
// Shader module containing both entry points:
//  - vs: transforms each vertex by the per-object MVP matrix bound at
//    @group(0) @binding(0), and forwards uv plus a position remapped
//    from [-1,1] to [0,1] for use as a color.
//  - fs: outputs the interpolated object-space position as the color.
const module = device.createShaderModule({
label: 'triangle shaders with uniforms',
code: `
struct Uniforms {
modelViewProjectionMatrix : mat4x4f//这里不能用分号
};
@group(0) @binding(0) var<uniform> uniforms : Uniforms;
struct VertexOutput {
@builtin(position) Position : vec4f,
@location(0) fragUV : vec2f,
@location(1) fragPosition: vec4f
};
@vertex
fn vs(@location(0) position : vec4f,@location(1) uv : vec2f) -> VertexOutput {
var output : VertexOutput;
output.Position = uniforms.modelViewProjectionMatrix * position;
output.fragUV = uv;
output.fragPosition = 0.5 * (position + vec4f(1.0, 1.0, 1.0, 1.0));
return output;
}
@fragment
fn fs(@location(0) fragUV: vec2<f32>,@location(1) fragPosition: vec4<f32>) -> @location(0) vec4<f32> {
return fragPosition;
}
`,
});
// Interleaved vertex layout (10 floats per vertex):
//   floats [0..3] position (float32x4) -> shader @location(0)
//   floats [8..9] uv       (float32x2) -> shader @location(1)
const cubePositionOffset = 0; // position starts at byte 0
const cubeUVOffset = 4 * 8; // uv starts after 8 floats (32 bytes)
const cubeVertexSize = 4 * 10; // 10 floats = 40 bytes per vertex
// Render pipeline: triangle list with back-face culling and depth testing.
const pipeline = device.createRenderPipeline({
layout: 'auto',
vertex: {
module,
entryPoint: 'vs',
// Map the interleaved vertex buffer onto the shader's inputs.
buffers: [
{
arrayStride: cubeVertexSize,
attributes: [
// @location(0): position
{ shaderLocation: 0, offset: cubePositionOffset, format: 'float32x4' },
// @location(1): uv
{ shaderLocation: 1, offset: cubeUVOffset, format: 'float32x2' },
],
},
],
},
fragment: {
module,
entryPoint: 'fs',
targets: [{ format }],
},
primitive: {
topology: 'triangle-list',
// The cube is a solid, closed mesh, so faces pointing away from the
// camera are always hidden — cull them.
cullMode: 'back',
},
// Depth testing so the fragment closest to the camera is the one drawn.
depthStencil: {
format: 'depth24plus',
depthWriteEnabled: true,
depthCompare: 'less',
},
});
/*
The uniform buffer holds both cubes' MVP matrices, but they cannot be packed
back-to-back: each bound range must start at a multiple of 256 bytes
(minUniformBufferOffsetAlignment). Resulting layout:
  bytes   0- 63: first MVP matrix
  bytes  64-255: padding
  bytes 256-319: second MVP matrix
So the buffer size is 256 + 64 = 320 bytes.
Two bind groups (created below) reference this single buffer, each with its
own offset/size window.
*/
const matrixSize = 4 * 16; // 4x4 matrix of f32 = 64 bytes
const offset = 256; // start of the second matrix (256-byte alignment)
const uniformBufferSize = offset + matrixSize;
const uniformBuffer = device.createBuffer({
size: uniformBufferSize,
usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
});
// One bind group per cube. Both view the same uniform buffer, but through
// different 64-byte windows starting at 256-byte-aligned offsets.
const makeMvpBindGroup = (bufferOffset) =>
device.createBindGroup({
layout: pipeline.getBindGroupLayout(0),
entries: [
{
binding: 0,
resource: {
buffer: uniformBuffer,
offset: bufferOffset,
size: matrixSize,
},
},
],
});
const uniformBindGroup1 = makeMvpBindGroup(0);
const uniformBindGroup2 = makeMvpBindGroup(offset);
// Depth attachment texture, sized to match the canvas.
const depthTexture = device.createTexture({
size: [canvas.width, canvas.height],
format: 'depth24plus',
usage: GPUTextureUsage.RENDER_ATTACHMENT,
});
// Render pass description, reused every frame; the color attachment's view
// is assigned per frame in frame() from the canvas's current texture.
//
// Bug fix: the original used the long-removed pre-release WebGPU keys
// `loadValue` and `depthLoadValue`. The shipped spec spells these
// `clearValue` and `depthClearValue`, so the intended gray background was
// silently ignored and the canvas cleared to the default transparent black.
const renderPassDescriptor = {
colorAttachments: [
{
// view: assigned each frame from context.getCurrentTexture()
loadOp: 'clear',
clearValue: { r: 0.5, g: 0.5, b: 0.5, a: 1.0 },
storeOp: 'store',
},
],
depthStencilAttachment: {
view: depthTexture.createView(),
depthLoadOp: 'clear',
depthClearValue: 1.0,
depthStoreOp: 'store',
},
};
// Fixed camera: the projection and view matrices are computed only once.
// Perspective projection with a 72-degree (2*PI/5) vertical field of view.
const aspect = Math.abs(canvas.width / canvas.height);
const projectionMatrix = glMatrix.mat4.create();
glMatrix.mat4.perspective(projectionMatrix, (2 * Math.PI) / 5, aspect, 1, 100.0);
// Model matrices: the two cubes sit 2 units left / right of the origin.
const modelMatrix1 = glMatrix.mat4.create();
glMatrix.mat4.translate(modelMatrix1, modelMatrix1, glMatrix.vec3.fromValues(-2, 0, 0));
const modelMatrix2 = glMatrix.mat4.create();
glMatrix.mat4.translate(modelMatrix2, modelMatrix2, glMatrix.vec3.fromValues(2, 0, 0));
// Per-cube MVP outputs, rewritten every frame by updateTransformationMatrix().
const modelViewProjectionMatrix1 = glMatrix.mat4.create();
const modelViewProjectionMatrix2 = glMatrix.mat4.create();
// View matrix: camera pulled back 9 units along -Z (computed once, fixed camera).
const viewMatrix = glMatrix.mat4.create();
glMatrix.mat4.translate(viewMatrix, viewMatrix, glMatrix.vec3.fromValues(0, 0, -9));
// Scratch matrices holding the rotated model transforms each frame.
const tmpMat41 = glMatrix.mat4.create();
const tmpMat42 = glMatrix.mat4.create();
// Recompute both cubes' MVP matrices for the current time. Each cube is
// rotated by a fixed angle of 1 radian about a time-varying axis, then
// composed with the (fixed) view and projection matrices.
function updateTransformationMatrix() {
const t = Date.now() / 1000;
// mvp = projection * (view * rotatedModel), accumulated in-place in `out`.
const compose = (out, rotatedModel) => {
glMatrix.mat4.multiply(out, viewMatrix, rotatedModel);
glMatrix.mat4.multiply(out, projectionMatrix, out);
};
glMatrix.mat4.rotate(tmpMat41, modelMatrix1, 1, glMatrix.vec3.fromValues(Math.sin(t), Math.cos(t), 0));
compose(modelViewProjectionMatrix1, tmpMat41);
glMatrix.mat4.rotate(tmpMat42, modelMatrix2, 1, glMatrix.vec3.fromValues(Math.cos(t), Math.sin(t), 0));
compose(modelViewProjectionMatrix2, tmpMat42);
}
//------------------
// Per-frame render loop: refresh both MVP matrices, upload them into their
// respective windows of the uniform buffer, then record one render pass that
// draws the cube twice — same geometry, a different bind group per draw.
function frame() {
updateTransformationMatrix();
// Copy a Float32Array matrix into the uniform buffer at dstOffset.
const uploadMatrix = (dstOffset, mat) => {
device.queue.writeBuffer(uniformBuffer, dstOffset, mat.buffer, mat.byteOffset, mat.byteLength);
};
uploadMatrix(0, modelViewProjectionMatrix1);
uploadMatrix(offset, modelViewProjectionMatrix2);
// The current canvas texture must be fetched inside the frame callback —
// the swap-chain texture is recycled every frame, and using a stale view
// is an error.
renderPassDescriptor.colorAttachments[0].view = context.getCurrentTexture().createView();
const commandEncoder = device.createCommandEncoder();
const passEncoder = commandEncoder.beginRenderPass(renderPassDescriptor);
passEncoder.setPipeline(pipeline);
passEncoder.setVertexBuffer(0, vertexBuffer);
// First cube: bind group windowing bytes [0, 64) of the uniform buffer.
passEncoder.setBindGroup(0, uniformBindGroup1);
passEncoder.draw(36);
// Second cube: bind group windowing bytes [256, 320).
passEncoder.setBindGroup(0, uniformBindGroup2);
passEncoder.draw(36);
passEncoder.end();
device.queue.submit([commandEncoder.finish()]);
requestAnimationFrame(frame);
}
frame();
</script>
</body>
</html>