参考资料
CUDA编程模型系列三(矩阵乘)_哔哩哔哩_bilibili
极力推荐的良心课程~
代码片段
#include <stdio.h>
#include <stdlib.h>
// Edge length of the square matrices: each matrix is matrix_size x matrix_size ints.
const int matrix_size = 32;
// Threads per block edge: each block is BLOCK_SIZE x BLOCK_SIZE = 256 threads.
const int BLOCK_SIZE = 16;
// Naive dense matrix multiply: c1 = a1 * b1 for square size x size int matrices
// stored row-major. Expects a 2D launch where gridDim * blockDim covers at least
// size x size threads; one thread computes one output element, with a bounds
// guard for the grid tail. Reads of b1 are coalesced across a warp (x varies
// fastest within a warp); a1/b1 are read-only, so __restrict__ lets the
// compiler use the read-only data cache.
__global__ void matrix_multi(const int* __restrict__ a1, const int* __restrict__ b1,
                             int* __restrict__ c1, const int size)
{
    // Global 2D coordinates of the output element this thread owns.
    int y = blockDim.y * blockIdx.y + threadIdx.y;
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    if (x < size && y < size) {
        // Dot product of row y of a1 with column x of b1.
        int t_sum = 0;
        for (int step = 0; step < size; step++) {
            t_sum += a1[y * size + step] * b1[step * size + x];
        }
        c1[y * size + x] = t_sum;
        // (Per-thread debug printf removed: it serialized 1024 prints per
        // launch and is debugging-only functionality.)
    }
}
// Host driver: allocates pinned host buffers and device buffers, fills both
// inputs with 1s, runs the matrix_multi kernel, and verifies the result.
// With all-ones inputs, every element of the product must equal matrix_size.
// Returns 0 on success, 1 on a wrong result; aborts on any CUDA API error.
int main()
{
    // Report any failing CUDA call and abort immediately: an ignored sticky
    // error would make every later CUDA call fail mysteriously.
    auto check = [](cudaError_t err, const char* what) {
        if (err != cudaSuccess) {
            fprintf(stderr, "%s failed: %s\n", what, cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }
    };

    int *a1, *b1, *c1;
    const int msize = matrix_size * matrix_size * sizeof(int);

    // Pinned host memory (cudaMallocHost): faster H2D/D2H transfers and a
    // prerequisite for true asynchronous copies.
    check(cudaMallocHost((void**)&a1, msize), "cudaMallocHost a1");
    check(cudaMallocHost((void**)&b1, msize), "cudaMallocHost b1");
    check(cudaMallocHost((void**)&c1, msize), "cudaMallocHost c1");

    // Initialize both inputs to all ones so the expected product is known.
    for (int i = 0; i < matrix_size; i++) {
        for (int j = 0; j < matrix_size; j++) {
            a1[i * matrix_size + j] = 1;
            b1[i * matrix_size + j] = 1;
        }
    }

    // Device buffers (never dereference these on the host).
    int *x1, *y1, *z1;
    check(cudaMalloc((void**)&x1, msize), "cudaMalloc x1");
    check(cudaMalloc((void**)&y1, msize), "cudaMalloc y1");
    check(cudaMalloc((void**)&z1, msize), "cudaMalloc z1");

    // Host => Device
    check(cudaMemcpy(x1, a1, msize, cudaMemcpyHostToDevice), "H2D a1");
    check(cudaMemcpy(y1, b1, msize, cudaMemcpyHostToDevice), "H2D b1");

    // Ceil-divide so the grid covers the whole matrix even when matrix_size
    // is not a multiple of BLOCK_SIZE (the kernel bounds-checks the tail).
    int grid_rows = (matrix_size + BLOCK_SIZE - 1) / BLOCK_SIZE;
    int grid_cols = (matrix_size + BLOCK_SIZE - 1) / BLOCK_SIZE;
    dim3 gridDim(grid_cols, grid_rows);
    dim3 blockDim(BLOCK_SIZE, BLOCK_SIZE);

    // Launch: one thread per output element.
    matrix_multi<<<gridDim, blockDim>>>(x1, y1, z1, matrix_size);
    // Kernel launches return no status; a bad launch configuration only
    // surfaces through cudaGetLastError().
    check(cudaGetLastError(), "matrix_multi launch");

    // Device => Host. The blocking cudaMemcpy on the default stream also
    // synchronizes with the kernel, so c1 is complete after this call.
    check(cudaMemcpy(c1, z1, msize, cudaMemcpyDeviceToHost), "D2H c1");

    // Verify the whole result, not just one element: with all-ones inputs,
    // every output element must equal matrix_size.
    int errors = 0;
    for (int i = 0; i < matrix_size * matrix_size; i++) {
        if (c1[i] != matrix_size) {
            errors++;
        }
    }
    if (errors == 0) {
        printf("Result OK: all %d elements equal %d\n",
               matrix_size * matrix_size, matrix_size);
    } else {
        printf("Result WRONG: %d mismatched elements\n", errors);
    }

    // Release pinned host and device memory.
    check(cudaFreeHost(a1), "cudaFreeHost a1");
    check(cudaFreeHost(b1), "cudaFreeHost b1");
    check(cudaFreeHost(c1), "cudaFreeHost c1");
    check(cudaFree(x1), "cudaFree x1");
    check(cudaFree(y1), "cudaFree y1");
    check(cudaFree(z1), "cudaFree z1");
    return errors == 0 ? 0 : 1;
}