Part 2
I. CUDA Thread Hierarchy and Indexing
1. The CUDA programming model takes the thread as its basic unit of execution and organizes threads into a hierarchy: threads, thread blocks (blocks), and thread grids (grids).
(1) Thread
i. All threads execute the same kernel function
ii. Threads execute in parallel
iii. Each thread runs on a CUDA core
(2) Thread block (a group of threads)
i. Executes on a single SM (streaming multiprocessor)
ii. Threads within the same block can cooperate with one another (see the sketch after this list)
(3) Thread grid (a group of thread blocks)
i. The blocks of a single grid may execute across multiple SMs
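As a concrete illustration of that cooperation, here is a minimal sketch in which the threads of one block exchange data through shared memory and synchronize with `__syncthreads()`. The kernel name, the 256-thread block size, and the divisibility assumption are mine, not from the original text.

```cpp
// Minimal sketch of block-level cooperation (names are illustrative).
// Each thread stages one element in shared memory; after the barrier,
// every thread can safely read a value loaded by a *different* thread.
__global__ void neighbor_shift(const int *in, int *out, int n)
{
    __shared__ int tile[256];            // one slot per thread; launch with 256 threads/block
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n)
        tile[threadIdx.x] = in[idx];     // stage this thread's element
    __syncthreads();                     // wait until the whole block has loaded
    if (idx < n)                         // assumes n is a multiple of blockDim.x
        out[idx] = tile[(threadIdx.x + 1) % blockDim.x];  // read a neighbor's element
}
```

A matching launch would be `neighbor_shift<<<n / 256, 256>>>(d_in, d_out, n);`. Without the barrier, a thread could read a shared-memory slot before its neighbor has written it.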
2. The built-in variables that describe a thread's position in this hierarchy are threadIdx, blockIdx, blockDim, and gridDim. The snippet below reads each of them:
```cpp
// Thread index: the thread's position within its block
unsigned int tx = threadIdx.x;
unsigned int ty = threadIdx.y;
unsigned int tz = threadIdx.z;
// Block index: the block's position within the grid
unsigned int bx = blockIdx.x;
unsigned int by = blockIdx.y;
unsigned int bz = blockIdx.z;
// Block dim: the dimensions of the block (threads per block)
unsigned int bdx = blockDim.x;
unsigned int bdy = blockDim.y;
unsigned int bdz = blockDim.z;
// Grid dim: the dimensions of the grid (blocks per grid)
unsigned int gdx = gridDim.x;
unsigned int gdy = gridDim.y;
unsigned int gdz = gridDim.z;
```
In the figure below, the grid has dimensions 3×2 and each thread block has dimensions 5×3.
![](https://img-blog.csdnimg.cn/img_convert/1f6e6915135f13eb46814b218c708863.png)
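Combining these built-ins gives every thread a unique global coordinate. The fragment below is my own illustration keyed to the figure above (not part of the original post) and would appear inside a kernel body:

```cpp
// Inside a kernel launched with gridDim = (3, 2) and blockDim = (5, 3),
// as in the figure: global_x ranges over 0..14 and global_y over 0..5.
unsigned int global_x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int global_y = blockIdx.y * blockDim.y + threadIdx.y;
// Row-major flattening to a single 1D index, useful for linear buffers:
unsigned int global_idx = global_y * (gridDim.x * blockDim.x) + global_x;
```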
3. CUDA execution flow
(1) Load the kernel function.
(2) A thread grid is assigned to a device (an NVIDIA GPU).
(3) Based on the first parameter of the execution configuration (<<<...>>>), i.e., the grid dimensions, the GigaThread engine distributes blocks to SMs. The threads of one block always run on the same SM, but a single SM can execute multiple blocks.
(4) Based on the second parameter of the execution configuration, i.e., the block dimensions, the warp schedulers schedule the threads in groups of 32 called warps (the device query below shows these hardware quantities).
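To see the hardware quantities that drive steps (3) and (4), you can query the device through the CUDA runtime API. This is a small standalone sketch, assuming device 0; the fields printed are standard cudaDeviceProp members.

```cpp
#include <stdio.h>
#include <cuda_runtime.h>

int main(void)
{
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);   // properties of device 0
    printf("Device:            %s\n", prop.name);
    printf("SM count:          %d\n", prop.multiProcessorCount);
    printf("Warp size:         %d\n", prop.warpSize);          // 32 on current NVIDIA GPUs
    printf("Max threads/block: %d\n", prop.maxThreadsPerBlock);
    return 0;
}
```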
II. Experiment
Matrix multiplication example. The program below computes C = A × B for integer matrices on the GPU, with one thread per output element, then repeats the computation on the CPU and checks that the two results agree.
```cpp
#include <stdio.h>
#include <stdlib.h>   // for rand()

#define BLOCK_SIZE 16

// Each thread computes one element c[row][col] of the m x k result.
__global__ void gpu_matrix_mult(int *a, int *b, int *c, int m, int n, int k)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int sum = 0;
    if (col < k && row < m)   // guard threads that fall outside the matrix
    {
        for (int i = 0; i < n; i++)
        {
            sum += a[row * n + i] * b[i * k + col];
        }
        c[row * k + col] = sum;
    }
}

// Reference implementation on the CPU, used to verify the GPU result.
void cpu_matrix_mult(int *h_a, int *h_b, int *h_result, int m, int n, int k)
{
    for (int i = 0; i < m; ++i)
    {
        for (int j = 0; j < k; ++j)
        {
            int tmp = 0;
            for (int h = 0; h < n; ++h)
            {
                tmp += h_a[i * n + h] * h_b[h * k + j];
            }
            h_result[i * k + j] = tmp;
        }
    }
}

int main(int argc, char const *argv[])
{
    int m = 100;
    int n = 100;
    int k = 100;

    // Page-locked (pinned) host memory speeds up host<->device transfers.
    int *h_a, *h_b, *h_c, *h_cc;
    cudaMallocHost((void **) &h_a, sizeof(int) * m * n);
    cudaMallocHost((void **) &h_b, sizeof(int) * n * k);
    cudaMallocHost((void **) &h_c, sizeof(int) * m * k);
    cudaMallocHost((void **) &h_cc, sizeof(int) * m * k);

    // Fill A (m x n) and B (n x k) with random values.
    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < n; ++j) {
            h_a[i * n + j] = rand() % 1024;
        }
    }
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < k; ++j) {
            h_b[i * k + j] = rand() % 1024;
        }
    }

    int *d_a, *d_b, *d_c;
    cudaMalloc((void **) &d_a, sizeof(int) * m * n);
    cudaMalloc((void **) &d_b, sizeof(int) * n * k);
    cudaMalloc((void **) &d_c, sizeof(int) * m * k);

    // Copy matrices A and B from host to device memory.
    cudaMemcpy(d_a, h_a, sizeof(int) * m * n, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, sizeof(int) * n * k, cudaMemcpyHostToDevice);

    // Round the grid dimensions up so every output element is covered.
    unsigned int grid_rows = (m + BLOCK_SIZE - 1) / BLOCK_SIZE;
    unsigned int grid_cols = (k + BLOCK_SIZE - 1) / BLOCK_SIZE;
    dim3 dimGrid(grid_cols, grid_rows);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    gpu_matrix_mult<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, m, n, k);

    // cudaMemcpy on the default stream waits for the kernel to finish,
    // so no explicit cudaDeviceSynchronize() is needed here.
    cudaMemcpy(h_c, d_c, sizeof(int) * m * k, cudaMemcpyDeviceToHost);

    // Recompute on the CPU and compare element by element.
    cpu_matrix_mult(h_a, h_b, h_cc, m, n, k);
    int ok = 1;
    for (int i = 0; i < m; ++i)
    {
        for (int j = 0; j < k; ++j)
        {
            if (h_cc[i * k + j] != h_c[i * k + j])  // integer results compare exactly
            {
                ok = 0;
            }
        }
    }
    if (ok)
    {
        printf("Pass!!!\n");
    }
    else
    {
        printf("Error!!!\n");
    }

    // Free device and pinned host memory.
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cudaFreeHost(h_a);
    cudaFreeHost(h_b);
    cudaFreeHost(h_c);
    cudaFreeHost(h_cc);
    return 0;
}
```
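To try it, save the listing as matrix_mul.cu (the filename is my choice) and compile with `nvcc matrix_mul.cu -o matrix_mul`; a correct run prints `Pass!!!`.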