首先看下我们在CPU上是如何用二级指针表示二维数组的,其实就两点:一是用一级指针保存数据,二是用二级指针去按行索引数据位置。关于一级指针和二级指针的内存分配这里不讲了,注意数据类型就可以了。
代码做了相关说明,应该比较好理解:
#define Row 8
#define Col 4
// Declare Row row pointers: cpuA+0, cpuA+1, ..., cpuA+Row-1
int **cpuA = (int **)malloc(Row * sizeof(int*));
int *cpudataA = (int*)malloc(Row*Col * sizeof(int));
// cpuA[i] acts as a row pointer into the flat data buffer:
// row i starts at offset Col*i.
for (int i = 0; i < Row; i++) {
cpuA[i] = cpudataA + Col*i;
}
// Fill the flat buffer with sequential values.
for (int i = 0; i < Row*Col; i++) {
cpudataA[i] = i;
}
// Index the data as a 2D array via the row-pointer table.
// NOTE(review): fragment omits free(cpuA) / free(cpudataA).
for (int i = 0; i < Row; i++) {
for (int j = 0; j < Col; j++) {
printf("%5d", cpuA[i][j]);
}
printf("\n");
}
然后对应我们在GPU上使用二级指针,整体上其实和CPU没有多大区别,把二级指针和一级指针保存的数据 传送到设备上去,然后在设备上建立二级指针和一级指针的对应关系。目的就是让二级指针能够指到对应的数据位置。
这里简单地画了个示意图:
GPU代码如下:
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#define Row 8
#define Col 4
// Element-wise C[row][col] = A[row][col] + 10 over a Row x Col matrix,
// addressed through device-side row-pointer tables (int**).
// Expects a 2D launch: x spans columns, y spans rows.
__global__ void addKernel(int **C, int **A)
{
    // Global 2D coordinates of this thread.
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    // Guard: the grid is rounded up, so some threads fall outside the matrix.
    if (row < Row && col < Col) {
        C[row][col] = A[row][col] + 10;
    }
}
// Abort with file/line and a readable message on any CUDA API failure.
#define CUDA_CHECK(call)                                                      \
    do {                                                                      \
        cudaError_t err_ = (call);                                            \
        if (err_ != cudaSuccess) {                                            \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,     \
                    cudaGetErrorString(err_));                                \
            exit(EXIT_FAILURE);                                               \
        }                                                                     \
    } while (0)

int main()
{
    // Host-side row-pointer tables and flat data buffers.
    int **A = (int **)malloc(sizeof(int*) * Row);
    int **C = (int **)malloc(sizeof(int*) * Row);
    int *dataA = (int *)malloc(sizeof(int) * Row * Col);
    int *dataC = (int *)malloc(sizeof(int) * Row * Col);
    int **d_A;      // device row-pointer table for A
    int **d_C;      // device row-pointer table for C
    int *d_dataA;   // device flat buffer for A
    int *d_dataC;   // device flat buffer for C
    // Device allocations. The pointer tables hold Row elements of type int*,
    // so the element size is sizeof(int*) (the original used sizeof(int **) —
    // same size on every practical ABI, but the wrong element type).
    CUDA_CHECK(cudaMalloc((void**)&d_A, sizeof(int*) * Row));
    CUDA_CHECK(cudaMalloc((void**)&d_C, sizeof(int*) * Row));
    CUDA_CHECK(cudaMalloc((void**)&d_dataA, sizeof(int) * Row * Col));
    CUDA_CHECK(cudaMalloc((void**)&d_dataC, sizeof(int) * Row * Col));
    // Initialize the input matrix.
    for (int i = 0; i < Row * Col; i++) {
        dataA[i] = i + 1;
    }
    // Build the row-pointer tables on the HOST, but make each entry point
    // into the DEVICE flat buffers; copying the tables to the device below
    // is what establishes valid device-side double indirection.
    for (int i = 0; i < Row; i++) {
        A[i] = d_dataA + Col * i;
        C[i] = d_dataC + Col * i;
    }
    CUDA_CHECK(cudaMemcpy(d_A, A, sizeof(int*) * Row, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(d_C, C, sizeof(int*) * Row, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(d_dataA, dataA, sizeof(int) * Row * Col, cudaMemcpyHostToDevice));
    // 4x4 thread tiles; ceil-div so the grid covers the whole matrix.
    dim3 threadPerBlock(4, 4);
    dim3 blockNumber((Col + threadPerBlock.x - 1) / threadPerBlock.x,
                     (Row + threadPerBlock.y - 1) / threadPerBlock.y);
    printf("Block(%d,%d) Grid(%d,%d).\n", threadPerBlock.x, threadPerBlock.y, blockNumber.x, blockNumber.y);
    addKernel<<<blockNumber, threadPerBlock>>>(d_C, d_A);
    CUDA_CHECK(cudaGetLastError());       // catch launch-configuration errors
    CUDA_CHECK(cudaDeviceSynchronize());  // surface asynchronous execution errors
    // Copy the result back through the flat data pointer (the pointer table
    // on the device is only meaningful to the kernel).
    CUDA_CHECK(cudaMemcpy(dataC, d_dataC, sizeof(int) * Row * Col, cudaMemcpyDeviceToHost));
    for (int i = 0; i < Row * Col; i++) {
        if (i % Col == 0) {
            printf("\n");
        }
        printf("%5d", dataC[i]);
    }
    printf("\n");
    // Release device and host memory (the original leaked all eight allocations).
    CUDA_CHECK(cudaFree(d_A));
    CUDA_CHECK(cudaFree(d_C));
    CUDA_CHECK(cudaFree(d_dataA));
    CUDA_CHECK(cudaFree(d_dataC));
    free(A);
    free(C);
    free(dataA);
    free(dataC);
    return 0;
}
实验结果: