Unified Memory
Concept
Unified memory is a single memory space that can be accessed by any processor in the system, CPU or GPU. When code running on a CPU or GPU accesses memory allocated with cudaMallocManaged, the CUDA system software and hardware take care of migrating memory pages to the memory of the accessing processor.
// CPU code
void sortfile(FILE *fp, int N)
{
    char *data;
    data = (char *)malloc(N);
    fread(data, 1, N, fp);
    qsort(data, N, 1, compare);
    usedata(data);
    free(data);
}

// CUDA 6 code
void sortfile(FILE *fp, int N)
{
    char *data;
    cudaMallocManaged(&data, N);  // one pointer, shared by CPU and GPU
    fread(data, 1, N, fp);
    qsort<<<...>>>(data, N, 1, compare);
    cudaDeviceSynchronize();      // wait for the GPU before the CPU uses the data
    usedata(data);
    cudaFree(data);
}
Because the CPU and the GPU execute asynchronously with respect to each other, cudaDeviceSynchronize must be called so that the CPU does not use the data before the GPU kernel has finished.
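The hazard is easy to reproduce. Below is a minimal sketch, not part of the original example, showing that a kernel launch returns immediately: without the cudaDeviceSynchronize call, the CPU read would race the GPU write. The kernel name set_value is illustrative.

#include <cstdio>

// Illustrative kernel: each thread writes its global index into managed memory.
__global__ void set_value(int *x, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] = i;
}

int main()
{
    int *x;
    const int n = 256;
    cudaMallocManaged(&x, n * sizeof(int)); // visible to both CPU and GPU
    set_value<<<1, n>>>(x, n);              // launch returns immediately (asynchronous)
    cudaDeviceSynchronize();                // without this, the read below races the kernel
    printf("x[%d] = %d\n", n - 1, x[n - 1]);
    cudaFree(x);
    return 0;
}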
Before CUDA 6.0, memory had to be allocated on both the CPU side and the GPU side, and the data had to be copied back and forth by hand.
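For comparison, here is a sketch of what the same sortfile would look like without Unified Memory, assuming the same placeholder device-side qsort launch as above; the extra buffer d_data and the two cudaMemcpy calls are exactly what cudaMallocManaged removes.

// Without Unified Memory: separate host and device buffers plus manual copies.
void sortfile(FILE *fp, int N)
{
    char *data, *d_data;
    data = (char *)malloc(N);               // host buffer
    cudaMalloc(&d_data, N);                 // separate device buffer
    fread(data, 1, N, fp);
    cudaMemcpy(d_data, data, N, cudaMemcpyHostToDevice);
    qsort<<<...>>>(d_data, N, 1, compare);  // placeholder launch, as above
    cudaMemcpy(data, d_data, N, cudaMemcpyDeviceToHost);
    usedata(data);
    free(data);
    cudaFree(d_data);
}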
Unified Memory simplifies both the code and the memory model: the CPU and the GPU share a single pointer, so there is no need for separate allocations on each side. This shortens the code and makes it easier to port.
Usage:
Declare a variable with the __managed__ keyword, or call the allocation function
cudaError_t cudaMallocManaged(void **devPtr, size_t size, unsigned int flags = cudaMemAttachGlobal);
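A minimal sketch showing both forms side by side; the names y, p, and add_one are illustrative and do not appear in the example that follows.

#include <cstdio>

__managed__ int y;  // statically declared managed variable

__global__ void add_one(int *p) { *p += 1; y += 1; }

int main()
{
    int *p;
    cudaMallocManaged(&p, sizeof(int)); // dynamically allocated managed memory
    *p = 41;                            // written by the CPU...
    y  = 41;
    add_one<<<1, 1>>>(p);               // ...incremented by the GPU
    cudaDeviceSynchronize();
    printf("%d %d\n", *p, y);           // prints 42 42
    cudaFree(p);
    return 0;
}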
Example
// Using Unified Memory
#include <stdio.h>
#include <math.h>
#include "error.cuh"
#define BLOCK_SIZE 16
// Managed buffers, visible to both host code and kernels
// (m = n = k = 100 below, so the 1000 * 1000 capacity is generous).
__managed__ int a[1000 * 1000];
__managed__ int b[1000 * 1000];
__managed__ int c_gpu[1000 * 1000];
__managed__ int c_cpu[1000 * 1000];
// Tiled matrix multiply using shared memory. Note that the body indexes with n
// only, so it effectively assumes square n x n matrices (m and k are unused).
__global__ void gpu_matrix_mult_shared(int *d_a, int *d_b, int *d_result, int m, int n, int k)
{
    __shared__ int tile_a[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ int tile_b[BLOCK_SIZE][BLOCK_SIZE];
    int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
    int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    int tmp = 0;
    int idx;
    for (int sub = 0; sub < gridDim.x; ++sub)
    {
        // Stage one tile of each input in shared memory, padding with 0 out of range.
        idx = row * n + sub * BLOCK_SIZE + threadIdx.x;
        tile_a[threadIdx.y][threadIdx.x] = (row < n && (sub * BLOCK_SIZE + threadIdx.x) < n) ? d_a[idx] : 0;
        idx = (sub * BLOCK_SIZE + threadIdx.y) * n + col;
        tile_b[threadIdx.y][threadIdx.x] = (col < n && (sub * BLOCK_SIZE + threadIdx.y) < n) ? d_b[idx] : 0;
        __syncthreads();
        // Accumulate the partial dot product for this tile.
        for (int i = 0; i < BLOCK_SIZE; ++i)
        {
            tmp += tile_a[threadIdx.y][i] * tile_b[i][threadIdx.x];
        }
        __syncthreads();
    }
    if (row < n && col < n)
    {
        d_result[row * n + col] = tmp;
    }
}
// Reference implementation on the CPU: C (m x k) = A (m x n) * B (n x k).
void cpu_matrix_mult(int *h_a, int *h_b, int *h_result, int m, int n, int k) {
    for (int i = 0; i < m; ++i)
    {
        for (int j = 0; j < k; ++j)
        {
            int tmp = 0;
            for (int h = 0; h < n; ++h)
            {
                tmp += h_a[i * n + h] * h_b[h * k + j];
            }
            h_result[i * k + j] = tmp;
        }
    }
}
int main(int argc, char const *argv[])
{
int m=100;
int n=100;
int k=100;
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
    // Fill the inputs with random values in [1, 1024].
    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < n; ++j) {
            a[i * n + j] = rand() % 1024 + 1;
        }
    }
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < k; ++j) {
            b[i * k + j] = rand() % 1024 + 1;
        }
    }
unsigned int grid_rows = (m + BLOCK_SIZE - 1) / BLOCK_SIZE;
unsigned int grid_cols = (k + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 dimGrid(grid_cols, grid_rows);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    cudaEventRecord(start);
    gpu_matrix_mult_shared<<<dimGrid, dimBlock>>>(a, b, c_gpu, m, n, k);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop); // also guarantees the kernel is done before the CPU reads c_gpu
    float elapsed_ms;
    cudaEventElapsedTime(&elapsed_ms, start, stop);
    printf("GPU time is %g ms.\n", elapsed_ms);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
cpu_matrix_mult(a, b, c_cpu, m, n, k);
    // Compare the GPU result against the CPU reference element by element.
    int ok = 1;
    for (int i = 0; i < m; ++i)
    {
        for (int j = 0; j < k; ++j)
        {
            if (c_gpu[i * k + j] != c_cpu[i * k + j])
            {
                ok = 0;
            }
        }
    }
if (ok)
{
printf("Pass!!!\n");
}
else
{
printf("Error!!!\n");
}
return 0;
}
By making device memory management an optimization rather than a requirement, Unified Memory lowers the barrier to entry for parallel programming on the CUDA platform.