A first CUDA program
#include <cuda_runtime_api.h>
#include <cstdlib>   // rand, malloc, free
#include <iostream>
#define RANDOM(x) (rand() % (x))
#define MAX 10
// single block, single thread
__global__ void vector_add_gpu_1(int *d_a, int *d_b, int *d_c, int n){
    for(int i = 0; i < n; i++){
        d_c[i] = d_a[i] + d_b[i];
    }
}
// single block, multiple threads
__global__ void vector_add_gpu_2(int *d_a, int *d_b, int *d_c, int n){
    int tid = threadIdx.x;
    const int t_n = blockDim.x;        // total threads in the block
    while(tid < n){
        d_c[tid] = d_a[tid] + d_b[tid];
        tid += t_n;                    // each thread strides by blockDim.x
    }
}
// multiple blocks, multiple threads (grid-stride loop)
__global__ void vector_add_gpu_3(int *d_a, int *d_b, int *d_c, int n){
    const int tidx = threadIdx.x;
    const int bidx = blockIdx.x;
    const int t_n = gridDim.x * blockDim.x;   // total threads in the grid
    int tid = bidx * blockDim.x + tidx;       // global thread index
    while(tid < n){
        d_c[tid] = d_a[tid] + d_b[tid];
        tid += t_n;                           // stride by the whole grid
    }
}
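// (Illustrative helper, not part of the original listing.) Because
// vector_add_gpu_3 strides by gridDim.x * blockDim.x, the grid can be sized
// from n instead of being hard-coded in the launch; 256 threads per block is
// an arbitrary but common choice here.
void launch_vector_add(int *d_a, int *d_b, int *d_c, int n){
    const int threads = 256;
    const int blocks = (n + threads - 1) / threads;   // ceil(n / threads)
    vector_add_gpu_3<<<blocks, threads>>>(d_a, d_b, d_c, n);
}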
int main(){
    int count;
    cudaGetDeviceCount(&count);   // number of GPUs with compute capability greater than 1.0
    int gpuid = 0;                // choose GPU 0
    cudaSetDevice(gpuid);         // select the GPU to use by index (default is 0)
    cudaGetDevice(&gpuid);        // get the GPU index used by the current host thread
    struct cudaDeviceProp device_prop;
    cudaGetDeviceProperties(&device_prop, gpuid);
    std::cout << device_prop.name << std::endl;
    std::cout << (device_prop.totalGlobalMem/1024/1024) << " MB " << std::endl;
    std::cout << (device_prop.sharedMemPerBlock/1024) << " KB " << std::endl;

    /*** vector addition ***/
    int n = 5;
    int *a = (int *)malloc(sizeof(int)*n);
    int *b = (int *)malloc(sizeof(int)*n);
    int *c = (int *)malloc(sizeof(int)*n);
    for (int i = 0; i < n; i++){
        a[i] = RANDOM(MAX);
        b[i] = RANDOM(MAX);
        std::cout << a[i] << " " << b[i] << std::endl;
    }
    cudaError_t cudaStatus;
    // GPU memory allocation
    int *d_a, *d_b, *d_c;
    cudaMalloc((void **)&d_a, sizeof(int)*n);
    cudaMalloc((void **)&d_b, sizeof(int)*n);
    cudaMalloc((void **)&d_c, sizeof(int)*n);
    // copy a and b to the GPU
    cudaStatus = cudaMemcpy(d_a, a, sizeof(int)*n, cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        std::cout << "Memory copy failed! error code: " << cudaGetErrorString(cudaStatus) << std::endl;
    }
    cudaStatus = cudaMemcpy(d_b, b, sizeof(int)*n, cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        std::cout << "Memory copy failed! error code: " << cudaGetErrorString(cudaStatus) << std::endl;
    }
    //vector_add_gpu_1<<<1, 1>>>(d_a, d_b, d_c, n);
    //vector_add_gpu_2<<<1, 12>>>(d_a, d_b, d_c, n);
    vector_add_gpu_3<<<4, 3>>>(d_a, d_b, d_c, n);   // 4 blocks x 3 threads = 12 threads for n = 5
    // copy the result back to the CPU
    cudaMemcpy(c, d_c, sizeof(int)*n, cudaMemcpyDeviceToHost);
    std::cout << "the result of add is: " << std::endl;
    for (int i = 0; i < n; i++){
        std::cout << " " << c[i];
    }
    std::cout << std::endl;
    // free GPU memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(a);
    free(b);
    free(c);
    return 0;
}
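The listing only checks the return value of the two host-to-device cudaMemcpy calls; cudaMalloc, the kernel launch, and the device-to-host copy are left unchecked. Below is a minimal sketch of a checking macro (the name CUDA_CHECK is an arbitrary choice, not something from the original code). Kernel launches return no status themselves, so they are typically followed by cudaGetLastError() and cudaDeviceSynchronize(); the macro reuses the <iostream> already included above.
// Minimal error-checking macro: wrap each CUDA runtime call so failures are
// reported at the point they occur.
#define CUDA_CHECK(call)                                                   \
    do {                                                                   \
        cudaError_t err = (call);                                          \
        if (err != cudaSuccess) {                                          \
            std::cout << "CUDA error at " << __FILE__ << ":" << __LINE__   \
                      << ": " << cudaGetErrorString(err) << std::endl;     \
        }                                                                  \
    } while (0)
// Usage (replacing the bare calls in main):
//   CUDA_CHECK(cudaMalloc((void **)&d_a, sizeof(int)*n));
//   vector_add_gpu_3<<<4, 3>>>(d_a, d_b, d_c, n);
//   CUDA_CHECK(cudaGetLastError());        // reports launch-configuration errors
//   CUDA_CHECK(cudaDeviceSynchronize());   // reports errors raised during kernel execution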
Compile:
nvcc 01.cu -o 01.out
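The target architecture can also be stated explicitly; sm_86 corresponds to the RTX 3060 reported in the output below (adjust the value for your own GPU):
nvcc -arch=sm_86 01.cu -o 01.out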
Output:
NVIDIA GeForce RTX 3060 Laptop GPU
5929 MB
48 KB
3 6
7 5
3 5
6 2
9 1
the result of add is:
9 12 8 8 1
PC GPU info:
nvidia-smi