cuda 第一个代码

#include <cuda_runtime_api.h>
#include <iostream>

#define RANDOM(x) (rand() % x)
#define MAX 10

// Single block, single thread: one thread serially walks the whole array.
// Launch as <<<1, 1>>>; d_a/d_b/d_c are device arrays of at least n ints.
__global__ void vector_add_gpu_1(int *d_a, int *d_b, int *d_c, int n){
	int i = 0;
	while (i < n) {
		d_c[i] = d_a[i] + d_b[i];
		++i;
	}
}

// Single block, multiple threads: each thread starts at its own index and
// advances by the block width, so the block covers all n elements even when
// n > blockDim.x. Launch as <<<1, T>>> for any T >= 1.
__global__ void vector_add_gpu_2(int *d_a, int *d_b, int *d_c, int n){
	const int stride = blockDim.x;
	for (int i = threadIdx.x; i < n; i += stride) {
		d_c[i] = d_a[i] + d_b[i];
	}
}

// Multiple blocks, multiple threads: grid-stride loop. Each thread computes
// its flat global index and strides by the total thread count, so the launch
// configuration is decoupled from n (any grid/block shape covers all elements).
__global__ void vector_add_gpu_3(int *d_a, int *d_b, int *d_c, int n){
	const int stride = gridDim.x * blockDim.x;
	for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride) {
		d_c[i] = d_a[i] + d_b[i];
	}
}

// Demo driver: query device properties, fill two host vectors with random
// ints, add them on the GPU with the grid-stride kernel, and print the result.
// Returns 0 on success, 1 on any allocation/copy/launch failure.
int main(){

	int count = 0;
	cudaGetDeviceCount(&count);         // number of visible CUDA-capable GPUs

	int gpuid = 0;                      // select GPU 0
	cudaSetDevice(gpuid);               // bind this host thread to that device (default is 0)

	cudaGetDevice(&gpuid);              // read back the device index in use

	struct cudaDeviceProp device_prop;
	cudaGetDeviceProperties(&device_prop, 0);

	std::cout<<device_prop.name <<std::endl;
	std::cout<<(device_prop.totalGlobalMem/1024/1024) << " MB " <<std::endl;
	std::cout<<(device_prop.sharedMemPerBlock/1024) << " KB " <<std::endl;

	/*** vector addition ***/
	int n = 5;
	int *a = (int *)malloc(sizeof(int)*n);
	int *b = (int *)malloc(sizeof(int)*n);
	int *c = (int *)malloc(sizeof(int)*n);
	// host malloc can fail; dereferencing NULL below would crash
	if (a == NULL || b == NULL || c == NULL) {
		std::cout << "Host memory allocation failed!" << std::endl;
		free(a); free(b); free(c);
		return 1;
	}

	// int loop index: n is int, so size_t caused a signed/unsigned mismatch
	for (int i = 0; i < n; i++){
		a[i] = RANDOM(MAX);
		b[i] = RANDOM(MAX);
		std::cout << a[i] << "   " << b[i] << std::endl;
	}

	cudaError_t cudaStatus;

	// GPU memory allocate — check each call: a failed cudaMalloc leaves the
	// pointer invalid and every later call would fail mysteriously
	int *d_a = NULL, *d_b = NULL, *d_c = NULL;
	cudaStatus = cudaMalloc((void **)&d_a, sizeof(int)*n);
	if (cudaStatus == cudaSuccess) cudaStatus = cudaMalloc((void **)&d_b, sizeof(int)*n);
	if (cudaStatus == cudaSuccess) cudaStatus = cudaMalloc((void **)&d_c, sizeof(int)*n);
	if (cudaStatus != cudaSuccess) {
		std::cout << "cudaMalloc failed! error: " << cudaGetErrorString(cudaStatus) << std::endl;
		cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
		free(a); free(b); free(c);
		return 1;
	}

	// copy inputs a and b to the GPU
	// (previous version used `("...%s", str)` — the comma operator discarded
	// the message text; stream the parts instead)
	cudaStatus = cudaMemcpy(d_a, a, sizeof(int)*n, cudaMemcpyHostToDevice);
	if (cudaStatus != cudaSuccess) {
		std::cout << "Memory copy failed! error code: " << cudaGetErrorString(cudaStatus) << std::endl;
	}
	cudaStatus = cudaMemcpy(d_b, b, sizeof(int)*n, cudaMemcpyHostToDevice);
	if (cudaStatus != cudaSuccess) {
		std::cout << "Memory copy failed! error code: " << cudaGetErrorString(cudaStatus) << std::endl;
	}

	//vector_add_gpu_1<<<1, 1>>>(d_a, d_b, d_c, n);

	//vector_add_gpu_2<<<1, 12>>>(d_a, d_b, d_c, n);

	vector_add_gpu_3<<<4, 3>>>(d_a, d_b, d_c, n);

	// kernel launches do not return an error directly: bad launch configs
	// surface via cudaGetLastError(), in-kernel faults at the next sync
	cudaStatus = cudaGetLastError();
	if (cudaStatus != cudaSuccess) {
		std::cout << "Kernel launch failed! error code: " << cudaGetErrorString(cudaStatus) << std::endl;
	}

	// result copy back to CPU (blocking cudaMemcpy also synchronizes with the kernel)
	cudaStatus = cudaMemcpy(c, d_c, sizeof(int)*n, cudaMemcpyDeviceToHost);
	if (cudaStatus != cudaSuccess) {
		std::cout << "Memory copy failed! error code: " << cudaGetErrorString(cudaStatus) << std::endl;
	}
	std::cout << "the result of add is: " << std::endl;

	for (int i = 0; i < n; i++){
		std::cout << " " << c[i] ;
	}
	std::cout << std::endl;   // terminate the result line

	// GPU memory free
	cudaFree(d_a);
	cudaFree(d_b);
	cudaFree(d_c);
	free(a);
	free(b);
	free(c);

	return 0;
}

编译:

nvcc 01.cu -o 01.out

输出:

NVIDIA GeForce RTX 3060 Laptop GPU
5929 MB 
48 KB 
3   6
7   5
3   5
6   2
9   1
the result of add is: 
 9 12 8 8 1

PC GPU信息:

nvidia-smi

  • 1
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值