I've been writing C/C++ CUDA programs recently and have stepped into a lot of pitfalls along the way. Some of the small working snippets I wrote have become obsolete as the code iterates, but I can't bear to throw them away, so I'm parking them here for now...
1. checkCudaError: a wrapper for catching CUDA errors [very handy]
#include <cstdio>
#include <stdexcept>
#include <cuda_runtime.h>

#define checkCudaError(cudaError) __checkCudaError(cudaError, __FILE__, __LINE__)
__host__ void __checkCudaError(cudaError_t result_t, const char *file, const int line)
{
    // Ignore cudaErrorCudartUnloading, which can fire harmlessly during process teardown.
    if (cudaSuccess != result_t && cudaErrorCudartUnloading != result_t)
    {
        // Report the failing file/line in red on stderr, then abort via an exception.
        fprintf(stderr, "\x1B[31m CUDA error encountered in file '%s', line %d\n Error %d: %s\n Terminating application.\n \x1B[0m",
                file, line, result_t, cudaGetErrorString(result_t));
        throw std::runtime_error("checkCudaError: CUDA error");
    }
}
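For reference, a minimal usage sketch (d_buf is a made-up device buffer for illustration): wrap every CUDA runtime call so any failure reports its exact file and line.

float *d_buf = nullptr; // hypothetical device buffer
checkCudaError(cudaMalloc(&d_buf, 1024 * sizeof(float)));
checkCudaError(cudaMemset(d_buf, 0, 1024 * sizeof(float)));
checkCudaError(cudaFree(d_buf));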
2. My cudaMemcpyToSymbol fails once the copy length exceeds about 500,000 elements. Suspecting it was related to the maximum offset and length, I wrote a wrapper that breaks a long copy into smaller chunks. [In the end it behaved exactly the same as before and the same error still shows up, but I don't want to throw the code away outright either, so it goes here for now...]
// Since I couldn't find the reason for the device symbol error, this function is no longer used.
__host__ void cudaMemcpyToSymbol_H2D_float(float Variable_GPU[PCL_MAX_SIZE], float (*Variable_CPU)[PCL_MAX_SIZE], unsigned int float_num, string mode) // unsigned int on x86-64 is 32-bit: 0 ~ 4294967295
{
    printf("float_num:%u\n", float_num);
    const float *pointer_CPU = *Variable_CPU;
    if (mode != "cudaMemcpyHostToDevice")
    {
        printf("\033[1;31m[GPU-H2D ERROR] Copy mode is not cudaMemcpyHostToDevice\033[0m");
        return;
    }
    if (float_num == 0) // float_num is unsigned, so it can never be negative
    {
        printf("\033[1;31m[GPU-H2D ERROR] float_num should be > 0\033[0m");
        return;
    }
    unsigned int num_left = float_num;
    unsigned int num_offset = 0;
    while (num_left > 0)
    {
        unsigned int chunk = (num_left < COPY_OFFSET) ? num_left : COPY_OFFSET;
        // The first argument of cudaMemcpyToSymbol must be the device symbol itself;
        // doing pointer arithmetic on it defeats the runtime's symbol lookup (a likely
        // cause of the original invalid-device-symbol error). So keep the symbol fixed
        // and walk through the array with the byte-offset parameter instead.
        checkCudaError(cudaMemcpyToSymbol(Variable_GPU, pointer_CPU + num_offset,
                                          chunk * sizeof(float), num_offset * sizeof(float),
                                          cudaMemcpyHostToDevice));
        num_offset += chunk;
        num_left -= chunk;
        printf("floats left:%u\n", num_left);
    }
    return;
    // example calls, kept for reference:
    // cudaMemcpyToSymbol_H2D_float(PCL_X, &_PCL_X, PCL_MAX_SIZE, "cudaMemcpyHostToDevice");
    // cudaMemcpyToSymbol_H2D_float(PCL_Y, &_PCL_Y, PCL_MAX_SIZE, "cudaMemcpyHostToDevice");
    // cudaMemcpyToSymbol_H2D_float(PCL_Z, &_PCL_Z, PCL_MAX_SIZE, "cudaMemcpyHostToDevice");
    // cudaMemcpyToSymbol_H2D_float(PCL_NX, &_PCL_NX, PCL_MAX_SIZE, "cudaMemcpyHostToDevice");
    // cudaMemcpyToSymbol_H2D_float(PCL_NY, &_PCL_NY, PCL_MAX_SIZE, "cudaMemcpyHostToDevice");
    // cudaMemcpyToSymbol_H2D_float(PCL_NZ, &_PCL_NZ, PCL_MAX_SIZE, "cudaMemcpyHostToDevice");
}
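As an aside: if the symbol-plus-offset route keeps failing, an alternative worth trying (a minimal sketch of my own, not code from the project) is to resolve the __device__ symbol to a raw device pointer once with cudaGetSymbolAddress, after which plain cudaMemcpy works with no symbol-lookup restrictions:

float *d_pcl_x = nullptr;
checkCudaError(cudaGetSymbolAddress((void **)&d_pcl_x, PCL_X)); // PCL_X: the __device__ float array symbol
checkCudaError(cudaMemcpy(d_pcl_x, _PCL_X, PCL_MAX_SIZE * sizeof(float), cudaMemcpyHostToDevice));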
3. NVTX range markers and a few small helpers
__host__ void gpuInit()
{
    nvtxRangePush("gpuInit"); // shows this function as a named range in Nsight Systems
    checkCudaError(cudaSetDevice(0));
    float _PCL_X[PCL_MAX_SIZE]; // host-side staging buffer for the __device__ symbol PCL_X
    checkCudaError(cudaDeviceSynchronize());
    checkCudaError(cudaMemcpyToSymbol(PCL_X, &_PCL_X, sizeof(float) * PCL_MAX_SIZE, 0, cudaMemcpyHostToDevice));
    int num_block = (GRID_MAX_NUM + THREAD_NUM_PER_BLOCK - 1) / THREAD_NUM_PER_BLOCK; // round up
    test<<<num_block, THREAD_NUM_PER_BLOCK>>>();
    checkCudaError(cudaGetLastError()); // catch launch errors from the test kernel
    cout << "\033[0;32m[CUDA-gpuInit()] PASS! \033[0m" << endl;
    nvtxRangePop();
    return;
}
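One caveat with raw nvtxRangePush/nvtxRangePop pairs: any early return or thrown exception in between (and checkCudaError does throw) leaves the range unbalanced in the profiler. A tiny RAII wrapper (my own sketch, reusing the same NVTX calls as above; link with -lnvToolsExt, or use the header-only NVTX3) keeps the pairs matched automatically:

struct NvtxRange
{
    explicit NvtxRange(const char *name) { nvtxRangePush(name); }
    ~NvtxRange() { nvtxRangePop(); } // popped at scope exit, even on early return or throw
    NvtxRange(const NvtxRange &) = delete;
    NvtxRange &operator=(const NvtxRange &) = delete;
};
// usage inside gpuInit(): NvtxRange range("gpuInit"); // no explicit pop needed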