// includes, system
#include <stdio.h> // printf

// includes CUDA Runtime
#include <cuda_runtime.h>

// includes, project
#include <helper_cuda.h>      // checkCudaErrors, findCudaDevice
#include <helper_functions.h> // helper utility functions (SDK timers)
// Kernel: adds inc_value to every element of g_data, one thread per element.
// Expected launch: 1-D grid, 1-D blocks, with gridDim.x * blockDim.x equal to
// the element count. NOTE(review): there is intentionally no bounds check —
// main() sizes the grid so it covers the array exactly; callers must preserve
// that invariant.
__global__ void increment_kernel(int *g_data, int inc_value)
{
    const int tid = threadIdx.x + blockIdx.x * blockDim.x; // flat global index
    g_data[tid] += inc_value;
}
// Host-side verification: returns true iff every one of the n elements of
// data equals the reference value x. On the first mismatch it prints the
// offending index/value and returns false (remaining elements are not checked).
bool correct_output(int *data, const int n, const int x)
{
    for (int i = 0; i < n; ++i)
    {
        if (data[i] == x)
            continue;

        printf("Error! data[%d] = %d, ref = %d\n", i, data[i], x);
        return false;
    }

    return true;
}
// Demonstrates asynchronous host/device overlap: issue H2D copy + kernel +
// D2H copy to stream 0 without blocking, let the CPU spin (counting
// iterations) until the GPU signals completion via an event, then verify
// the result. Returns EXIT_SUCCESS iff the GPU output matches the reference.
int main(int argc, char *argv[])
{
    int devID;
    // cudaDeviceProp holds device attributes (name, memory sizes, max thread
    // counts, clock rates, ...) filled in by cudaGetDeviceProperties.
    cudaDeviceProp deviceProps;

    printf("[%s] - Starting...\n", argv[0]);

    // This will pick the best possible CUDA capable device.
    devID = findCudaDevice(argc, (const char **)argv);

    // Get and report the device name.
    checkCudaErrors(cudaGetDeviceProperties(&deviceProps, devID));
    printf("CUDA device [%s]\n", deviceProps.name);

    int n = 16 * 1024 * 1024;     // number of int elements
    int nbytes = n * sizeof(int); // 64 MiB — fits in int without overflow
    int value = 26;               // increment applied by the kernel

    // Allocate PINNED (page-locked) host memory: required for cudaMemcpyAsync
    // to actually overlap with host execution.
    int *a = 0;
    checkCudaErrors(cudaMallocHost((void **)&a, nbytes));
    memset(a, 0, nbytes);

    // Allocate device memory and fill every byte with 0xFF as a sentinel, so
    // stale device data cannot masquerade as a correct result.
    int *d_a = 0;
    checkCudaErrors(cudaMalloc((void **)&d_a, nbytes));
    checkCudaErrors(cudaMemset(d_a, 255, nbytes));

    // Set kernel launch configuration: one thread per element. n is a
    // multiple of 512, so the grid covers the array exactly — the kernel
    // relies on this and performs no bounds check.
    dim3 threads = dim3(512, 1);
    dim3 blocks = dim3(n / threads.x, 1);

    // CUDA events measure GPU execution time; the SDK stopwatch measures the
    // CPU time spent merely *issuing* the asynchronous calls.
    cudaEvent_t start, stop;
    checkCudaErrors(cudaEventCreate(&start));
    checkCudaErrors(cudaEventCreate(&stop));

    StopWatchInterface *timer = NULL;
    sdkCreateTimer(&timer);
    sdkResetTimer(&timer);

    // Make sure the device is idle before timing begins.
    checkCudaErrors(cudaDeviceSynchronize());
    float gpu_time = 0.0f;

    // Asynchronously issue work to the GPU (all to stream 0). With a pinned
    // host buffer these calls return immediately, leaving the CPU free.
    sdkStartTimer(&timer);
    cudaEventRecord(start, 0);
    checkCudaErrors(cudaMemcpyAsync(d_a, a, nbytes, cudaMemcpyHostToDevice, 0));
    increment_kernel<<<blocks, threads, 0, 0>>>(d_a, value);
    // Kernel launches return no status directly — query it explicitly to
    // catch bad launch configurations.
    checkCudaErrors(cudaGetLastError());
    checkCudaErrors(cudaMemcpyAsync(a, d_a, nbytes, cudaMemcpyDeviceToHost, 0));
    cudaEventRecord(stop, 0);
    sdkStopTimer(&timer);

    // Have CPU do some work while waiting for stage 1 to finish: spin on the
    // stop event, counting iterations to demonstrate the overlap.
    unsigned long int counter = 0;
    while (cudaEventQuery(stop) == cudaErrorNotReady)
    {
        counter++;
    }

    // Elapsed GPU time between the two recorded events, in milliseconds.
    checkCudaErrors(cudaEventElapsedTime(&gpu_time, start, stop));

    // Print the CPU and GPU times.
    printf("time spent executing by the GPU: %.2f\n", gpu_time);
    printf("time spent by CPU in CUDA calls: %.2f\n", sdkGetTimerValue(&timer));
    printf("CPU executed %lu iterations while waiting for GPU to finish\n", counter);

    // Check the output for correctness.
    bool bFinalResults = correct_output(a, n, value);

    // Release resources.
    // FIX: was `checkCudaErrors((start));` — the cudaEventDestroy call had
    // been dropped, leaking the start event (and passing a cudaEvent_t where
    // a cudaError_t is expected).
    checkCudaErrors(cudaEventDestroy(start));
    checkCudaErrors(cudaEventDestroy(stop));
    sdkDeleteTimer(&timer); // FIX: timer was created but never deleted (leak)
    checkCudaErrors(cudaFreeHost(a));
    checkCudaErrors(cudaFree(d_a));

    // cudaDeviceReset causes the driver to clean up all state. While
    // not mandatory in normal operation, it is good practice. It is also
    // needed to ensure correct operation when the application is being
    // profiled. Calling cudaDeviceReset causes all profile data to be
    // flushed before the application exits.
    cudaDeviceReset();

    exit(bFinalResults ? EXIT_SUCCESS : EXIT_FAILURE);
}