TensorRT & CUDA Self-Study Notes 007: The Runtime Library and a Matrix Addition Demo
The Runtime Library
The runtime library is the API layer of the CUDA platform.
The runtime can be linked into a CUDA program either statically or dynamically (with nvcc, --cudart static is the default and --cudart shared selects dynamic linking).
An installer for a dynamically linked CUDA program must ship the runtime library file (cudart) alongside the program.
The CUDA runtime library provides the following functionality:
- Device memory management, shared memory management, and page-locked (pinned) host memory management
- Asynchronous concurrent execution at several levels of the system, such as concurrent kernel execution and overlapping data copies with kernel execution (see the stream sketch after this list)
- Multi-GPU device management
- Runtime error checking
- Querying and setting the device function call stack
- Texture and surface memory management
- Graphics interoperability (OpenGL, Direct3D)
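As a taste of the asynchronous execution item above, here is a minimal sketch (my own illustration, not from the original notes; the kernel name scale and the two-stream setup are assumptions) that issues two independent copy-then-kernel pipelines on separate CUDA streams:

#include <cuda_runtime.h>

__global__ void scale(float* data, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= 2.0f;
}

int main() {
    const int n = 1 << 20;
    const size_t bytes = n * sizeof(float);
    float *hostA, *hostB, *devA, *devB;

    // Async copies only overlap with kernels when the host buffers are
    // page-locked, hence cudaMallocHost instead of malloc.
    cudaMallocHost((void**)&hostA, bytes);
    cudaMallocHost((void**)&hostB, bytes);
    cudaMalloc((void**)&devA, bytes);
    cudaMalloc((void**)&devB, bytes);

    cudaStream_t s1, s2;
    cudaStreamCreate(&s1);
    cudaStreamCreate(&s2);

    // The two pipelines are independent, so the copy in one stream may
    // overlap with the kernel running in the other.
    cudaMemcpyAsync(devA, hostA, bytes, cudaMemcpyHostToDevice, s1);
    scale<<<(n + 255) / 256, 256, 0, s1>>>(devA, n);
    cudaMemcpyAsync(devB, hostB, bytes, cudaMemcpyHostToDevice, s2);
    scale<<<(n + 255) / 256, 256, 0, s2>>>(devB, n);

    cudaDeviceSynchronize(); // wait for both streams to drain

    cudaStreamDestroy(s1);
    cudaStreamDestroy(s2);
    cudaFree(devA);
    cudaFree(devB);
    cudaFreeHost(hostA);
    cudaFreeHost(hostB);
    return 0;
}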
Lifecycle
- Runtime initialization is automatic: when a program is built on the runtime API, the runtime initializes itself on the first call to any interface other than the version-management and device-management functions.
- During initialization, the runtime creates a separate context (CUDA context) for each GPU device in the system.
- The device code is then loaded into the contexts. When the host program calls cudaDeviceReset, the CUDA context of the current device is destroyed.
CUDA context: the environment in which work executes on a GPU. It holds all the resources the program needs to run (CPU resources, GPU resources, memory, program counters, and so on). Contexts can be switched, and a host thread can make any device's context current and run kernel functions on it!
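A minimal sketch of that lifecycle (my own illustration, assuming a single-GPU machine): a version-management call that does not trigger initialization, the common cudaFree(0) idiom to force it, and cudaDeviceReset to tear the context down:

#include <cuda_runtime.h>

int main() {
    // Version-management calls do not initialize the runtime,
    // so no context exists yet after this line.
    int driverVersion = 0;
    cudaDriverGetVersion(&driverVersion);

    // cudaFree(0) is a common idiom to force initialization: it makes the
    // runtime create (and make current) the context on the current device.
    cudaFree(0);

    // ... allocate memory and launch kernels here ...

    // Destroys the current device's CUDA context; the next runtime call
    // would re-initialize it.
    cudaDeviceReset();
    return 0;
}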
Matrix Addition Demo
cudaMalloc and cudaMemcpy
They work just like C's malloc and memcpy, except that they operate on the device-side GPU memory ("video memory") rather than on the host's memory space.
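A minimal round-trip sketch (illustration only, separate from the demo below): allocate a device buffer, copy host data into it, copy it back, and free it:

#include <cuda_runtime.h>

int main() {
    float hostData[4] = {1.0f, 2.0f, 3.0f, 4.0f};
    float result[4] = {0.0f};
    float* deviceData = nullptr;

    cudaMalloc((void**)&deviceData, sizeof(hostData));   // allocate GPU memory
    cudaMemcpy(deviceData, hostData, sizeof(hostData),
               cudaMemcpyHostToDevice);                  // host -> device
    cudaMemcpy(result, deviceData, sizeof(hostData),
               cudaMemcpyDeviceToHost);                  // device -> host
    cudaFree(deviceData);                                // release GPU memory
    return 0;
}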
cudaSetDevice
cudaSetDevice is the CUDA API function that selects a device for a host thread: all subsequent CUDA platform operations issued by that host thread target the selected device.
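A small sketch (assuming at least one CUDA-capable device is installed) that binds the calling thread to the last available device:

#include <cuda_runtime.h>
#include <iostream>

int main() {
    int deviceCount = 0;
    cudaGetDeviceCount(&deviceCount);
    if (deviceCount == 0) return -1;

    // Bind this host thread to the last device; every CUDA operation this
    // thread issues from now on targets that device.
    cudaSetDevice(deviceCount - 1);

    int current = -1;
    cudaGetDevice(&current);
    std::cout << "this thread now targets device " << current << std::endl;
    return 0;
}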
error_check
error_check is a checking function I wrote to verify whether a CUDA API call failed or reported an error. If it did, error_check prints the reason for the failure, the file path, and the line number.
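Passing __FILE__ and __LINE__ by hand at every call site gets tedious; a convenience macro (a hypothetical addition of mine, not part of the original common.h) can capture them automatically:

// Hypothetical wrapper around error_check from common.h below:
// captures the call site's file and line automatically.
#define ERROR_CHECK(call) error_check((call), __FILE__, __LINE__)

// Usage:
// ERROR_CHECK(cudaSetDevice(0));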
main.cu
#include"common/common.h"
void data_inital(float* data,int N){
time_t t;
srand((unsigned)time(&t));
std::cout<<"data: ";
//初始化数据
for(int i=0;i<N;i++){
data[i] = (float)(rand()%0xff)/10.0f;
std::cout<<data[i]<<" ";
}
std::cout<<std::endl;
return;
};
__global__ void add(float* a, float* b,float* c,int N){
int threadID = threadIdx.y*blockDim.x+threadIdx.x;
if(threadID<N){
c[threadID] = a[threadID]+b[threadID];
}
}
int main(int argc, char** argv){
int deviceCount {0};
cudaDeviceProp deviceProp;
int driverVersion {0};
int runtimeVersion {0};
device_information(&deviceCount,&deviceProp,&driverVersion,&runtimeVersion);
std::cout<<std::endl;
cudaError_t error = error_check(cudaSetDevice(0),__FILE__,__LINE__);//针对主机线程指定Device,接下来主机中这个线程的后续的cuda平台的所有操作都是针对于这个设备的。
if(error == cudaSuccess)
{
std::cout<<"cudaSetDevice success!"<<std::endl;
std::cout<<"set on device:"<< deviceProp.name << std::endl;}
else
{
std::cout<<"cudaSetDevice failed!"<<std::endl;
return -1;}
int numElem = 16;
size_t nBytes = numElem * sizeof(float);
// 初始化主机端数据缓冲区
float *hostDataA, *hostDataB, *gpuRef;
hostDataA = (float*)malloc(nBytes);
hostDataB = (float*)malloc(nBytes);
gpuRef = (float*)malloc(nBytes);
if (hostDataA == NULL || hostDataB == NULL || gpuRef == NULL)
{
std::cout<<"malloc failed!"<<std::endl;
return -1;
}
data_inital(hostDataA,numElem); //初始化数据
data_inital(hostDataB,numElem); //初始化数据
memset(gpuRef, 0, nBytes);
// 初始化设备端数据缓冲区
float *deviceDataA, *deviceDataB, *deviceDataC;
cudaMalloc((float**)&deviceDataA, nBytes);//注意,cudaMalloc的修饰符为__host____device___,也就是说host和device都可以使用这个cudaAPI函数
cudaMalloc((float**)&deviceDataB, nBytes);
cudaMalloc((float**)&deviceDataC, nBytes);
if (deviceDataA == NULL || deviceDataB == NULL || deviceDataC == NULL){
std::cout<<"cudaMalloc failed!"<<std::endl;
free(hostDataA);
free(hostDataB);
free(gpuRef);
return -1;
}
if(cudaSuccess == cudaMemcpy(deviceDataA,hostDataA,nBytes,cudaMemcpyHostToDevice) &&
cudaSuccess == cudaMemcpy(deviceDataB,hostDataB,nBytes,cudaMemcpyHostToDevice) &&
cudaSuccess == cudaMemcpy(deviceDataC,gpuRef,nBytes,cudaMemcpyHostToDevice)) ///注意,cudaMemcpy的修饰符为__host__,也就是说只有host可以使用这个cudaAPI函数
{
std::cout<<"successfully copy data from host to device "<< deviceProp.name <<std::endl;
}
else
{
std::cout<<"copy data from host to device"<< deviceProp.name <<" failed!" <<std::endl;
free(hostDataA);
free(hostDataB);
free(gpuRef);
return -1;}
//加载核函数
dim3 block (4,4);
dim3 grid (1,1);
add<<<grid,block>>>(deviceDataA,deviceDataB,deviceDataC,numElem);
//将数据从设备端拷贝回主机端
cudaMemcpy(gpuRef,deviceDataC,nBytes,cudaMemcpyDeviceToHost);
//打印运算结果
std::cout<<"result: ";
for(size_t i = 0; i < numElem; i++)
std::cout<<gpuRef[i] << " ";
std::cout<<std::endl;
//释放资源
free(hostDataA);
free(hostDataB);
free(gpuRef);
cudaFree(deviceDataA);
cudaFree(deviceDataB);
cudaFree(deviceDataC);
cudaDeviceReset();
return 0;
}
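The add kernel above only indexes threads within a single block, so it only covers numElem <= blockDim.x * blockDim.y elements. A sketch of a variant (my addition, with the hypothetical name add_large) that spans multiple blocks by folding the block index into the element index:

__global__ void add_large(float* a, float* b, float* c, int N) {
    // Global linear index: this block's offset plus the thread's offset
    // within the block.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N) {
        c[i] = a[i] + b[i];
    }
}

// Launch with enough blocks to cover every element, e.g. for N elements:
//   dim3 block(256);
//   dim3 grid((N + block.x - 1) / block.x);
//   add_large<<<grid, block>>>(deviceDataA, deviceDataB, deviceDataC, N);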
common.h
#include<sys/time.h>
#include<iostream>
#include<cuda_runtime.h>
#include<stdio.h>
//用于检查你的cuda函数是否调用失败
cudaError_t error_check(cudaError_t status,const char *filePathName,int lineNumber){
if(status !=cudaSuccess)
{
std::cout << "CUDA API error " << cudaGetErrorName(status) << " at " << filePathName << " in line " << lineNumber << std::endl;
std::cout << "description :" << cudaGetErrorString(status) << std::endl;
return status;
}
return status;
}
bool device_information(int* ptr_devicCount,cudaDeviceProp* ptr_deviceProp,int* ptr_driverVersion,int* ptr_runtimeVersion){
cudaGetDeviceCount(ptr_devicCount);
if(*ptr_devicCount == 0){
std::cerr << "error: no devices supporting CUDA.\n";
return false;
}
else{
std::cout << "Detected " << *ptr_devicCount << " CUDA Capable device(s)\n";
}
for(int i {0}; i < *ptr_devicCount; i++){
cudaSetDevice(i);
error_check(cudaGetDeviceProperties(ptr_deviceProp,i),__FILE__,__LINE__);
std::cout << "Device " << i << " name: " << ptr_deviceProp->name << std::endl;
error_check(cudaDriverGetVersion(ptr_driverVersion),__FILE__,__LINE__);
error_check(cudaRuntimeGetVersion(ptr_runtimeVersion),__FILE__,__LINE__);
std::cout << "CUDA Driver Version / Runtime Version: " << *ptr_driverVersion/1000 << "." << (*ptr_driverVersion
%100)/10 << "." << *ptr_driverVersion%10 << "/" << *ptr_runtimeVersion/1000 << "."
<< (*ptr_runtimeVersion%100)/10 << "." << *ptr_runtimeVersion%10 << std::endl;
std::cout << "CUDA Capability Major/Minor version number: " << ptr_deviceProp->major << "." << ptr_deviceProp->minor << std::endl;
std::cout << "Total amount of global memory: " << ptr_deviceProp->totalGlobalMem << std::endl;
std::cout << "Total amount of constant memory: " << ptr_deviceProp->totalConstMem << std::endl;
std::cout << "Total amount of shared memory per block: " << ptr_deviceProp->sharedMemPerBlock << std::endl;
std::cout << "Total number of registers available per block: " << ptr_deviceProp->regsPerBlock << std::endl;
std::cout << "Warp size: " << ptr_deviceProp->warpSize << std::endl;
std::cout << "Maximum number of threads per block: " << ptr_deviceProp->maxThreadsPerBlock << std::endl;
std::cout << "Maximum sizes of each dimension of a block: " << ptr_deviceProp->maxThreadsDim[0] << " x "
<< ptr_deviceProp->maxThreadsDim[1] << " x " << ptr_deviceProp->maxThreadsDim[2] << std::endl;
std::cout << "Maximum sizes of each dimension of a grid: " << ptr_deviceProp->maxGridSize[0] << " x "
<< ptr_deviceProp->maxGridSize[1] << " x " << ptr_deviceProp->maxGridSize[2] << std::endl;
std::cout << "Maximum memory pitch: " << ptr_deviceProp->memPitch << std::endl;
std::cout << "Texture alignment: " << ptr_deviceProp->textureAlignment << std::endl;
std::cout << "Concurrent copy and execution: " << ptr_deviceProp->deviceOverlap << std::endl;
std::cout << "Run time limit on kernels: " << ptr_deviceProp->kernelExecTimeoutEnabled << std::endl;
std::cout << "Integrated: " << ptr_deviceProp->integrated << std::endl;
std::cout << "Support host page-locked memory mapping: " << ptr_deviceProp->canMapHostMemory << std::endl;
std::cout << "Alignment requirement for Surfaces: " << ptr_deviceProp->surfaceAlignment << std::endl;
std::cout << "Device has ECC support: " << ptr_deviceProp->ECCEnabled << std::endl;
std::cout << "Device is using TCC driver model: " << ptr_deviceProp->tccDriver << std::endl;
std::cout << "Device supports Unified Addressing (UVA): " << ptr_deviceProp->unifiedAddressing << std::endl;
std::cout << "Device supports Compute Preemption: " << ptr_deviceProp->computePreemptionSupported << std::endl;
}
return true;
}