CUDA5.0+VS2008+VisualAssist安装
转自:http://blog.163.com/qimo601@126/blog/static/15822093201352084242102/
1、软硬件环境
操作系统:Windows7 台式工控机
GPU:Geforce GT420(办公室另外一台台式机的显卡)
开发环境:VisualStudio2008、cuda_5.0.35_winvista_win7_win8_general_32-3、VA_X_Setup1845
2、软件准备
CUDA官方网站https://developer.nvidia.com/cuda-downloads
CUDA5.0安装包下载(CUDA5.0已经集成了,显卡驱动、CUDA ToolKit、 CUDA SDK 代码例子)
3、安装过程
3.1先安装VS2008,再安装VisualAssist X
VA是为了方便程序编写,不是必须安装。
3.2 CUDA5.0 installer安装过程
直接默认安装,自动安装CUDA Toolkit、CUDA SDK code samples ,和开发者驱动。
CUDAToolkit 默认安装目录:C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v5.0
CUDA SDK 的默认安装目录:C:\ProgramData\NVIDIA Corporation\CUDA Samples\v5.0
3.3 配置环境变量
安装完成Toolkit和SDK后,已自动配置好系统环境变量。可以查看系统环境变量,发现自动新建如下变量:
CUDA_PATH_V5_0 = C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v5.0\
NVCUDASAMPLES_ROOT = C:\ProgramData\NVIDIA Corporation\CUDA Samples\v5.0\
NVCUDASAMPLES5_0_ROOT = C:\ProgramData\NVIDIA Corporation\CUDA Samples\v5.0\
NVTOOLSEXT_PATH = C:\Program Files\NVIDIA GPU Computing Toolkit\nvToolsExt\
Path = C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v5.0\bin\;C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v5.0\libnvvp\;C:\Program Files\NVIDIA Corporation\PhysX\Common;C:\Program Files\Intel\iCLS Client\;%SystemRoot%\system32;%SystemRoot%;%SystemRoot%\System32\Wbem;%SYSTEMROOT%\System32\WindowsPowerShell\v1.0\;C:\Program Files\Intel\OpenCL SDK\2.0\bin\x86;C:\Program Files\Intel\Intel(R) Management Engine Components\DAL;C:\Program Files\Intel\Intel(R) Management Engine Components\IPT;c:\Program Files\Microsoft SQL Server\90\Tools\binn\;C:\Qt\4.7.4\bin
运行CUDA自带的示例程序时,出现如下错误
3.4 重启计算机后,上述错误即不再出现
在vs2008上建立一个CUDA测试项目
编译成功!
- #include "cuda_runtime.h"
- #include "device_launch_parameters.h"
- #include <stdio.h>
- #include <time.h>
- cudaError_t addWithCuda(int *c, const int *a, const int *b, size_t size);
// Element-wise vector add on the GPU: c[i] = a[i] + b[i] + c[i].
//
// Expects a 1-D launch where gridDim.x * blockDim.x equals the element
// count exactly — there is no bounds check, so the caller must size the
// grid to match the arrays (the caller in this file launches
// <<<100, 100>>> for 10,000 elements).
__global__ void addKernel(int *c, const int *a, const int *b)
{
    // Canonical flat global index. The original computed
    // threadIdx.x + blockIdx.x * gridDim.x, which only produced correct
    // indices because the caller happened to use gridDim.x == blockDim.x;
    // the block offset must be blockIdx.x * blockDim.x.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    c[i] = a[i] + b[i] + c[i];
}
// Host driver: fills two input vectors (a[i] = i, b[i] = 10*i), adds them
// on the GPU via addWithCuda, prints every element of the result
// (expected c[i] = 11*i, since c starts at zero), then prints the elapsed
// wall-clock time. Returns 0 on success, 1 on any CUDA failure.
int main()
{
    const int arraySize = 10000;
    // Initializers are zero; the loop below assigns the real values.
    int a[arraySize] = { 0 };
    int b[arraySize] = { 0 };
    int c[arraySize] = { 0 };

    for (int i = 0; i < arraySize; i++) {
        a[i] = i;
        b[i] = i * 10;
    }

    clock_t start = clock();

    // Add vectors in parallel.
    cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addWithCuda failed!\n");
        return 1;
    }

    for (int l = 0; l < arraySize; l++)
        printf("c[%d] = %d \n", l, c[l]);

    // cudaDeviceReset must be called before exiting in order for profiling
    // and tracing tools such as Nsight and Visual Profiler to show complete
    // traces. (Replaces cudaThreadExit, which is deprecated as of CUDA 4.0.)
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!\n");
        return 1;
    }

    // Timed span deliberately includes the device teardown, matching the
    // original measurement.
    clock_t finish = clock();
    double duration = (double)(finish - start) / CLOCKS_PER_SEC;
    printf("%f seconds\n", duration);

    getchar();
    return 0;
}
- // Helper function for using CUDA to add vectors in parallel.
- cudaError_t addWithCuda(int *c, const int *a, const int *b, size_t size)
- {
- int *dev_a = 0;
- int *dev_b = 0;
- int *dev_c = 0;
- cudaError_t cudaStatus;
- // Choose which GPU to run on, change this on a multi-GPU system.
- cudaStatus = cudaSetDevice(0);
- if (cudaStatus != cudaSuccess) {
- fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
- goto Error;
- }
- // Allocate GPU buffers for three vectors (two input, one output) .
- cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
- if (cudaStatus != cudaSuccess) {
- fprintf(stderr, "cudaMalloc failed!");
- goto Error;
- }
- cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
- if (cudaStatus != cudaSuccess) {
- fprintf(stderr, "cudaMalloc failed!");
- goto Error;
- }
- cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
- if (cudaStatus != cudaSuccess) {
- fprintf(stderr, "cudaMalloc failed!");
- goto Error;
- }
- // Copy input vectors from host memory to GPU buffers.
- cudaStatus = cudaMemcpy(dev_c, c, size * sizeof(int), cudaMemcpyHostToDevice);
- if (cudaStatus != cudaSuccess) {
- fprintf(stderr, "cudaMemcpy failed!");
- goto Error;
- }
- // Copy input vectors from host memory to GPU buffers.
- cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
- if (cudaStatus != cudaSuccess) {
- fprintf(stderr, "cudaMemcpy failed!");
- goto Error;
- }
- cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
- if (cudaStatus != cudaSuccess) {
- fprintf(stderr, "cudaMemcpy failed!");
- goto Error;
- }
- // Launch a kernel on the GPU with one thread for each element.
- addKernel<<<100, 100>>>(dev_c, dev_a, dev_b);
- // cudaThreadSynchronize waits for the kernel to finish, and returns
- // any errors encountered during the launch.
- cudaStatus = cudaThreadSynchronize();
- if (cudaStatus != cudaSuccess) {
- fprintf(stderr, "cudaThreadSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
- goto Error;
- }
- // Copy output vector from GPU buffer to host memory.
- cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
- if (cudaStatus != cudaSuccess) {
- fprintf(stderr, "cudaMemcpy failed!");
- goto Error;
- }
- Error:
- cudaFree(dev_c);
- cudaFree(dev_a);
- cudaFree(dev_b);
- return cudaStatus;
- }