1、GPU 上的直方图统计(下面是直方图 kernel 的参数表与主体,其函数头在文末)
//每个 block 拥有自己的共享内存副本,先在 block 内用共享内存累加出局部直方图
//后文的内积程序相当于计算 2*(0²+1²+...+(N-1)²),用闭式公式验证向量内积的正确性
//选线程 0 做归并是因为每个 block 都一定含有线程 0
// NOTE(review): fragment of histo_kernel — the function header
// (__global__ void histo_kernel(unsigned char *buffer, ...)) appears
// later in this document, as does the final merge of temp[] into
// histo[]. The code below is unchanged; only comments were added.
long size,
unsigned int *histo ) {
// clear out the accumulation buffer called temp
// since we are launched with 256 threads, it is easy
// to clear that memory with one write per thread
__shared__
unsigned int temp[256];
temp[threadIdx.x] = 0;
// initialize the shared-memory histogram bins to zero
__syncthreads();
// calculate the starting index and the offset to the next
// block that each thread will be processing
int i = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
// grid-stride loop: each thread tallies its bytes into the
// block-local shared histogram (atomic: bins are shared by the block)
while (i < size) {
atomicAdd( &temp[buffer[i]], 1 );
i += stride;
}
}
再来看内积的kernel
// Dot-product kernel (fixed-size version): each block computes the
// partial sum of a[i]*b[i] for its slice in shared memory, then
// thread 0 of the block atomically adds that partial sum into *c.
// Assumes the grid covers every element exactly once (no bounds
// check) and blockDim.x == THREADS_PER_BLOCK.
__global__ void dot( int *a, int *b, int *c ) {
    // One copy of this buffer per block.
    __shared__ int temp[THREADS_PER_BLOCK];
    // BUG FIX: `index` was used without a declaration; declare it.
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    // Each thread computes one pairwise product.
    temp[threadIdx.x] = a[index] * b[index];
    // Wait until every product in this block has been written.
    __syncthreads();
    if( 0 == threadIdx.x ) {
        // Thread 0 serially sums the block's products...
        int sum = 0;
        for( int i = 0; i < THREADS_PER_BLOCK; i++ )
            sum += temp[i];
        // ...and merges them atomically, since many blocks update *c.
        atomicAdd( c , sum );
    }
}
main中 使用的方法
#define N (2048*2048)          // total vector length
#define THREADS_PER_BLOCK 512  // block size used by the launch below
// BUG FIX: the grid size needs parentheses around the ceiling divide.
// The original N+(THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK parses as
// N + ((THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK) == N + 0 == N,
// launching N blocks instead of ceil(N / THREADS_PER_BLOCK).
dot<<< (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>( dev_a, dev_b, dev_c );
注意:kernel 的启动配置使用三重尖括号语法 <<<blocksPerGrid, threadsPerBlock>>>(原文中尖括号被网页转义吞掉了)。
对于任意长度的向量内积。仿照任意长度向量相加,
http://blog.csdn.net/zhanglei0107/article/details/7328347
// Dot-product program for arbitrary-length vectors.
#include <stdio.h>
// NOTE(review): the web extraction dropped the macro body; restored to
// the standard min-of-two expression (parenthesized for safety).
#define imin(a,b) ((a)<(b)?(a):(b))
// N is the problem size (number of elements in each input vector).
const int N = 33 * 1024;
const int threadsPerBlock = 256;
// Cap the grid at 32 blocks; the kernel's stride loop covers the rest.
const int blocksPerGrid =
    imin( 32, (N + threadsPerBlock - 1) / threadsPerBlock );
// Dot-product kernel for arbitrary N: each thread accumulates a strided
// partial sum, the block reduces those sums in shared memory, and
// thread 0 of each block writes the block's result to c[blockIdx.x].
// Requires blockDim.x == threadsPerBlock and a power-of-two block size
// (the halving reduction below assumes it).
__global__ void dot(float *a, float *b, float *c)
{
    // Per-block scratch buffer; every block has its own copy.
    __shared__ float cache[threadsPerBlock];
    // Global index of this thread within the whole grid.
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int cacheIndex = threadIdx.x;
    float temp = 0;
    // Grid-stride loop: handles N larger than the total thread count.
    // (The original text lost the `< N` comparison to HTML escaping.)
    while (tid < N)
    {
        temp += a[tid] * b[tid];
        // Advance by the total number of launched threads.
        tid += blockDim.x * gridDim.x;
    }
    cache[cacheIndex] = temp;
    // Make every thread's partial sum visible before reducing.
    __syncthreads();
    // Tree reduction: halve the active range each pass.
    int i = blockDim.x / 2;
    while (i != 0)
    {
        if (cacheIndex < i)
            cache[cacheIndex] += cache[cacheIndex + i];
        // Barrier outside the if: all threads in the block must reach it.
        __syncthreads();
        i /= 2;
    }
    // Thread 0 publishes this block's partial dot product.
    if (cacheIndex == 0)
        c[blockIdx.x] = cache[0];
}
上面 while(i != 0) 那段用了归约(reduction)法:每一轮把活跃线程数减半,在 block 内把部分和两两相加。
// Host driver: fills a[i]=i, b[i]=2i, launches the dot kernel, reduces
// the per-block partial sums on the CPU, and checks the result against
// the closed form 2 * (sum of squares of 0..N-1).
int main(void)
{
    float *a, *b, c, *partial_c;
    float *dev_a, *dev_b, *dev_partial_c;

    // Host allocations (one partial sum per block comes back).
    a = (float*)malloc( N * sizeof(float) );
    b = (float*)malloc( N * sizeof(float) );
    partial_c = (float*)malloc( blocksPerGrid * sizeof(float) );

    // Device allocations.
    cudaMalloc( (void**)&dev_a, N * sizeof(float) );
    cudaMalloc( (void**)&dev_b, N * sizeof(float) );
    cudaMalloc( (void**)&dev_partial_c, blocksPerGrid * sizeof(float) );

    // Initialize inputs so that dot(a,b) = sum of 2*i*i.
    // (The original text lost the `i < N` condition to HTML escaping.)
    for (int i = 0; i < N; i++)
    {
        a[i] = i;
        b[i] = i * 2;
    }

    cudaMemcpy( dev_a, a, N * sizeof(float), cudaMemcpyHostToDevice );
    cudaMemcpy( dev_b, b, N * sizeof(float), cudaMemcpyHostToDevice );

    // NOTE(review): the original text lost the launch configuration
    // inside <<< >>>; restored from the constants defined above.
    dot<<< blocksPerGrid, threadsPerBlock >>>( dev_a, dev_b, dev_partial_c );

    // Blocking device-to-host copy also synchronizes with the kernel.
    cudaMemcpy( partial_c, dev_partial_c,
                blocksPerGrid * sizeof(float), cudaMemcpyDeviceToHost );

    // Final reduction of the per-block partial sums on the host.
    c = 0;
    for (int i = 0; i < blocksPerGrid; i++)
        c += partial_c[i];

    // Closed form: sum_{x=0}^{n} x^2 = n(n+1)(2n+1)/6, evaluated at N-1.
    #define sum_squares(x) ((x)*((x)+1)*(2*(x)+1)/6)
    printf( "Does GPU value %.6g = %.6g?\n", c,
            2 * sum_squares( (float)(N - 1) ) );

    cudaFree( dev_a );
    cudaFree( dev_b );
    cudaFree( dev_partial_c );
    free( a );
    free( b );
    free( partial_c );

    // Pause so the console window stays open before exit.
    int j;
    scanf( "%d", &j );
}
直方图的kernel
// NOTE(review): scrambled fragment — this is the histo_kernel header
// plus the kernel's final two statements; the parameter list and main
// body appear near the top of this document. Code unchanged.
__global__ void histo_kernel( unsigned char *buffer,
// After all threads finish tallying, merge this block's shared-memory
// histogram into the global one (one atomicAdd per bin per block).
__syncthreads();
atomicAdd(&(histo[threadIdx.x]), temp[threadIdx.x]);
在GPU上多线程同时对数据集中的数据进行统计。为了保证计算过程中只有一个线程在对计算结果做改动,CUDA C使用原子操作的方式。