// CUDA sample program: vector addition (process.cu)
// Build: nvcc -o process process.cu

#include "./book.h"
#define N 1028
// Kernel: element-wise vector addition c[i] = a[i] + b[i].
// Written for a launch of one thread per block: each block handles
// exactly the element whose index equals its blockIdx.x.
__global__ void add(int *a, int *b, int *c) {
    int tid = blockIdx.x;   // one element per block
    printf ("bianhao %d\n",tid);
    if (tid >= N)
        return;             // guard the grid tail
    c[tid] = a[tid] + b[tid];
}
// Kernel: element-wise vector addition using a grid-stride loop, so any
// grid/block configuration covers all N elements.
__global__ void add_all_type(int *a, int *b, int *c) {
    // Flat global thread index.
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    printf ("bianhao %d\n",tid);
    // Thread 0 reports the grid size once (debug output).
    if (tid == 0) {
        printf("gridDim.x = %d ",gridDim.x);
    }
    // Grid-stride loop: advance by the total number of launched threads.
    for (; tid < N; tid += blockDim.x * gridDim.x) {
        c[tid] = a[tid] + b[tid];
    }
}
// Host driver: fill two N-element vectors, add them on the GPU with a
// grid-stride kernel, and print every result.
int main(void) {
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;

    // Allocate device buffers for the two inputs and the output.
    HANDLE_ERROR(cudaMalloc((void **)&dev_a, N * sizeof(int)));
    HANDLE_ERROR(cudaMalloc((void **)&dev_b, N * sizeof(int)));
    HANDLE_ERROR(cudaMalloc((void **)&dev_c, N * sizeof(int)));

    // Fill host inputs: a[i] = -i, b[i] = i*i.
    for (int i = 0; i < N; i++) {
        a[i] = -i;
        b[i] = i * i;
    }

    // Copy inputs host -> device.
    HANDLE_ERROR(cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice));
    HANDLE_ERROR(cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice));

    // N/2 blocks of 2 threads = N threads; the kernel's grid-stride loop
    // would also cover any shortfall for other configurations.
    add_all_type<<<N / 2, 2>>>(dev_a, dev_b, dev_c);
    // A kernel launch returns no status: check launch-configuration
    // errors and asynchronous execution errors explicitly.
    HANDLE_ERROR(cudaGetLastError());
    HANDLE_ERROR(cudaDeviceSynchronize());

    // BUG FIX: the result lives on the device, so the copy direction is
    // DeviceToHost (the original said cudaMemcpyHostToHost).
    HANDLE_ERROR(cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost));

    for (int i = 0; i < N; i++) {
        printf("%d + %d = %d\n", a[i], b[i], c[i]);
    }

    // Release device memory, checking the return codes as everywhere else.
    HANDLE_ERROR(cudaFree(dev_a));
    HANDLE_ERROR(cudaFree(dev_b));
    HANDLE_ERROR(cudaFree(dev_c));

    return 0;
}


// ---------------------------------------------------------------------------
// book.h (helper header used by the program above):
// ---------------------------------------------------------------------------

/*
 * Copyright 1993-2010 NVIDIA Corporation.  All rights reserved.
 *
 * NVIDIA Corporation and its licensors retain all intellectual property and
 * proprietary rights in and to this software and related documentation.
 * Any use, reproduction, disclosure, or distribution of this software
 * and related documentation without an express license agreement from
 * NVIDIA Corporation is strictly prohibited.
 *
 * Please refer to the applicable NVIDIA end user license agreement (EULA)
 * associated with this source code for terms and conditions that govern
 * your use of this NVIDIA software.
 *
 */


#ifndef __BOOK_H__
#define __BOOK_H__
#include <stdio.h>

// Report a CUDA runtime error with file/line context and abort.
// Invoked through the HANDLE_ERROR macro so that __FILE__/__LINE__
// identify the failing call site, not this helper.
static void HandleError( cudaError_t err,
                         const char *file,
                         int line ) {
    if (err != cudaSuccess) {
        // Diagnostics belong on stderr (unbuffered), so the message is
        // flushed before exit() terminates the process.
        fprintf( stderr, "%s in %s at line %d\n", cudaGetErrorString( err ),
                 file, line );
        exit( EXIT_FAILURE );
    }
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))


#define HANDLE_NULL( a ) {if (a == NULL) { \
                            printf( "Host memory failed in %s at line %d\n", \
                                    __FILE__, __LINE__ ); \
                            exit( EXIT_FAILURE );}}

// Exchange the values of a and b through a temporary copy.
// Requires T to be copy-constructible and copy-assignable.
template< typename T >
void swap( T& a, T& b ) {
    T tmp = b;
    b = a;
    a = tmp;
}


// Allocate `size` bytes on the host and fill each byte with rand().
// The caller owns the returned buffer and must free() it.
void* big_random_block( int size ) {
    unsigned char *buf = (unsigned char*)malloc( size );
    HANDLE_NULL( buf );
    // Walk the buffer with a pointer instead of an index.
    for (unsigned char *p = buf; p != buf + size; ++p)
        *p = rand();
    return buf;
}

// Allocate `size` ints on the host and fill each with rand().
// The caller owns the returned buffer and must free() it.
int* big_random_block_int( int size ) {
    int *buf = (int*)malloc( size * sizeof(int) );
    HANDLE_NULL( buf );
    // Walk the buffer with a pointer instead of an index.
    for (int *p = buf; p != buf + size; ++p)
        *p = rand();
    return buf;
}


// a place for common kernels - starts here

// Device helper: convert one HSL channel to an 8-bit component.
// n1/n2 are the HSL intermediates (m1/m2); hue is in degrees and may be
// up to 360 out of range on either side.
__device__ unsigned char value( float n1, float n2, int hue ) {
    // Wrap hue back into range (callers pass h-120 .. h+120).
    hue = (hue > 360) ? hue - 360 : (hue < 0 ? hue + 360 : hue);

    // Piecewise-linear palette, then scale to a byte.
    float v;
    if (hue < 60)
        v = n1 + (n2-n1)*hue/60;
    else if (hue < 180)
        v = n2;
    else if (hue < 240)
        v = n1 + (n2-n1)*(240-hue)/60;
    else
        v = n1;
    return (unsigned char)(255 * v);
}

// Kernel: map a field of floats (expected in [0,1]) to 4-byte RGBA pixels
// using an HSL-style palette; one pixel per thread on a 2D launch.
// NOTE(review): no bounds check -- assumes the grid exactly covers the
// image (width = blockDim.x * gridDim.x); confirm at the call site.
__global__ void float_to_color( unsigned char *optr,
                              const float *outSrc ) {
    // map from threadIdx/BlockIdx to pixel position
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int offset = x + y * blockDim.x * gridDim.x;

    // Lightness comes straight from the input; saturation is fixed at 1.
    float l = outSrc[offset];
    float s = 1;
    // Hue in degrees: input 0 maps to 180, wrapping around the circle.
    int h = (180 + (int)(360.0f * outSrc[offset])) % 360;
    float m1, m2;

    // Standard HSL -> RGB intermediates.
    if (l <= 0.5f)
        m2 = l * (1 + s);
    else
        m2 = l + s - l * s;
    m1 = 2 * l - m2;

    // R, G, B sample the palette 120 degrees apart; alpha is opaque.
    optr[offset*4 + 0] = value( m1, m2, h+120 );
    optr[offset*4 + 1] = value( m1, m2, h );
    optr[offset*4 + 2] = value( m1, m2, h -120 );
    optr[offset*4 + 3] = 255;
}

// Kernel: uchar4 overload of float_to_color -- identical palette math,
// but writes one uchar4 (x=R, y=G, z=B, w=A) per pixel.
// NOTE(review): no bounds check -- assumes the grid exactly covers the
// image (width = blockDim.x * gridDim.x); confirm at the call site.
__global__ void float_to_color( uchar4 *optr,
                              const float *outSrc ) {
    // map from threadIdx/BlockIdx to pixel position
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int offset = x + y * blockDim.x * gridDim.x;

    // Lightness comes straight from the input; saturation is fixed at 1.
    float l = outSrc[offset];
    float s = 1;
    // Hue in degrees: input 0 maps to 180, wrapping around the circle.
    int h = (180 + (int)(360.0f * outSrc[offset])) % 360;
    float m1, m2;

    // Standard HSL -> RGB intermediates.
    if (l <= 0.5f)
        m2 = l * (1 + s);
    else
        m2 = l + s - l * s;
    m1 = 2 * l - m2;

    // R, G, B sample the palette 120 degrees apart; alpha is opaque.
    optr[offset].x = value( m1, m2, h+120 );
    optr[offset].y = value( m1, m2, h );
    optr[offset].z = value( m1, m2, h -120 );
    optr[offset].w = 255;
}


#if _WIN32
    //Windows threads.
    #include <windows.h>

    // Thread handle and entry-point signature for the Win32 backend.
    typedef HANDLE CUTThread;
    typedef unsigned (WINAPI *CUT_THREADROUTINE)(void *);

    // Declaration/termination helpers for writing thread procedures that
    // compile on both backends.
    #define CUT_THREADPROC unsigned WINAPI
    #define  CUT_THREADEND return 0

#else
    //POSIX threads.
    #include <pthread.h>

    // Thread handle and entry-point signature for the pthreads backend.
    typedef pthread_t CUTThread;
    typedef void *(*CUT_THREADROUTINE)(void *);

    // NOTE(review): CUT_THREADPROC is `void` here while CUT_THREADROUTINE
    // returns void* -- the two look inconsistent; confirm how thread
    // procedures are declared before relying on these macros.
    #define CUT_THREADPROC void
    #define  CUT_THREADEND
#endif

// Launch func(data) on a new thread and return its handle.
CUTThread start_thread( CUT_THREADROUTINE, void *data );

// Block until the given thread finishes (Win32: also closes its handle).
void end_thread( CUTThread thread );

// Forcibly terminate/cancel the thread without waiting for it.
void destroy_thread( CUTThread thread );

// Block until all `num` threads in the array have finished.
void wait_for_threads( const CUTThread *threads, int num );

#if _WIN32
    //Create thread
    //Launch func(data) on a new Windows thread; returns its HANDLE
    //(NULL on failure -- the caller does not check).
    CUTThread start_thread(CUT_THREADROUTINE func, void *data){
        return CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE)func, data, 0, NULL);
    }

    //Wait for thread to finish
    //Wait for the thread to finish, then release its OS handle.
    void end_thread(CUTThread thread){
        WaitForSingleObject(thread, INFINITE);
        CloseHandle(thread);
    }

    //Destroy thread
    //Forcibly terminate the thread and release its handle.
    //NOTE(review): TerminateThread gives the thread no chance to clean
    //up; use only as a last resort.
    void destroy_thread( CUTThread thread ){
        TerminateThread(thread, 0);
        CloseHandle(thread);
    }

    //Wait for multiple threads
    //Wait for all `num` threads at once, then close each handle.
    void wait_for_threads(const CUTThread * threads, int num){
        WaitForMultipleObjects(num, threads, true, INFINITE);

        for(int i = 0; i < num; i++)
            CloseHandle(threads[i]);
    }

#else
    //Create thread
    //Launch func(data) on a new pthread and hand back its identifier.
    //The creation result is not checked (matches the Win32 backend).
    CUTThread start_thread(CUT_THREADROUTINE func, void * data){
        pthread_t id;
        pthread_create(&id, NULL, func, data);
        return id;
    }

    //Wait for thread to finish
    //Join the thread, discarding its return value.
    void end_thread(CUTThread thread){
        pthread_join(thread, NULL);
    }

    //Destroy thread
    //Request cancellation of the thread; does not wait for it to exit.
    void destroy_thread( CUTThread thread ){
        pthread_cancel(thread);
    }

    //Wait for multiple threads
    //Join every thread in the array, in order.
    void wait_for_threads(const CUTThread * threads, int num){
        for (int idx = 0; idx < num; ++idx) {
            end_thread(threads[idx]);
        }
    }

#endif




#endif  // __BOOK_H__


  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
编写CUDA程序主要包含以下步骤: 1. 定义GPU核心函数:使用`__global__`关键字修饰函数,表示该函数在GPU上运行。例如: ``` __global__ void myKernel(float* input, float* output, int size) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < size) { output[tid] = input[tid] * 2; } } ``` 2. 在主函数中分配GPU内存:使用`cudaMalloc`函数分配GPU内存,并使用`cudaMemcpy`函数将数据从主机内存拷贝到GPU内存。例如: ``` int size = 100; float* input_host = (float*)malloc(size * sizeof(float)); float* output_host = (float*)malloc(size * sizeof(float)); for (int i = 0; i < size; i++) { input_host[i] = i; } float* input_device; float* output_device; cudaMalloc(&input_device, size * sizeof(float)); cudaMalloc(&output_device, size * sizeof(float)); cudaMemcpy(input_device, input_host, size * sizeof(float), cudaMemcpyHostToDevice); ``` 3. 调用GPU核心函数:使用`<<<blocks, threads>>>`语法调用GPU核心函数。其中,`blocks`表示块的数量,`threads`表示每个块中线程的数量。例如: ``` int threads_per_block = 32; int blocks_per_grid = (size + threads_per_block - 1) / threads_per_block; myKernel<<<blocks_per_grid, threads_per_block>>>(input_device, output_device, size); ``` 4. 将计算结果从GPU内存拷贝回主机内存:使用`cudaMemcpy`函数将计算结果从GPU内存拷贝回主机内存。例如: ``` cudaMemcpy(output_host, output_device, size * sizeof(float), cudaMemcpyDeviceToHost); ``` 5. 释放GPU内存:使用`cudaFree`函数释放GPU内存。例如: ``` cudaFree(input_device); cudaFree(output_device); ``` 需要注意的是,CUDA程序的编写需要掌握GPU并行计算的原理和CUDA API函数的使用方法,同时需要对GPU硬件和CUDA程序进行优化,以提高程序的性能。

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值