Time-Domain Beamforming with CUDA

This example demonstrates GPU-accelerated beamforming with CUDA and compares CPU and GPU performance on a large complex-valued matrix computation. By expressing the time-delay compensation of the complex signal matrix as a parallel operation, the GPU cuts the computation time dramatically for large inputs, and the speedup grows further as the number of array elements N increases.
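Concretely, for a uniform line array with element spacing d, sound speed c and signal frequency f0, the code evaluates the narrowband delay-and-sum output

$$y(\theta, t) = \sum_{n=0}^{N-1} e^{-j\,2\pi f_0\, n d \cos\theta / c}\; x_n(t)$$

over theta_N steering angles and Len time samples, i.e. a (theta_N x N) by (N x Len) complex matrix product; that product is what both the CPU triple loop and the CUDA kernel below compute.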
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <iostream>
#include <math.h>
#include <stdio.h>
#include <complex.h>
#include "cuComplex.h"
#include <typeinfo>   // header needed for printing variable types
#include <time.h>
 
typedef cuDoubleComplex complexd;
using namespace std;
#define pi acos(-1)
#define CHECK(res) if(res!=cudaSuccess){exit(-1);}
 
const int N = 70;          // number of array elements
const int Len = 2400;      // number of samples
const int theta_N = 1800;  // number of angle steps
 
 
__device__ __host__ complexd operator*(complexd a, complexd b) { return cuCmul(a,b); } 
__device__ __host__ complexd operator+(complexd a, complexd b) { return cuCadd(a,b); } 
__device__ __host__ complexd operator/(complexd a, complexd b) { return cuCdiv(a,b); } 

 
__device__ __host__ complexd exp_(complexd arg)   // complex exponential: e^{arg.x} * (cos(arg.y) + j*sin(arg.y))
{
   complexd res;    // resulting complex number
   double s, c;     
   double e = exp(arg.x);   
   sincos(arg.y, &s, &c);
   res.x = c * e;
   res.y = s * e;
   return res;
}
 
/*  CUDA kernel  */
__global__ void beamforming_nb(complexd* sig_out, complexd* sig_in,complexd* time_delay,int theta_N,int Len,int N)
{
    int row = threadIdx.x + blockDim.x * blockIdx.x;
    int col = threadIdx.y + blockDim.y * blockIdx.y;     // note: CUDA thread index components are ordered x, then y, then z
 
    // dim3 threadsPerBlocks(32, 32);  
    // dim3 numBlocks((theta_N + threadsPerBlocks.x -1)/threadsPerBlocks.x,(Len + threadsPerBlocks.y -1)/threadsPerBlocks.y);  
 
    complexd temp{0,0};
    complexd add_{0,0};
    if (row < theta_N && col < Len)   // what about using two separate ifs here?
    {
        for (int i = 0; i < N; i++) 
        {
            // sig_out (a theta_N x Len matrix): element (row, col) accumulates time_delay[row][i] * sig_in[i][col]
            temp = temp + time_delay[row * N + i] * sig_in[col + i * Len];
        }
        sig_out[row * Len + col] = temp;
        temp = add_;
    }
    /*  
        Open questions:
        1. Parallelize the (theta_N x N) by (N x Len) matrix product.   A: solved by replacing the two outer for loops with if (row < theta_N && col < Len), see success.cu
        2. Handle arrays larger than the thread count of a single block.   A: can be ignored for now
        3. How CUDA schedules blocks and threads.   A: needs further study
        4. Move more of the computation into the CUDA kernel.
        5. How to express loops inside a CUDA kernel.   A: handled with the if guard
    */
}
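/*
   Launch requirement: each thread produces one element of the theta_N x Len output,
   so the grid must satisfy gridDim.x * blockDim.x >= theta_N and
   gridDim.y * blockDim.y >= Len. The boundary check above lets surplus threads exit,
   so rounding the grid size up with a ceiling division (as done in main) is safe.
*/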
 
 
int main(int argc, char ** argv)
{
    /*  initial parameters  */
    const double c = 1500;  // speed of sound in the medium (m/s)
    //const int N = 70;     // number of sensors
    const double T = 1;     // sampling duration (s)
    const int FS = 2400;    // sampling frequency (Hz)
    auto LEN = T*FS;        // number of samples as a double, so i/LEN below is floating-point division
    double t[Len];          // time axis
    for(int i = 0;i<Len;i++)
    {
        t[i] = i/LEN;
        // cout <<t[i]<<endl;    // verified
    }
    const double f0 = 300;  // signal frequency: 300 Hz
    const double d = 0.27;  // sensor spacing: 0.27 m
    const double deg2rad = pi/180;      // cos takes radians
    // cout << pi << endl;  // verified
    const double theta = 60;            // target bearing in degrees
    const double theta_rad = theta * deg2rad;  // target bearing in radians
	cudaError_t res;
 
    /*  source signal  */
    complexd* sig_ = NULL;      // source signal array, length = sampling frequency * sampling duration
    sig_ = (complexd*)malloc(Len*sizeof(complexd));

    for(int i = 0;i<Len;i++)
    {
        complexd temp{0,2*pi*f0*t[i]};
        sig_[i] = exp_(temp);      // source signal sample
        // cout << cuCreal(sig_[i]) << '+' << cuCimag(sig_[i])<< 'i' <<'\n';     // verified
    }
     
    /*  apply the steering vector  */
    complexd* sig = NULL;         // noise-free array signal
    sig = (complexd*)malloc(N*Len*sizeof(complexd));

    for(int i = 0;i<N;i++)
    {
        complexd steer{0,2*pi*f0*cos(theta_rad)*i*d/c};  // steering phase for element i
        // cout << cuCreal(exp_(steer)) << '+' << cuCimag(exp_(steer))<< 'i' << endl;   // verified
        for(int j = 0;j<Len;j++)
        {
            sig[i*Len+j] = sig_[j] * exp_(steer);     // noise-free array signal, N x Len
            // cout << cuCreal(sig[i*Len+j]) << '+' << cuCimag(sig[i*Len+j])<< 'i' <<'\n';  // verified
        }
    }
 
    /*  add noise (not implemented in this example)   */ 
 
 
    /*  build the delay-compensation matrix over the angle sweep  */
    double theta_n = 1800;    // same value as theta_N, kept as a double so 180/theta_n below is floating-point division
    complexd *t_delay;
    t_delay = (complexd*)malloc(theta_N*N*sizeof(complexd));

    for(int i = 0;i<theta_N;i++)
    {
        for(int j = 0 ;j<N;j++)
        {
            complexd tao{0,j*2*pi*f0*cos(i*(180/theta_n)*deg2rad)*d/c};
            // cout << j*2*pi*f0*cos(i*(180/theta_n)*deg2rad)*d/c << '\t' <<j <<endl;   // verified
            t_delay[i*N + j] = exp_(cuConj(tao)); // N compensation terms for the i-th angle; the result is a theta_N x N matrix
            // cout << cuCreal(exp_(cuConj(tao))) << '+' << cuCimag(exp_(cuConj(tao)))<< 'i' <<'\n';  // verified
        }
    }
 
    /*  CPU computation  */
    double ttt;  
    clock_t at, bt;
   	at = clock();
    complexd *h_pt;
    h_pt = (complexd *)malloc(theta_N*Len*sizeof(complexd));
    complexd h_temp{0,0};
    complexd a_temp{0,0};
    for(int i = 0 ; i < theta_N; i ++ )
    {
        for(int j = 0; j < Len ;  j ++)
        {
            for(int k = 0 ; k <N; k ++)
            {
                h_temp = h_temp + t_delay[i * N + k] * sig[k * Len + j];   // (row i, col k) x (row k, col j) accumulated into (row i, col j)
            }
            h_pt[i * Len + j] = h_temp;
            h_temp = a_temp;
        }
    }
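    // Reference cost: theta_N * Len * N = 1800 * 2400 * 70, about 3.0e8 complex multiply-adds on a single CPU thread.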
    bt = clock();
    ttt = double(bt-at)/CLOCKS_PER_SEC;
    cout << "CPU: " << ttt << "s" << endl;
 
 
    
    /*  CUDA acceleration  */
    complexd *sig_in;
    complexd *time_delay;
    complexd *sig_out;
 
 
    res = cudaMalloc((void**)&sig_in,N*Len*sizeof(complexd));CHECK(res)
    res = cudaMalloc((void**)&sig_out,theta_N*Len*sizeof(complexd));CHECK(res)
    res = cudaMalloc((void**)&time_delay,theta_N*N*sizeof(complexd));CHECK(res)
 
    res = cudaMemcpy(sig_in,sig,N*Len*sizeof(complexd),cudaMemcpyHostToDevice);CHECK(res)
    res = cudaMemcpy(time_delay,t_delay,theta_N*N*sizeof(complexd),cudaMemcpyHostToDevice);CHECK(res)
 
    dim3 threadsPerBlocks(24,24);  
    // dim3 numBlocks((Len+threadsPerBlocks.x-1)/threadsPerBlocks.x,(theta_N+threadsPerBlocks.y-1)/threadsPerBlocks.y);   // note: x/y swapped relative to the kernel's row/col mapping
    dim3 numBlocks((theta_N + threadsPerBlocks.x - 1)/threadsPerBlocks.x,(Len + threadsPerBlocks.y - 1)/threadsPerBlocks.y);   // ceiling division so the grid covers all rows and columns
    // cout << numBlocks.x << "," << numBlocks.y << endl;
 
    /*
    The Jetson Orin module contains an NVIDIA Ampere architecture GPU with up to
    2048 CUDA cores and up to 64 Tensor cores, plus up to 12 Arm Cortex-A78AE CPU cores.
    */
 
    double tt;  
    clock_t a, b;
   	a = clock();
    beamforming_nb<<<numBlocks,threadsPerBlocks>>>(sig_out,sig_in,time_delay,theta_N,Len,N);   // (grid, block)
	cudaDeviceSynchronize();
	b = clock();
    tt = double(b-a)/CLOCKS_PER_SEC;
    cout << "GPU: " << tt << "s" << endl;
 
 
    complexd *pt_sig = NULL;  // host buffer for the GPU result
 
    pt_sig = (complexd*)malloc(theta_N*Len*sizeof(complexd));   // theta_N x Len output
    res = cudaMemcpy(pt_sig,sig_out,theta_N*Len*sizeof(complexd),cudaMemcpyDeviceToHost);CHECK(res)
 
    bool is_right = true;
    for(int i = 0 ; i <theta_N;i++)
    {
        for(int j = 0 ; j < Len;j++)
        {
            // if(cuCreal(pt_sig[(i*Len+j)]) != cuCreal(h_pt[i*Len+j]) || cuCimag(pt_sig[(i*Len+j)]) != cuCimag(h_pt[i*Len+j]) )
            if((fabs(cuCreal(pt_sig[(i*Len+j)]) - cuCreal(h_pt[(i*Len+j)])) > 1e-8) || (fabs(cuCimag(pt_sig[(i*Len+j)]) - cuCimag(h_pt[(i*Len+j)])) > 1e-8))   // compare with a tolerance; fabs() so negative differences are caught too
            {
                is_right = false;
                // cout << "GPU:" << cuCreal(pt_sig[(i*Len+j)]) - cuCreal(h_pt[(i*Len+j)])<< '+' << cuCimag(pt_sig[(i*Len+j)]) - cuCimag(h_pt[(i*Len+j)])<< 'i' << '\n';            
                cout << "第" << i*Len+j << "个" << "数据不正确" << endl;
                cout << "GPU:" << cuCreal(pt_sig[(i*Len+j)])<< '+' << cuCimag(pt_sig[(i*Len+j)]) << 'i' << '\n';
                cout << "CPU:" << cuCreal(h_pt[(i*Len+j)])<< '+' << cuCimag(h_pt[(i*Len+j)]) << 'i' << '\n';
            }            
        }
    }
 
    printf("The result is %s!\n",is_right?"right":"false");
    cout << cuCreal(pt_sig[(0)])<< '+' << cuCimag(pt_sig[(0)]) << 'i' << '\n';
    cout << cuCreal(h_pt[(0)])<< '+' << cuCimag(h_pt[(0)]) << 'i' << '\n';
 
    cudaFree(sig_in);
    cudaFree(time_delay);
    cudaFree(sig_out);
    free(pt_sig);
    free(h_pt);
    free(sig_);
    free(sig);
    free(t_delay);
    return 0;
}

Program output (CPU and GPU timings):

As the timings show, the gap between the plain CPU computation and the GPU-accelerated version is enormous, and this is only for a signal with a 1 s sampling duration at a 2400 Hz sampling rate. After changing N = 70 to N = 700, the CPU time grew by roughly a factor of 11, while the GPU time was only about 5 times that of the N = 70 case.
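A side note on the measurements: clock() above times the kernel from the host side (launch plus cudaDeviceSynchronize). A minimal sketch of device-side timing with CUDA events, which could replace the clock() pair around the kernel launch in the listing above (same kernel and launch configuration), would look roughly like this:

    cudaEvent_t ev_start, ev_stop;               // CUDA events mark points in the GPU stream
    cudaEventCreate(&ev_start);
    cudaEventCreate(&ev_stop);

    cudaEventRecord(ev_start);                   // enqueue "start" marker
    beamforming_nb<<<numBlocks,threadsPerBlocks>>>(sig_out,sig_in,time_delay,theta_N,Len,N);
    cudaEventRecord(ev_stop);                    // enqueue "stop" marker right after the kernel
    cudaEventSynchronize(ev_stop);               // wait until the kernel and the stop marker have completed

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, ev_start, ev_stop);  // elapsed GPU time in milliseconds
    cout << "GPU (events): " << ms/1000.0 << "s" << endl;

    cudaEventDestroy(ev_start);
    cudaEventDestroy(ev_stop);

Event timestamps are recorded on the GPU itself, so they exclude host-side launch overhead and only require blocking the CPU at the final synchronize.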
