CUDA Matrix Multiplication

#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <iostream>

#include "cuda_runtime.h"
#include "device_launch_parameters.h"
using namespace std;
#define M 500
#define K 500
#define N 500

#define BLOCK_SIZE 16 // tile width: each thread block computes one BLOCK_SIZE x BLOCK_SIZE tile of C, one thread per element
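// A 16 x 16 block is 256 threads, comfortably below the usual 1024-threads-per-block limit;
// the tiled kernel below also uses 2 * 16 * 16 * sizeof(double) = 4 KB of shared memory per block.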

// fill an array with random values in [1, 10]
void initial(double* array, int size)
{
	for (int i = 0; i < size; i++)
	{
		array[i] = (double)(rand() % 10 + 1);
	}
}

// CPU (host) matrix multiplication: C = A * B
void multiplicateMatrixOnHost(double* array_A, double* array_B, double* array_C, int M_p, int K_p, int N_p)
{
	for (int i = 0; i < M_p; i++)
	{
		for (int j = 0; j < N_p; j++)
		{
			double sum = 0;
			for (int k = 0; k < K_p; k++)
			{
				sum += array_A[i * K_p + k] * array_B[k * N_p + j];
			}
			array_C[i * N_p + j] = sum;
		}
	}

}

// naive GPU matrix multiplication: one thread computes one element of C
__global__ void multiplicateMatrixOnDevice(double* array_A, double* array_B, double* array_C, int M_p, int K_p, int N_p)
{
	int ix = threadIdx.x + blockDim.x * blockIdx.x; // global column index in C (0..N_p-1)
	int iy = threadIdx.y + blockDim.y * blockIdx.y; // global row index in C (0..M_p-1)

	if (ix < N_p && iy < M_p)
	{
		double sum = 0;
		for (int k = 0; k < K_p; k++)
		{
			sum += array_A[iy * K_p + k] * array_B[k * N_p + ix];
		}
		array_C[iy * N_p + ix] = sum;
	}
}
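
// How the shared-memory (tiled) kernel below works: the K dimension is split into
// ceil(K / BLOCK_SIZE) phases. In each phase the thread block cooperatively loads one
// BLOCK_SIZE x BLOCK_SIZE tile of A and one of B into shared memory (zero-padding past
// the matrix edges), synchronizes, and accumulates the partial dot products from that
// tile. Each input element is therefore fetched from global memory roughly BLOCK_SIZE
// times less often than in the naive kernel above.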

// tiled GPU matrix multiplication using shared memory: computes C = A * B
__global__ void matrixMultiplyShared(double* A, double* B, double* C,
	int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns)
{
	__shared__ double sharedM[BLOCK_SIZE][BLOCK_SIZE];
	__shared__ double sharedN[BLOCK_SIZE][BLOCK_SIZE];

	int bx = blockIdx.x;
	int by = blockIdx.y;
	int tx = threadIdx.x;
	int ty = threadIdx.y;

	int row = by * BLOCK_SIZE + ty;
	int col = bx * BLOCK_SIZE + tx;

	double Csub = 0.0; // accumulator for one element of C (double, to match the element type)
	// i indexes the current tile (phase) along the K dimension
	for (int i = 0; i < (int)(ceil((double)numAColumns / BLOCK_SIZE)); i++)
	{
		// each thread loads one element of the A tile and one element of the B tile into shared memory, padding with zeros at the edges
		if (i * BLOCK_SIZE + tx < numAColumns && row < numARows)
			sharedM[ty][tx] = A[row * numAColumns + i * BLOCK_SIZE + tx];
		else
			sharedM[ty][tx] = 0.0;

		if (i * BLOCK_SIZE + ty < numBRows && col < numBColumns)
			sharedN[ty][tx] = B[(i * BLOCK_SIZE + ty) * numBColumns + col];
		else
			sharedN[ty][tx] = 0.0;
		// wait until the whole tile has been loaded by every thread in the block
		__syncthreads();

		// accumulate the partial dot product contributed by this tile
		for (int j = 0; j < BLOCK_SIZE; j++)
			Csub += sharedM[ty][j] * sharedN[j][tx];
		__syncthreads(); // make sure the tile is no longer needed before it is overwritten in the next phase
	}
	// write the final result to global memory
	if (row < numCRows && col < numCColumns)
		C[row * numCColumns + col] = Csub;
}


int main(int argc, char** argv)
{
	// element counts of the three matrices
	int Axy = M * K;
	int Bxy = K * N;
	int Cxy = M * N;

	// allocate host matrices
	double* h_A, * h_B, * hostRef, * deviceRef, * sharedeviceRef;
	h_A = (double*)malloc(Axy * sizeof(double));
	h_B = (double*)malloc(Bxy * sizeof(double));

	hostRef = (double*)malloc(Cxy * sizeof(double));          // receives the CPU result
	deviceRef = (double*)malloc(Cxy * sizeof(double));        // receives the naive GPU result
	sharedeviceRef = (double*)malloc(Cxy * sizeof(double));   // receives the shared-memory GPU result

	// fill A and B with random values
	initial(h_A, Axy);
	initial(h_B, Bxy);
	/*******************************   CPU benchmark   *********************************/
	clock_t cpu_start, cpu_stop;
	cpu_start = clock();
	multiplicateMatrixOnHost(h_A, h_B, hostRef, M, K, N);
	cpu_stop = clock();
	double time;
	time = (double)(cpu_stop - cpu_start) / CLOCKS_PER_SEC * 1000; // convert to milliseconds
	cout << "CPU time: " << time << " ms" << endl;

	/*******************************   GPU benchmark (naive kernel)   *********************************/
	// allocate device matrices
	double* d_A, * d_B, * d_C;
	cudaMalloc((void**)&d_A, Axy * sizeof(double));
	cudaMalloc((void**)&d_B, Bxy * sizeof(double));
	cudaMalloc((void**)&d_C, Cxy * sizeof(double));

	// copy the input matrices from host to device
	cudaMemcpy(d_A, h_A, Axy * sizeof(double), cudaMemcpyHostToDevice);
	cudaMemcpy(d_B, h_B, Bxy * sizeof(double), cudaMemcpyHostToDevice);


	int dimx = BLOCK_SIZE;
	int dimy = BLOCK_SIZE;
	dim3 block(dimx, dimy);
	dim3 grid((N + block.x - 1) / block.x, (M + block.y - 1) / block.y); // grid.x covers the columns (N), grid.y the rows (M)
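	// With M = N = 500 and BLOCK_SIZE = 16 this is a 32 x 32 grid of 16 x 16 blocks:
	// one thread per element of C, with the bounds checks in the kernels masking off
	// the threads that fall outside the 500 x 500 matrix (512 threads span each dimension).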

	// time the naive kernel with CUDA events
	cudaEvent_t gpustart, gpustop;
	float elapsedTime = 0.0;
	cudaEventCreate(&gpustart);
	cudaEventCreate(&gpustop);
	cudaEventRecord(gpustart, 0);
	multiplicateMatrixOnDevice<<<grid, block>>>(d_A, d_B, d_C, M, K, N);
	cudaEventRecord(gpustop, 0);
	cudaEventSynchronize(gpustop);
	cudaEventElapsedTime(&elapsedTime, gpustart, gpustop);
	cudaEventDestroy(gpustart);
	cudaEventDestroy(gpustop);

	// copy the result back to the host
	cudaMemcpy(deviceRef, d_C, Cxy * sizeof(double), cudaMemcpyDeviceToHost);
	printf("Matrix_deviceRef: (%d×%d)  <<<(%d,%d),(%d,%d)>>>  GPU运行时间为:%fms\n",
		M, N, grid.x, grid.y, block.x, block.y, elapsedTime);

	/*******************************   GPU benchmark (shared-memory kernel)   *********************************/
	// allocate device matrices for the second run
	double* d_A1, * d_B1, * d_C1;
	cudaMalloc((void**)&d_A1, Axy * sizeof(double));
	cudaMalloc((void**)&d_B1, Bxy * sizeof(double));
	cudaMalloc((void**)&d_C1, Cxy * sizeof(double));

	// copy the input matrices from host to device
	cudaMemcpy(d_A1, h_A, Axy * sizeof(double), cudaMemcpyHostToDevice);
	cudaMemcpy(d_B1, h_B, Bxy * sizeof(double), cudaMemcpyHostToDevice);
	// time the shared-memory kernel with CUDA events
	cudaEvent_t start2, stop2;
	cudaEventCreate(&start2);
	cudaEventCreate(&stop2);
	cudaEventRecord(start2,0);
	matrixMultiplyShared<<<grid, block>>>(d_A1, d_B1, d_C1, M, K, K, N, M, N);
	cudaEventRecord(stop2,0);
	cudaEventSynchronize(stop2);
	float time1;
	cudaEventElapsedTime(&time1, start2, stop2);
	cudaEventDestroy(start2);
	cudaEventDestroy(stop2);

	// copy the result back to the host
	cudaMemcpy(sharedeviceRef, d_C1, Cxy * sizeof(double), cudaMemcpyDeviceToHost);

	cout << "使用共享内存,GPU用时: " << time1 << "ms" << endl;
	printf("CPU结果 某元素%g\n",hostRef[15]);
	printf("GPU结果 某元素%g\n", deviceRef[15]);
	printf("GPU共享内存结果 某元素%g\n", sharedeviceRef[15]);
	// check correctness: compare the naive GPU result against the CPU reference
	float max_err = 0;
	float average_err = 0;
	int n = M; // the matrices are square here (M == N), so one dimension suffices
	for (int i = 0; i < n; i++)
	{
		for (int j = 0; j < n; j++)
		{
			if (hostRef[i * n + j] != 0)
			{
				// relative error of this element (fabs gives the absolute value)
				float err = fabs((deviceRef[i * n + j] - hostRef[i * n + j]) / hostRef[i * n + j]);

				if (max_err < err) max_err = err;

				average_err += err;
			}
		}
	}
	printf("不使用共享内存,cpu与gpu对比:Max error: %g Average error: %g\n", max_err, average_err / (n * n));
	// check correctness: compare the shared-memory GPU result against the CPU reference
	float max_err2 = 0;
	float average_err2 = 0;
	for (int i = 0; i < n; i++)
	{
		for (int j = 0; j < n; j++)
		{
			if (hostRef[i * n + j] != 0)
			{
				// relative error of this element
				float err = fabs((sharedeviceRef[i * n + j] - hostRef[i * n + j]) / hostRef[i * n + j]);

				if (max_err2 < err) max_err2 = err;

				average_err2 += err;
			}
		}
	}
	printf("使用共享内存,cpu与gpu对比:Max error: %g Average error: %g\n", max_err2, average_err2 / (n * n));
	// free device and host memory
	cudaFree(d_A);
	cudaFree(d_B);
	cudaFree(d_C);
	cudaFree(d_A1);
	cudaFree(d_B1);
	cudaFree(d_C1);

	free(h_A);
	free(h_B);
	free(hostRef);
	free(deviceRef);
	free(sharedeviceRef);

	cudaDeviceReset();

	return (0);
}
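
One way to build and run this (assuming the source is saved as matmul.cu, a filename chosen here for illustration, and the CUDA toolkit's nvcc compiler is on the PATH):

nvcc -O2 matmul.cu -o matmul
./matmul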

Run results: [screenshot of the console output]
