CUDA学习笔记5——CUDA程序错误检测

CUDA程序错误检测

所有CUDA的API函数都有一个类型为cudaError_t的返回值,代表了一种错误信息;只有返回cudaSuccess时,才是成功调用。

  • cudaGetLastError()用来检测核函数的执行是否出错
  • cudaGetErrorString()输出错误信息
#include <stdio.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include<math.h>
#include <malloc.h> 
#include <opencv2/opencv.hpp>
#include <stdlib.h>



#define BLOCK_SIZE 1


//图像卷积 GPU
__global__ void sobel_gpu(unsigned char* in, unsigned char* out, const int Height, const int Width)
{
	int x = blockDim.x * blockIdx.x + threadIdx.x;
	int y = blockDim.y + blockIdx.y + threadIdx.y;
	int index = y * Width + x;

	int Gx = 0;
	int Gy = 0;

	unsigned char x0, x1, x2, x3, x4, x5, x6, x7, x8;

	if (x>0 && x<(Width-1) && y>0 && y<(Height-1))
	{
		x0 = in[(y - 1)*Width + (x - 1)];
		x1 = in[(y - 1)*Width + (x)];
		x2 = in[(y - 1)*Width + (x + 1)];
		x3 = in[(y)*Width + (x - 1)];

		x5 = in[(y)*Width + (x + 1)];
		x6 = in[(y + 1)*Width + (x - 1)];
		x7 = in[(y + 1)*Width + (x)];
		x8 = in[(y + 1)*Width + (x + 1)];

		Gx = (x0 + 2 * x3 + x6) - (x2 + 2 * x5 + x8);
		Gy = (x0 + 2 * x1 + x2) - (x6 + 2 * x7 + x8);

		out[index] = (abs(Gx) + abs(Gy)) / 2;
	}
}



// Demo: run the Sobel kernel on a JPEG image and report CUDA errors
// via cudaGetLastError()/cudaGetErrorString().
int main()
{
	// Load the input image; bail out early so an empty Mat is never
	// passed to cvtColor (which would throw).
	cv::Mat src = cv::imread("complete004.jpg");
	if (src.empty())
	{
		fprintf(stderr, "Failed to load image complete004.jpg\n");
		return 1;
	}

	// Grayscale + 3x3 Gaussian blur to denoise before edge detection.
	cv::Mat grayImg, gaussImg;
	cv::cvtColor(src, grayImg, cv::COLOR_BGR2GRAY);
	cv::GaussianBlur(grayImg, gaussImg, cv::Size(3, 3), 0, 0, cv::BORDER_DEFAULT);

	int height = src.rows;
	int width = src.cols;
	// Output image (single channel, zero-filled; borders stay 0).
	cv::Mat dst_gpu(height, width, CV_8UC1, cv::Scalar(0));
	// Device buffer size in bytes.
	int memsize = height * width * sizeof(unsigned char);
	// Device input/output buffers.
	unsigned char* in_gpu = NULL;
	unsigned char* out_gpu = NULL;

	cudaMalloc((void**)&in_gpu, memsize);
	cudaMalloc((void**)&out_gpu, memsize);
	cudaError_t error_code;

	dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE);
	// Ceil-divide so the grid covers the image even when its dimensions
	// are not multiples of the block size.
	dim3 blocksPerGrid((width + threadsPerBlock.x - 1) / threadsPerBlock.x,
	                   (height + threadsPerBlock.y - 1) / threadsPerBlock.y);

	cudaMemcpy(in_gpu, gaussImg.data, memsize, cudaMemcpyHostToDevice);

	sobel_gpu <<<blocksPerGrid, threadsPerBlock>>> (in_gpu, out_gpu, height, width);

	// BUG FIX: cudaGetLastError() right after an asynchronous launch only
	// reports launch-configuration errors; synchronize first so errors
	// raised during kernel execution are surfaced as well.
	cudaDeviceSynchronize();
	error_code = cudaGetLastError();

	printf("Error: %s\n", cudaGetErrorString(error_code));
	printf("FILE: %s\n", __FILE__);
	printf("LINE: %d\n", __LINE__);
	printf("Error code: %d\n", error_code);

	cudaMemcpy(dst_gpu.data, out_gpu, memsize, cudaMemcpyDeviceToHost);

	cv::imwrite("dst_gpu_save.png", dst_gpu);

	//cv::namedWindow("src", cv::WINDOW_NORMAL);
	cv::imshow("src", src);
	cv::imshow("dst_gpu", dst_gpu);
	cv::waitKey();

	cudaFree(in_gpu);
	cudaFree(out_gpu);

	return 0;
}



在这里插入图片描述在这里插入图片描述

樊哲勇大牛的检测CUDA运行时错误的宏函数:

#pragma once
#include<stdio.h>

// Error-checking macro for CUDA runtime API calls: evaluates `call`,
// and on any status other than cudaSuccess prints the file, line,
// numeric error code and human-readable message, then terminates the
// process. The do { } while (0) wrapper makes the expansion behave as
// a single statement, so it is safe inside an unbraced if/else.
#define CHECK(call)																\
do                                                                              \
{																				\
	const cudaError_t error_code = call;										\
	if (error_code != cudaSuccess)												\
	{																			\
		printf("CUDA Error:\n");												\
		printf("	File:	%s\n", __FILE__);									\
		printf("	Line:	%d\n",__LINE__);									\
		printf("	Error code: %d\n",error_code);								\
		printf("	Error text: %s\n", cudaGetErrorString(error_code));			\
		exit(1);																\
	}																			\
} while (0)																		

采用检测CUDA运行时错误的宏函数,图像卷积:
#include <stdio.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include<math.h>
#include <malloc.h> 
#include <opencv2/opencv.hpp>
#include <stdlib.h>

#include "error.cuh"

#define BLOCK_SIZE 1


//图像卷积 GPU
__global__ void sobel_gpu(unsigned char* in, unsigned char* out, const int Height, const int Width)
{
	int x = blockDim.x * blockIdx.x + threadIdx.x;
	int y = blockDim.y + blockIdx.y + threadIdx.y;
	int index = y * Width + x;

	int Gx = 0;
	int Gy = 0;

	unsigned char x0, x1, x2, x3, x4, x5, x6, x7, x8;

	if (x>0 && x<(Width-1) && y>0 && y<(Height-1))
	{
		x0 = in[(y - 1)*Width + (x - 1)];
		x1 = in[(y - 1)*Width + (x)];
		x2 = in[(y - 1)*Width + (x + 1)];
		x3 = in[(y)*Width + (x - 1)];

		x5 = in[(y)*Width + (x + 1)];
		x6 = in[(y + 1)*Width + (x - 1)];
		x7 = in[(y + 1)*Width + (x)];
		x8 = in[(y + 1)*Width + (x + 1)];

		Gx = (x0 + 2 * x3 + x6) - (x2 + 2 * x5 + x8);
		Gy = (x0 + 2 * x1 + x2) - (x6 + 2 * x7 + x8);

		out[index] = (abs(Gx) + abs(Gy)) / 2;
	}
}



// Demo: run the Sobel kernel, then deliberately trigger a CUDA error
// with an oversized device-to-host copy so the CHECK macro fires.
int main()
{
	cv::Mat src;
	// NOTE(review): imread result is not checked; cvtColor below will
	// throw if the file is missing — confirm the image exists.
	src = cv::imread("complete004.jpg");

	// Grayscale + 3x3 Gaussian blur to denoise before edge detection.
	cv::Mat grayImg,gaussImg;
	cv::cvtColor(src, grayImg, cv::COLOR_BGR2GRAY);
	cv::GaussianBlur(grayImg, gaussImg, cv::Size(3,3), 0, 0, cv::BORDER_DEFAULT);

	int height = src.rows;
	int width = src.cols;
	// Output image (single channel, zero-filled).
	cv::Mat dst_gpu(height, width, CV_8UC1, cv::Scalar(0));
	// Device buffer size in bytes.
	int memsize = height * width * sizeof(unsigned char);
	// Device input/output buffers.
	unsigned char* in_gpu;
	unsigned char* out_gpu;

	cudaMalloc((void**)&in_gpu, memsize);
	cudaMalloc((void**)&out_gpu, memsize);

	dim3 threadsPreBlock(BLOCK_SIZE, BLOCK_SIZE);
	// Ceil-divide so the grid covers the whole image.
	dim3 blocksPreGrid((width + threadsPreBlock.x - 1)/threadsPreBlock.x, (height + threadsPreBlock.y - 1)/threadsPreBlock.y);
	
	cudaMemcpy(in_gpu, gaussImg.data, memsize, cudaMemcpyHostToDevice);

	sobel_gpu <<<blocksPreGrid, threadsPreBlock>>> (in_gpu, out_gpu, height, width);

	// Size deliberately inflated (memsize*10) to provoke an error and
	// demonstrate the CHECK macro reporting it.
	CHECK(cudaMemcpy(dst_gpu.data, out_gpu, memsize*10, cudaMemcpyDeviceToHost));

	cv::imwrite("dst_gpu_save.png", dst_gpu);

	//cv::namedWindow("src", cv::WINDOW_NORMAL);
	cv::imshow("src", src);
	cv::imshow("dst_gpu", dst_gpu);
	cv::waitKey();

	cudaFree(in_gpu);
	cudaFree(out_gpu);

	return 0;
}



在这里插入图片描述

采用检测CUDA运行时错误的宏函数,矩阵相加:

二维网格和二维线程块对二维矩阵进行索引,每个线程可负责一个矩阵元素的计算任务;
ix = threadIdx.x + blockIdx.x * blockDim.x
iy = threadIdx.y + blockIdx.y * blockDim.y
在这里插入图片描述

#include <stdio.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "error.cuh"
#include <stdlib.h>
#include <iostream>


// Element-wise addition of two nx*ny int matrices stored row-major:
// output = input_1 + input_2. One thread per element; threads that
// fall outside the matrix do nothing.
__global__ void addMatrix(int *input_1, int *input_2, int *output, const int nx, const int ny)
{
	const int col = threadIdx.x + blockIdx.x * blockDim.x;
	const int row = threadIdx.y + blockIdx.y * blockDim.y;

	// Guard clause: the grid may overhang the matrix at the edges.
	if (col >= nx || row >= ny)
		return;

	const unsigned int flat = row * nx + col;
	output[flat] = input_1[flat] + input_2[flat];
}


// Demo: add two 16x8 int matrices on the GPU, checking every CUDA
// runtime call with the CHECK macro, and print the results.
int main(void)
{
	// Matrix dimensions: nx columns, ny rows, row-major storage.
	int nx = 16;
	int ny = 8;
	int nxy = nx * ny;
	size_t stBytesCount = nxy * sizeof(int);

	// Host buffers: A and B are inputs, C receives the result.
	int *ipHost_A, *ipHost_B, *ipHost_C;
	ipHost_A = (int *)malloc(stBytesCount);
	ipHost_B = (int *)malloc(stBytesCount);
	ipHost_C = (int *)malloc(stBytesCount);
	if (ipHost_A != NULL && ipHost_B != NULL && ipHost_C != NULL)
	{
		// A[i] = i, B[i] = i + 1, so C[i] should come back as 2*i + 1.
		for (int i = 0; i < nxy; i++)
		{
			ipHost_A[i] = i;
			ipHost_B[i] = i + 1;
		}
		memset(ipHost_C, 0, stBytesCount);
	}
	else
	{
		// BUG FIX: original printed a message but fell through and used
		// the NULL pointers; abort instead.
		printf("Fail to allocate host memory! \n");
		free(ipHost_A);
		free(ipHost_B);
		free(ipHost_C);
		exit(1);
	}
	// Allocate and initialize device memory. CHECK exits on any failure,
	// so the pointers are guaranteed valid past these calls.
	int *ipDevice_A, *ipDevice_B, *ipDevice_C;
	CHECK(cudaMalloc((int**)&ipDevice_A, stBytesCount));
	CHECK(cudaMalloc((int**)&ipDevice_B, stBytesCount));
	CHECK(cudaMalloc((int**)&ipDevice_C, stBytesCount));
	CHECK(cudaMemcpy(ipDevice_A, ipHost_A, stBytesCount, cudaMemcpyHostToDevice));
	CHECK(cudaMemcpy(ipDevice_B, ipHost_B, stBytesCount, cudaMemcpyHostToDevice));
	CHECK(cudaMemcpy(ipDevice_C, ipHost_C, stBytesCount, cudaMemcpyHostToDevice));

	// 4x4 threads per block; ceil-divide so the grid covers the matrix.
	dim3 block(4,4);
	dim3 grid((nx + block.x-1)/block.x, (ny+block.y - 1)/block.y);
	printf("Thread config:grid:<%d,%d>, block<%d,%d>\n", grid.x, grid.y, block.x, block.y);

	// Launch the kernel and immediately check for launch-config errors.
	addMatrix <<< grid, block>>> (ipDevice_A, ipDevice_B, ipDevice_C, nx, ny);
	CHECK(cudaGetLastError());

	// Blocking copy of the result; also synchronizes with the kernel.
	CHECK(cudaMemcpy(ipHost_C, ipDevice_C, stBytesCount,cudaMemcpyDeviceToHost));

	for (int i = 0; i < 10; i++)
	{
		// BUG FIX: original format string had three %d specifiers but
		// four arguments, so matrix_C was silently dropped (and "matrix"
		// was misspelled).
		printf("id:%d, matrix_A: %d, matrix_B: %d, matrix_C: %d \n", i+1, ipHost_A[i], ipHost_B[i], ipHost_C[i]);
	}
	if (ipHost_C != NULL)
	{
		// Dump the full result, 16 values (one matrix row) per line.
		for (int i = 0; i < nxy; i++)
		{
			printf("C[%d]: %d ", i, ipHost_C[i]);
			if ((i+1)%16 == 0)
			{
				printf("\n");
			}
		}
	}
	// Release host memory.
	free(ipHost_A);
	free(ipHost_B);
	free(ipHost_C);

	// Release device memory.
	CHECK(cudaFree(ipDevice_A));
	CHECK(cudaFree(ipDevice_B));
	CHECK(cudaFree(ipDevice_C));

	// Release all resources associated with the current device.
	CHECK(cudaDeviceReset());

	return 0;
}

在这里插入图片描述

deviceQuery.exe Starting... CUDA Device Query (Runtime API) version (CUDART static linking) Detected 1 CUDA Capable device(s) Device 0: "GeForce GTX 650" CUDA Driver Version / Runtime Version 9.1 / 8.0 CUDA Capability Major/Minor version number: 3.0 Total amount of global memory: 2048 MBytes (2147483648 bytes) ( 2) Multiprocessors, (192) CUDA Cores/MP: 384 CUDA Cores GPU Max Clock rate: 1072 MHz (1.07 GHz) Memory Clock rate: 2500 Mhz Memory Bus Width: 128-bit L2 Cache Size: 262144 bytes Maximum Texture Dimension Size (x,y,z) 1D=(65536), 2D=(65536, 65536), 3D=(4096, 4096, 4096) Maximum Layered 1D Texture Size, (num) layers 1D=(16384), 2048 layers Maximum Layered 2D Texture Size, (num) layers 2D=(16384, 16384), 2048 layers Total amount of constant memory: 65536 bytes Total amount of shared memory per block: 49152 bytes Total number of registers available per block: 65536 Warp size: 32 Maximum number of threads per multiprocessor: 2048 Maximum number of threads per block: 1024 Max dimension size of a thread block (x,y,z): (1024, 1024, 64) Max dimension size of a grid size (x,y,z): (2147483647, 65535, 65535) Maximum memory pitch: 2147483647 bytes Texture alignment: 512 bytes Concurrent copy and kernel execution: Yes with 1 copy engine(s) Run time limit on kernels: Yes Integrated GPU sharing Host Memory: No Support host page-locked memory mapping: Yes Alignment requirement for Surfaces: Yes Device has ECC support: Disabled CUDA Device Driver Mode (TCC or WDDM): WDDM (Windows Display Driver Model) Device supports Unified Addressing (UVA): Yes Device PCI Domain ID / Bus ID / location ID: 0 / 1 / 0 Compute Mode: deviceQuery, CUDA Driver = CUDART, CUDA Driver Version = 9.1, CUDA Runtime Version = 8.0, NumDevs = 1, Device0 = GeForce GTX 650 Result = PASS
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值