Source Code Share: Quickly Building a Neural Network in C++ to Jointly Approximate sin and cos

Training uses a single input drawn at random from [-2π, 2π]; the target is a 2-dimensional vector holding the sin and cos values of that input. This post implements the neural network in C++ from scratch and trains it to approximate both functions at once.

$$X = \{x_1\},\quad x_1 \in [-2\pi,\, 2\pi].$$

$$Y = \{y_1, y_2\} = F(X) = \{\sin(x_1),\, \cos(x_1)\},\quad y_1, y_2 \in [-1, 1].$$
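As a quick, self-contained illustration of this sampling scheme in plain standard C++ (the seed and sample count here are arbitrary, and the snippet is independent of the CLMatrix code below):

#include <cmath>
#include <cstdio>
#include <random>

int main() {
	std::mt19937 gen(42);
	const double pi = std::acos(-1.0);
	std::uniform_real_distribution<double> dist(-2.0 * pi, 2.0 * pi);
	for (int i = 0; i < 3; ++i) {
		double x = dist(gen); // x1 ~ U[-2π, 2π]
		std::printf("x=%+.4f  sin=%+.4f  cos=%+.4f\n", x, std::sin(x), std::cos(x));
	}
	return 0;
}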

Test results: the number of training steps configured below is on the high side and can be reduced. After training, the network reaches a prediction accuracy of 99.9% on the test set.


Test source code (CLMatrix is a high-speed matrix class; see 【C++矩阵类源码】 for its implementation):
//DESIGNED BY CAILUO @2020-02-10 
//MINI-SUPPORT @ C++14

#include <cmath>
#include <cstdlib>   // rand, srand, RAND_MAX
#include <ctime>
#include <functional>
#include <vector>
#include <iostream>
#include <algorithm>
#include "CLMatrix.h"

using namespace std;
//Transfer (activation) function: sigmoid. With isDev == true it returns
//the derivative computed from the cached output y instead of sigmoid(x).
double _sigmoid(double x, double y = 0, bool isDev = false) {
	if (isDev)
		return y * (1 - y);
	else
		return 1.0 / (1 + exp(-1.0 * x));
}
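//NOTE: the sigmoid's derivative can be expressed through its output alone:
//if y = 1/(1+exp(-x)) then dy/dx = y*(1-y). That is why the backward pass
//below calls _sigmoid(0, y, true) with the cached activation y and never
//needs the pre-activation x.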
//uniform random number in [min(a,b), max(a,b)]
double rand_A_B(double a, double b) {
	return rand() / double(RAND_MAX) * fabs(a - b) + min(a, b);
}

int main() {

	//define the helper functors used below
	srand((unsigned int)time(0));
	function<void(CLMATRIX_CALLBACK_PARAM)> randf = [](CLMATRIX_CALLBACK_PARAM) { v = rand_A_B(-1, 1); }; // uniform init in [-1,1] for weights and biases
	auto randfx = [](CLMATRIX_CALLBACK_PARAM) { v = rand_A_B(-3.14159265 * 2.0, 3.14159265 * 2.0); }; // x in [-2π, 2π]; the original 3.1515 was a typo for π
	auto sigmoid = [](CLMATRIX_CALLBACK_PARAM) { v = _sigmoid(v); };
	auto sigmoid_dev = [](CLMATRIX_CALLBACK_PARAM) { v = _sigmoid(0, v, true); };
	auto pureLine = [](CLMATRIX_CALLBACK_PARAM) {}; // linear (identity) transfer: leave v unchanged
	auto pureLine_dev = [](CLMATRIX_CALLBACK_PARAM) { v = 1; };

	//define training and test sets
	size_t trainSi = 4000;
	vector<CLMatrix> trainIn(trainSi);
	vector<CLMatrix> trainTag(trainSi);

	size_t testSi = trainSi / 2;
	vector<CLMatrix> testIn(testSi);
	vector<CLMatrix> testTag(testSi);

	//generate training and test data
	for (size_t i = 0; i < max(trainSi, testSi); i++)
	{
		if (i < trainSi) {
			trainIn[i].makeSquare(1, randfx); // x drawn uniformly from [-2π, 2π]
			trainTag[i].make(1, 2, trainIn[i][0][0])
				.reset([](CLMATRIX_CALLBACK_PARAM)
					{
						if (c % 2 == 0) v = sin(v); // output 1 is sin(x)
						else v = cos(v); // output 2 is cos(x)
					});
		}
		if (i < testSi) {
			testIn[i].makeSquare(1, randfx);
			testTag[i].make(1, 2, testIn[i][0][0])
				.reset([](CLMATRIX_CALLBACK_PARAM)
					{
						if (c % 2 == 0) v = sin(v);
						else v = cos(v);
					});
		}
	}

	//define the neural network structure:
	//1 input, hidden layers of 10, 8, and 10 nodes, and 2 outputs
	vector<size_t> lay = { 1,10,8,10,2 };// 1 input, 3 hidden layers, 2 outputs
	vector<function<void(CLMATRIX_CALLBACK_PARAM)>> trans = { sigmoid,sigmoid,sigmoid,pureLine };//every layer uses sigmoid except the output layer, which is linear
	vector<function<void(CLMATRIX_CALLBACK_PARAM)>> transDev = { sigmoid_dev,sigmoid_dev,sigmoid_dev,pureLine_dev };
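	//NOTE: sin and cos take values in [-1,1], outside the sigmoid's (0,1) range,
	//so the output layer must use the linear transfer function.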

	//define the main trainable parameters W, B (weight matrices and bias matrices)
	vector<CLMatrix> W, B;

	class FcNn {
	public:
		// the training procedure is wrapped in a static member function
		static double train(
			const vector<CLMatrix>& trainIn,
			const vector<CLMatrix>& trainTag,
			const vector<CLMatrix>& testIn,
			const vector<CLMatrix>& testTag,
			size_t trainStep,
			const vector<size_t>& lay,
			const vector<function<void(CLMATRIX_CALLBACK_PARAM)>>& trans,
			const vector<function<void(CLMATRIX_CALLBACK_PARAM)>>& transDev,
			vector<CLMatrix>& w,
			vector<CLMatrix>& b,
			const function<void(CLMATRIX_CALLBACK_PARAM)>* pRandf = 0,
			double ls = 0.01,
			double er = 0.0005,
			bool logout = true
		) {
			if (lay.size() < 3)
				throw std::runtime_error("Layer definition error: need at least 3 layers!");
			if (trans.size() < lay.size() - 1 || transDev.size() < lay.size() - 1)
				throw std::runtime_error("Transfer function lists do not match the layer definition!");
			if (pRandf) {
				//if an init functor is supplied, (re)initialize W and B; pass nullptr to keep existing weights
				w.resize(lay.size() - 1), b.resize(lay.size() - 1);
				for (size_t i = 0; i < lay.size() - 1; i++)
				{
					w[i].make(lay[i], lay[i + 1], *pRandf);
					b[i].make(1, lay[i + 1], *pRandf);
				}
			}
			vector<CLMatrix>  y(lay.size() - 1), dy(lay.size() - 1), _dy(lay.size() - 1);
			vector<CLMatrix>  dw(lay.size() - 1), db(lay.size() - 1);
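			//y[i]: activations of layer i+1; dy[i]: local error term of that layer;
			//_dy[i]: error propagated back to the previous layer; dw/db are unused scratch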
			//training
			if (logout) cout << "\ntrain start ...\n";
			size_t skt = min(trainIn.size(), trainTag.size());
			for (size_t k = 0; k < trainStep; k++)
			{
				//forward pass
				auto& in = trainIn[k % skt];
				for (size_t i = 0; i < lay.size() - 1; i++)
				{
					if (i == 0)
						y[i] = (in * w[i] + b[i]).reset(trans[i]); // y=f(x) = f(wx+b)
					else
						y[i] = (y[i - 1] * w[i] + b[i]).reset(trans[i]); // y=f(x) = f(wx+b)
				}

				//backward pass: propagate error gradients
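				//With squared-error loss E = ||y - tag||^2 / 2, the steps below implement:
				//  output layer:  dy = f'(y) .* (y - tag)
				//  hidden layers: dy = f'(y) .* _dy_from_above
				//  propagated:    _dy = rowsum(W .* dy)  -- error handed to the previous layer
				//  updates:       W -= ls * a_prev^T * dy,  b -= ls * dy
				//where f'(y) is the transfer derivative written in terms of the layer's own output y.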
				auto& tag = trainTag[k % skt];
				for (int i = int(lay.size()) - 2; i >= 0; i--)
				{
					if (i == int(lay.size()) - 2) {
						auto err = y[i] - tag; // take the error term before reset() overwrites y[i]; the original one-liner relied on unspecified evaluation order (pre-C++17)
						dy[i] = y[i].reset(transDev[i]).mul(err);
						_dy[i] = w[i].dotMul(dy[i]).sumRows();
						w[i] -= (y[i - 1].T() * dy[i] * ls);
						b[i] -= (dy[i] * ls);
					}
					else if (i == 0) {
						dy[i] = y[i].reset(transDev[i]).mul_T(_dy[i + 1]);
						w[i] -= (in.T() * dy[i] * ls);
						b[i] -= (dy[i] * ls);
					}
					else {
						dy[i] = y[i].reset(transDev[i]).mul_T(_dy[i + 1]);
						_dy[i] = w[i].dotMul(dy[i]).sumRows();
						w[i] -= (y[i - 1].T() * dy[i] * ls);
						b[i] -= (dy[i] * ls);
					}
					//optional validity check on the gradients; not required and can be removed
					try
					{
						dy[i].invalidPrintAndThrow();
					}
					catch (const std::runtime_error & e)
					{
						printf("\nError Matrix Info:------------------------\n");
						for (size_t j = 0; j < lay.size() - 1; ++j) {
							printf("\nlay %zu:\n", j + 1);
							w[j].print("W"); b[j].print("B"); y[j].print("Y");
							dy[j].print("dy"); _dy[j].print("dy sent to the previous layer");
						}
						printf("\nInvalid exception: Step = %zu !", k);
						throw; // rethrow the original exception after logging
					}
				}
			}
			//test	
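			//accuracy metric: a test sample counts as correct when its squared
			//error (computed below) does not exceed the threshold er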
			double yes = 0;
			size_t sk = min(testIn.size(), testTag.size());
			for (size_t k = 0; k < sk; k++)
			{
				auto index = k % sk;
				auto& in = testIn[index];
				for (size_t i = 0; i < lay.size() - 1; i++)
				{
					if (i == 0)
						y[i] = (in * w[i] + b[i]).reset(trans[i]); // y=f(x) = f(wx+b)
					else
						y[i] = (y[i - 1] * w[i] + b[i]).reset(trans[i]); // y=f(x) = f(wx+b)
				}
				auto _er = (y[lay.size() - 2] - testTag[index]).pow(2).sum() / 2.0; //squared error E = ||y - tag||^2 / 2
				if (_er <= er)
					yes++;
			}
			if (logout) {
				for (size_t i = 0; i < lay.size() - 1; ++i) {
					printf("\n>> lay %zd::-------------------\n", i + 1);
					w[i].print("W"); b[i].print("B");
				}
				cout << "\ntrain complete ...\n";
			}
			return yes / sk;
		}
	};

	//training driver
	double ls = 0.1;                  // learning rate
	double ter = 0.0001, tcr = 0.995; // per-sample error threshold and target accuracy
	size_t trainStep = trainIn.size() * 1000;
	auto pRandf = &randf;             // initialize W and B on the first round only
ag:
	auto tk = clock();
	auto cr = FcNn::train(
		trainIn, trainTag, testIn, testTag, trainStep,
		lay, trans, transDev,
		W, B, pRandf,
		ls, ter, tcr
	);
	auto time = double(clock() - tk)/CLOCKS_PER_SEC;
	printf("\n训练步数 = %zd, 耗时 = %.3fs, 正确率 = %.2f%% \n", trainStep, time, cr * 100);
	pRandf = 0; // keep the learned W and B if another round is needed
	if (cr < tcr)
		goto ag; // accuracy below target: train again from the current weights

	getchar();
	return 0;
}
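If CLMatrix is not at hand, the following is a minimal self-contained sketch of the same training scheme on plain arrays: one sigmoid hidden layer, a linear output layer, and per-sample SGD on the squared-error loss. The hidden width, learning rate, and step count are illustrative assumptions, not the article's exact 1-10-8-10-2 configuration:

#include <cmath>
#include <cstdio>
#include <random>
#include <vector>

int main() {
	const double pi = std::acos(-1.0);
	const int H = 24;       // hidden width (assumption)
	const double lr = 0.05; // learning rate (assumption)
	std::mt19937 gen(7);
	std::uniform_real_distribution<double> ux(-2.0 * pi, 2.0 * pi);
	std::uniform_real_distribution<double> uw(-1.0, 1.0);

	std::vector<double> w1(H), b1(H);     // input -> hidden
	std::vector<double> w2(2 * H), b2(2); // hidden -> output, row-major [output][hidden]
	for (auto& v : w1) v = uw(gen);
	for (auto& v : b1) v = uw(gen);
	for (auto& v : w2) v = uw(gen);
	for (auto& v : b2) v = uw(gen);

	std::vector<double> h(H), dh(H);
	for (long step = 0; step < 1500000; ++step) {
		double x = ux(gen);
		double t[2] = { std::sin(x), std::cos(x) };
		// forward: sigmoid hidden layer, linear output
		for (int j = 0; j < H; ++j)
			h[j] = 1.0 / (1.0 + std::exp(-(w1[j] * x + b1[j])));
		double y[2];
		for (int o = 0; o < 2; ++o) {
			y[o] = b2[o];
			for (int j = 0; j < H; ++j) y[o] += w2[o * H + j] * h[j];
		}
		// backward: for E = ||y - t||^2 / 2 and a linear output, delta_out = y - t
		double d[2] = { y[0] - t[0], y[1] - t[1] };
		for (int j = 0; j < H; ++j) {
			double g = w2[0 * H + j] * d[0] + w2[1 * H + j] * d[1];
			dh[j] = g * h[j] * (1.0 - h[j]); // sigmoid derivative in terms of its output
		}
		for (int o = 0; o < 2; ++o) {
			for (int j = 0; j < H; ++j) w2[o * H + j] -= lr * d[o] * h[j];
			b2[o] -= lr * d[o];
		}
		for (int j = 0; j < H; ++j) {
			w1[j] -= lr * dh[j] * x;
			b1[j] -= lr * dh[j];
		}
	}
	// spot-check a few inputs against the true values
	for (double x : { -pi, -1.0, 0.5, 2.0 }) {
		for (int j = 0; j < H; ++j)
			h[j] = 1.0 / (1.0 + std::exp(-(w1[j] * x + b1[j])));
		double y[2];
		for (int o = 0; o < 2; ++o) {
			y[o] = b2[o];
			for (int j = 0; j < H; ++j) y[o] += w2[o * H + j] * h[j];
		}
		std::printf("x=%+.3f  net=(%+.4f, %+.4f)  true=(%+.4f, %+.4f)\n",
			x, y[0], y[1], std::sin(x), std::cos(x));
	}
	return 0;
}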