第一次摸索机器学习……写的奇怪的神经网络(C++)

原来按照老师要求,要写一个很复杂的模拟人脑的神经网络。。最后我只写了个……翔


而且老师当时的要求,没用sigmoid函数……所以我就没用。


然后写着写着就变成BP网络了…


核心:给定一张无向图,我会随机挑选input_size个节点作为输入,output_size个节点作为输出。

然后按照从高到低顺序,构造一张DAG图。然后就。。跑BP算法了。。。


能训练 与、或、二进制识别。

其他没尝试,xor没训练成功,可能我给定的无向图不够漂亮……或者其他因素。3个隐含层节点的BP都能出xor,为啥我不行呢……

recordlog.h是无用头文件,我用来记录日志的。。但是貌似没用


上code

#include <bits/stdc++.h>
#include "recordlog.h"
#include <memory>

using std::cin;
using std::endl;
using std::cout;
#define pr(x)	cout<<#x<<" = "<<x<<" "
#define prln(x)	cout<<#x<<" = "<<x<<endl

class neuron_t;

// Shared type aliases for the network implementation.
using PID            = std::pair<int, double>;	// (target neuron index, edge weight)
using neuron_array_t = std::vector<PID>;	// a neuron's outgoing weighted edges
using vector_map_t   = std::vector<int>;	// one adjacency list of the undirected graph

// One node of the network.  All state is public; net_t manipulates the
// fields directly during the forward and backward passes.
class neuron_t
{
	public:
		double 			energy;			// activation accumulated during the forward pass
		int 			number;			// this neuron's index inside net_t::neurons
		double 			loss_energy;		// bias term; adjusted by gradient descent in the backward pass
		neuron_array_t 		neuron_array;		// outgoing DAG edges: (successor index, weight)
		double			partial_derivative;	// gradient of the error w.r.t. this neuron's energy
};

// A linear (no activation function) BP network whose topology is derived
// from an undirected graph read from "input.txt": input/output nodes are
// picked at random, the graph is oriented into a DAG by BFS heights, and
// plain gradient descent trains the edge weights and per-neuron biases.
template<int neuron_size>
class net_t
{
	public:
		neuron_t		neurons[neuron_size];		// all neurons, indexed 0..neuron_size-1
		vector_map_t		vector_map[neuron_size];	// adjacency lists of the undirected input graph
		std::vector<int> 	output_number;			// indices of the chosen output neurons
		std::vector<int> 	input_number;			// indices of the chosen input neurons
		int 			tmp[neuron_size];		// scratch: node permutation in setIO, in-degrees in getTopology
		int 			height[neuron_size];		// BFS heights used to orient the undirected edges
		int			topology[neuron_size];		// topological order of the resulting DAG
		double			rate;				// learning rate

		// Random double in [l, r] with a resolution of 1e-4.
		static double randomDouble(double l, double r)
		{
			return randomInt((long long)(l * 10000), (long long)(r * 10000)) / 10000.0;
		}

		// Random integer in [L, R].  NOTE(review): multiplying four rand()
		// calls and taking a modulus is statistically biased; it only seeds
		// initial weights here, but std::uniform_int_distribution would be
		// the proper tool.
		static long long randomInt(long long L, long long R)
		{
			unsigned long long x = (unsigned long long)rand()
				* (unsigned long long)rand()
				* (unsigned long long)rand()
				* (unsigned long long)rand();
			return L + (long long)(x % (unsigned long long)(R - L + 1));
		}

		// Read the undirected graph from "input.txt": first the edge count
		// n, then n 1-based edges "s t".
		net_t ()
		{
			rate = 0.1;
			for (int i = 0; i < neuron_size; ++ i){
				vector_map[i].clear();
				tmp[i] = i;
				neurons[i].number = i;
			}
			output_number.clear();
			input_number.clear();

			FILE *file = fopen("input.txt", "r");
			if (file == NULL){				// was: unchecked fopen -> crash on a missing file
				fprintf(stderr, "cannot open input.txt\n");
				return;					// leave an edge-less graph
			}
			int n = 0;
			if (fscanf(file, "%d", &n) != 1){
				n = 0;					// malformed header: read no edges
			}
			prln(neuron_size);
			while (n-- > 0){
				int s, t;
				if (fscanf(file, "%d%d", &s, &t) != 2){
					break;				// truncated file: stop instead of reading garbage
				}
				--s;					// file is 1-based, arrays are 0-based
				--t;
				vector_map[s].push_back(t);
				vector_map[t].push_back(s);
			}
			fclose(file);
		}

		// Pick |output| random output nodes and |input| random input nodes,
		// orient every undirected edge from the higher BFS height to the
		// lower one (yielding a DAG), attach random weights and biases, and
		// compute the topological order.  Call before train()/test().
		void setIO(std::vector<double> &input, std::vector<double> &output){
			if (input.empty() || output.empty()){
				return;					// nothing to wire up
			}
			output_number.clear();				// was: a second setIO call appended duplicates
			input_number.clear();
			for (int i = 0; i < neuron_size; ++ i){
				neurons[i].neuron_array.clear();	// likewise for stale edges
			}

			// std::random_shuffle was removed in C++17 -- use std::shuffle.
			// Fixed seed keeps runs reproducible, like the old rand()-based code.
			static std::mt19937 gen(0);
			std::shuffle(tmp, tmp + neuron_size, gen);
			printf("output nodes are: ");
			for (size_t i = 0; i < output.size(); ++ i){
				output_number.push_back(tmp[i]);
				printf("%d ",tmp[i]);
			}
			printf("\n");
			printf("input nodes are:");
			for (size_t i = output.size(); i < input.size() + output.size(); ++ i){
				input_number.push_back(tmp[i]);
				printf("%d ",tmp[i]);
			}
			printf("\n");

			// Multi-source BFS: the output side grows from height 0 by +1
			// per layer, the input side from height neuron_size by -1, until
			// every reachable node is labeled.
			std::vector<std::queue<int> > q(output.size() + input.size());	// was: non-standard VLA
			memset(height, -1, sizeof(height));
			int cnt = 0;
			for (auto curnode : output_number){
				q[cnt++].push(curnode);
				height[curnode] = 0;
			}
			for (auto curnode : input_number){
				q[cnt++].push(curnode);
				height[curnode] = neuron_size;
			}
			bool flag = true;
			while (flag){
				prln(flag);
				int pos = 0;
				flag = false;
				for (size_t i = 0; i < output_number.size(); ++ i){
					flag |= bfs(q[pos++], 1);
				}
				for (size_t i = 0; i < input_number.size(); ++ i){
					flag |= bfs(q[pos++], -1);
				}
			}

			// Directed edge i -> j exists iff height[i] > height[j]; every
			// edge starts with a random weight in [-1, 1].
			for (int i = 0; i < neuron_size; ++ i){
				for (auto curnode : vector_map[i]){
					if (height[i] > height[curnode]){
						neurons[i].neuron_array.push_back(std::make_pair(curnode, randomDouble(-1, 1)));
					}
				}
			}
			for (int i = 0; i < neuron_size; ++ i){
				neurons[i].loss_energy = randomDouble(-1, 1);	// random initial bias
			}
			getTopology();
			// The network topology is fully constructed at this point.
		}

		// Kahn's algorithm: fill topology[] with a topological order of the
		// DAG built in setIO(), starting from the input nodes.
		void getTopology()
		{
			memset(tmp, 0, sizeof(tmp));			// tmp doubles as the in-degree array here
			for (int i = 0; i < neuron_size; ++ i){
				for (auto nextnode : neurons[i].neuron_array){
					++ tmp[nextnode.first];
				}
			}
			std::queue<int>q;
			for (auto curnode : input_number){
				q.push(curnode);
			}
			int pos = 0;
			while (!q.empty())
			{
				int curnode = q.front();
				q.pop();
				topology[pos++] = curnode;
				for (auto nextnode : neurons[curnode].neuron_array){
					if(-- tmp[nextnode.first] == 0){
						q.push(nextnode.first);
					}
				}
			}
		}

		// Expand one BFS layer (all queued nodes at the current height),
		// labeling unvisited neighbours with height + delta.  Returns true
		// while the queue still holds work, so setIO keeps alternating
		// between the output-side and input-side fronts.
		bool bfs(std::queue<int> &q, int delta){
			if (q.empty()){
				return false;
			}
			int h = height[q.front()];
			while (!q.empty() && height[q.front()] == h){
				int curnode = q.front();
				q.pop();
				for (auto nextnode : vector_map[curnode]){
					if (height[nextnode] != -1){
						continue;		// already labeled by some front
					}
					height[nextnode] = h + delta;
					q.push(nextnode);
				}
			}
			return true;
		}

		// Forward step for one neuron: add its bias, then feed its energy
		// (scaled by each edge weight) into every successor.  The network is
		// purely linear -- there is no activation function.
		void cal_propagate(int node){
			neurons[node].energy += neurons[node].loss_energy;
			double cur_energy = neurons[node].energy;
			for (auto nextnode : neurons[node].neuron_array){
				neurons[nextnode.first].energy += cur_energy * nextnode.second;
			}
		}

		// Forward pass: load the input vector into the input neurons, then
		// propagate through the whole DAG in topological order.
		void propagate(std::vector<double> &input){
			for (int i = 0; i < neuron_size; ++ i){
				neurons[i].energy = 0;
			}

			for (size_t i = 0; i != input.size(); ++ i){
				int curnode = input_number[i];
				neurons[curnode].energy = input[i];
			}

			for (int i = 0; i < neuron_size; ++ i){
				cal_propagate(topology[i]);
			}
		}

		// Backward step for one neuron: accumulate the gradient flowing back
		// from its successors, update its outgoing weights and its bias by
		// gradient descent, and record the gradient for its predecessors.
		void cal_back(int node){
			double delta_loss_energy = 0;
			for (auto &nextnode : neurons[node].neuron_array){
				delta_loss_energy += neurons[nextnode.first].partial_derivative * nextnode.second;
				nextnode.second -= neurons[nextnode.first].partial_derivative * neurons[node].energy * rate;
			}
			neurons[node].loss_energy -= delta_loss_energy * rate;
			neurons[node].partial_derivative += delta_loss_energy;
		}

		// Backward pass: seed the output neurons with (energy - target),
		// i.e. d(error)/d(energy) for a squared-error loss, then walk the
		// topological order in reverse, updating every weight and bias.
		void back(std::vector<double> &output){
			for (int i = 0; i < neuron_size; ++ i){
				neurons[i].partial_derivative = 0;
			}

			for (size_t i = 0; i != output.size(); ++ i)
			{
				int curnode = output_number[i];
				neurons[curnode].partial_derivative = neurons[curnode].energy - output[i];
				neurons[curnode].loss_energy -= (neurons[curnode].energy - output[i]) * rate;
			}

			for (int i = neuron_size - 1; i >= 0; -- i){
				cal_back(topology[i]);
			}
		}

		// One training step: forward pass, then backpropagation.
		void train(std::vector<double> &input, std::vector<double> &output){
			propagate(input);
			back(output);
		}

		// Debug dump: every neuron in topological order with its bias and
		// its outgoing weighted edges.
		void outputNetwork(){
			for (int i = 0; i < neuron_size; ++ i){
				int curnode = topology[i];
				printf("[%d]  loss energy(%.5lf)\n", curnode, neurons[curnode].loss_energy);
				for (auto nextnode : neurons[curnode].neuron_array){
					printf("  -> %d (%.5lf)\n", nextnode.first, nextnode.second);
				}
			}
		}

		// Run a forward pass and print the energies of the output neurons.
		void test(std::vector<double> &input)
		{
			propagate(input);
			cout<<"output: ";
			for (auto curnode : output_number)
			{
				cout << neurons[curnode].energy << " ";
			}
			cout<<endl;
		}
};

int main()
{
	net_t<20> net;
	// Truth table for 2-bit binary recognition: each input pair should map
	// to its decimal value 3, 2, 1, 0.
	std::vector<std::vector<double> > samples_in  = {{1, 1}, {1, 0}, {0, 1}, {0, 0}};
	std::vector<std::vector<double> > samples_out = {{3}, {2}, {1}, {0}};

	// Wire up the network once; the IO sizes are taken from the first sample.
	net.setIO(samples_in[0], samples_out[0]);
	cout<<"--"<<endl;

	// Train for 1000 epochs, presenting the four samples in order each time.
	for (int epoch = 1; epoch <= 1000; ++epoch){
		for (size_t k = 0; k < samples_in.size(); ++k){
			net.train(samples_in[k], samples_out[k]);
		}
	}

	// Report the learned outputs for every sample.
	for (size_t k = 0; k < samples_in.size(); ++k){
		net.test(samples_in[k]);
	}
	return 0;
}






评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值