BP神经网络与C++实现

参考文章https://www.cnblogs.com/Finley/p/5946000.html

头文件BP_NN.h

#ifndef BP_NN_H
#define BP_NN_H
#include <iostream>
#include <fstream>
#include <vector>
#include <ctime> // 时间库
#include<random>
#include<iomanip>
#include<math.h>
using namespace std;
// Thin wrapper around rand() providing uniform integer, uniform real and
// Gaussian random numbers.
// NOTE(review): rand() is a low-quality generator; <random> (already
// included) would be preferable — kept for API compatibility.
class RandomNumber
{
public:
	RandomNumber()
	{
		srand((unsigned)time(NULL));    // constructor: seed the PRNG once when the object is created
	}
	// Uniform integer in the closed interval [begin, end].
	int integer(int begin, int end)
	{
		return rand() % (end - begin + 1) + begin;
	}
	// Uniform real in [a, b) with ~1/10000 granularity.
	double decimal(double a, double b)
	{
		return double(rand() % 10000) / 10000 * (b - a) + a;
	}
	// Gaussian sample from N(mu, sigma^2) via the Box-Muller transform.
	// Bug fix: the first uniform is drawn from (0, 1] — the original used
	// decimal(0, 1), which can return exactly 0 and make log(0) = -inf.
	double GaussianNoise(double mu, double sigma)
	{
		double u1 = (rand() % 10000 + 1) / 10000.0;   // (0, 1]
		double u2 = decimal(0, 1);                    // [0, 1)
		return sigma * sqrt(-2 * log(u1)) * cos(2 * 3.1415926 * u2) + mu;
	}
};
RandomNumber r;   // global generator shared by the whole program
// Logistic activation function: maps any real input into (0, 1).
double sigmoid(double x)
{
	const double e = exp(-x);
	return 1.0 / (1.0 + e);
}
// First derivative of the logistic function, expressed in terms of the
// already-activated value y = sigmoid(v): dy/dv = y * (1 - y).
// NOTE: the argument is the activation OUTPUT, not the raw input.
double sigmoid_derivative(double x)
{
	const double complement = 1 - x;
	return x * complement;
}

// Three-layer (input / hidden / output) back-propagation neural network.
// Weights are updated by gradient descent plus a momentum term: the
// *_correction matrices store the previous weight changes.
class BPNeuralNetwork
{
public:
	int input_n;    // number of input cells, including one bias cell (ni + 1)
	int hidden_n;   // number of hidden cells
	int output_n;   // number of output cells
	vector<double>input_cells;   // input-layer activations (last cell is the bias, fixed at 1)
	vector<double>hidden_cells;  // hidden-layer activations
	vector<double>output_cells;  // output-layer activations
	vector<vector<double>>input_weights;     // [input_n][hidden_n] input->hidden weights
	vector<vector<double>>hidden_weights;    // [hidden_n][output_n] hidden->output weights
	vector<vector<double>>input_correction;  // previous input-weight deltas (momentum)
	vector<vector<double>>hidden_correction; // previous hidden-weight deltas (momentum)
//network operations
	void setup(int ni,int nh,int no);//allocate and randomly initialize the network
	vector<double>predict(vector<double>inputs);//forward pass: map inputs to outputs
	double back_propagate(vector<double>input, vector<double>output, double learn, double correct);//one backward pass on a sample; returns its error
	double train(vector<vector<double>>cases, vector<vector<double>>labels, int limit, double learn, double correct);//epoch training; returns last-epoch error

}; 
// Allocate and initialize a network with ni inputs, nh hidden cells and
// no outputs.  One extra input cell is appended as a bias (held at 1).
// Fix: assign() is used instead of resize() so that calling setup() a
// second time fully re-initializes the activations and the momentum
// matrices (resize() leaves existing elements untouched).
void BPNeuralNetwork::setup(int ni, int nh, int no)
{
	input_n = ni + 1;   // +1 bias cell
	hidden_n = nh;
	output_n = no;
	// init cells: all activations start at 1; the bias cell stays 1 forever
	input_cells.assign(input_n, 1.0);
	hidden_cells.assign(hidden_n, 1.0);
	output_cells.assign(output_n, 1.0);
	// init weight matrices
	input_weights.assign(input_n, vector<double>(hidden_n));
	hidden_weights.assign(hidden_n, vector<double>(output_n));
	// random initialization: small range into the hidden layer,
	// larger range into the output layer
	for (int i = 0; i < input_n; i++)
	{
		for (int h = 0; h < hidden_n; h++)
		{
			input_weights[i][h] = r.decimal(-0.2, 0.2);
		}
	}
	for (int h = 0; h < hidden_n; h++)
	{
		for (int o = 0; o < output_n; o++)
		{
			hidden_weights[h][o] = r.decimal(-2.0, 2.0);
		}
	}
	// momentum (previous-delta) matrices start at zero
	input_correction.assign(input_n, vector<double>(hidden_n, 0.0));
	hidden_correction.assign(hidden_n, vector<double>(output_n, 0.0));
}
// Forward pass: load `inputs` into the input layer (the trailing bias cell
// keeps its value), then propagate through the hidden and output layers
// with sigmoid activation.  Returns a copy of the output activations.
vector<double>BPNeuralNetwork::predict(vector<double>inputs)
{
	// load the input layer; the last cell is the bias and is not overwritten
	for (int k = 0; k + 1 < input_n; k++)
	{
		input_cells[k] = inputs[k];
	}
	// hidden layer: weighted sum over all input cells, squashed by sigmoid
	for (int h = 0; h < hidden_n; h++)
	{
		double acc = 0.0;
		for (int i = 0; i < input_n; i++)
		{
			acc += input_cells[i] * input_weights[i][h];
		}
		hidden_cells[h] = sigmoid(acc);
	}
	// output layer: weighted sum over all hidden cells, squashed by sigmoid
	for (int o = 0; o < output_n; o++)
	{
		double acc = 0.0;
		for (int h = 0; h < hidden_n; h++)
		{
			acc += hidden_cells[h] * hidden_weights[h][o];
		}
		output_cells[o] = sigmoid(acc);
	}
	return output_cells;
}
// One training step on a single sample:
//   1. forward pass (predict) to refresh all cell activations
//   2. compute output-layer deltas against the target vector `output`
//   3. back-propagate the deltas to the hidden layer
//   4. update hidden->output then input->hidden weights using learning
//      rate `learn` plus a momentum term scaled by `correct`
// Returns the sample's squared error 0.5 * sum((target - actual)^2).
double BPNeuralNetwork::back_propagate(vector<double>input, vector<double>output, double learn, double correct)
{
	
	//feed forward: refresh input/hidden/output activations for this sample
	predict(input);
	//get output layer error: delta_o = f'(out_o) * (target_o - out_o)
	vector<double>output_deltas(output_n, 1.0);
	double error;
	for (int o = 0; o < output_n; o++)
	{
		error = output[o] - output_cells[o];
		output_deltas[o] = sigmoid_derivative(output_cells[o]) * error;
	}
	//get hidden layer error by propagating the output deltas backwards
	vector<double>hidden_deltas(hidden_n, 1.0);
	for (int h = 0; h < hidden_n; h++)
	{
		error = 0.0;
		for (int o = 0; o < output_n; o++)
		{
			error += output_deltas[o] * hidden_weights[h][o];
		}
		hidden_deltas[h] = sigmoid_derivative(hidden_cells[h]) * error;
	}
	//update output weights: gradient step + momentum (previous change);
	//must happen AFTER hidden deltas were computed from the old weights
	double change;
	for (int h = 0; h < hidden_n; h++)
	{
		for (int o = 0; o < output_n; o++)
		{
			change = output_deltas[o] * hidden_cells[h];
			hidden_weights[h][o] += learn * change + correct * hidden_correction[h][o];
			hidden_correction[h][o] = change;
		}
	}
	//update input weights the same way (bias cell i == input_n-1 included)
	for (int i = 0; i < input_n; i++)
	{
		for (int h = 0; h < hidden_n; h++)
		{
			change = hidden_deltas[h] * input_cells[i];
			input_weights[i][h] += learn * change + correct * input_correction[i][h];
			input_correction[i][h] = change;
		}
	}
	//get global error: 0.5 * sum of squared residuals over the outputs
	//NOTE(review): int vs size_t comparison below; assumes
	//output.size() == output_n — confirm at the call sites
	error = 0.0;
	for (int o = 0; o < output.size(); o++)
	{
		error += 0.5 * (output[o] - output_cells[o]) *(output[o] - output_cells[o]);
	}
	return error;
}
// Train on the whole data set for `limit` epochs with learning rate
// `learn` and momentum coefficient `correct`; prints the accumulated
// error after every epoch and returns the last epoch's total error
// (the initial 100 sentinel is returned only when limit <= 0).
// Fix: removed the unused locals `label`/`case1`, which deep-copied
// every sample twice per epoch for no effect.
double BPNeuralNetwork::train(vector<vector<double>>cases, vector<vector<double>>labels, int limit, double learn, double correct)
{
	double error = 100;
	for (int j = 0; j < limit; j++)
	{
		error = 0.0;
		for (size_t i = 0; i < cases.size(); i++)
		{
			error += back_propagate(cases[i], labels[i], learn, correct);
		}
		cout <<"第"<<j<<"次训练:"<< error << endl;
	}
	return error;
}
// Column-wise min-max normalization: rescales every column of x to [0, 1].
// The matrix is taken by value and the normalized copy is returned.
// Fixes: a constant column (max == min) is mapped to 0.0 instead of
// dividing by zero (the original produced NaN/inf); empty input is
// returned unchanged instead of indexing x[0].
vector<vector<double>> Data_normalization(vector<vector<double>>x)
{
	if (x.empty() || x[0].empty())
	{
		return x;   // nothing to normalize
	}
	for (size_t i = 0; i < x[0].size(); i++)
	{
		// find the column's extrema
		double lo = x[0][i], hi = x[0][i];
		for (size_t j = 1; j < x.size(); j++)
		{
			if (x[j][i] < lo)
			{
				lo = x[j][i];
			}
			if (x[j][i] > hi)
			{
				hi = x[j][i];
			}
		}
		// rescale; a degenerate (constant) column becomes all zeros
		const double range = hi - lo;
		for (size_t j = 0; j < x.size(); j++)
		{
			x[j][i] = (range == 0.0) ? 0.0 : (x[j][i] - lo) / range;
		}
	}
	return x;
}
// Benchmark objective selector: "1" = Sphere, "2" = Rastrigin.
const string  test_function = "1";
// Evaluate the selected benchmark function at point x.
double function(vector<double> x)
{
	double fx = 0;
	const int n = x.size();
	//============================ benchmarks ============================
	// 1. Sphere: sum of squares, domain [-100, 100]^n
	if (test_function == "1")
	{
		for (int i = 0; i < n; i++)
		{
			fx += pow(x[i], 2);
		}
	}
	// 2. Rastrigin: multi-modal, domain typically [-5.12, 5.12]^n
	if (test_function == "2")
	{
		for (int i = 0; i < n; i++)
		{
			fx += x[i] * x[i] - 10 * cos(2 * 3.1415926 * x[i]) + 10;
		}
	}
	return fx;
}
#endif

主函数BP_NN.cpp

#include"BP_NN.h"
// Demo driver: trains a 3-5-1 BP network on random data in [0,1) and
// reports, for each training sample, the target value, the prediction
// and the relative error.  A sample counts as "successful" when its
// ABSOLUTE relative error is below 5%.
int main()
{
    ofstream out("隐藏层神经元敏感性分析.dat");   // NOTE(review): opened but never written to — confirm intent
    double rate;
    BPNeuralNetwork self;
    int ni = 3, training_n = 10, no = 1, nh = 5;
    vector<vector<double>>training_x(training_n, vector<double>(ni));
    vector<vector<double>>training_y(training_n, vector<double>(no));
    // random training inputs in [0, 1)
    for (int i = 0; i < training_n; i++)
    {
        for (int j = 0; j < ni; j++)
        {
            training_x[i][j] = r.decimal(0, 1);
        }
    }
    // random training targets in [0, 1)
    for (int i = 0; i < training_n; i++)
    {
        for (int j = 0; j < no; j++)
        {
            training_y[i][j] = r.decimal(0, 1);
        }
    }
    self.setup(ni, nh, no);
    double error = self.train(training_x, training_y, 100000, 0.05, 0.1);
    vector<double> predict_y;
    int success_count = 0;
    cout <<"样本训练值"<< "、" << "样本预测值" << "、" << "训练误差" << endl;
    for (size_t i = 0; i < training_x.size(); i++)
    {
        predict_y = self.predict(training_x[i]);
        rate = (training_y[i][0] - predict_y[0]) / training_y[i][0];
        // Bug fix: use |rate| — the original `rate < 0.05` also counted
        // arbitrarily large NEGATIVE relative errors as successes.
        if (fabs(rate) < 0.05)
        {
            success_count++;
        }
        cout << training_y[i][0] << "、" << predict_y[0] << "、" << rate << endl;
    }
    cout <<"训练精度:"<< fixed << setw(12) << setprecision(6) << error << "     样本训练成功率:" << setw(12) << setprecision(6) << double(success_count) / training_x.size() << endl;
}

 

  • 1
    点赞
  • 5
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值