C Implementation of a Single-Hidden-Layer BP Neural Network

#include<stdio.h>
#include<stdlib.h>
#include<math.h>

#define Data  10		// number of training samples
#define Data2  2		// number of test samples
#define In 3			// network input dimension
#define Out 2			// network output dimension
#define Neuron 10		// number of hidden-layer neurons


int i, j, k;			// shared loop indices (global, as in the original code)
float J;				// error of the current sample
float Jnum;				// accumulated error over one epoch
float Hidden_in[Neuron], Hidden_out[Neuron], ds_Hidden[Neuron];		// hidden-layer pre-activations, activations, and activation derivatives
float Weight_in[Neuron][In], Bias_in[Neuron], Weight_out[Out][Neuron], Bias_out[Out];				// current weights and biases
float Weight_in_zhong[Neuron][In], Bias_in_zhong[Neuron], Weight_out_zhong[Out][Neuron], Bias_out_zhong[Out];	// values after the previous update ("zhong"), used for the momentum term
float Weight_in_old[Neuron][In], Bias_in_old[Neuron], Weight_out_old[Out][Neuron], Bias_out_old[Out];			// values from one update earlier
float y_pre[Out], e[Out];	// network prediction and per-output error
float xminmax[In][2];		// input min/max ranges (declared but unused below; see the normalization sketch)
float yminmax[Out][2];		// output min/max ranges (declared but unused below)
float x[In], y[Out];		// current sample

// chain-rule partial derivatives used by back-propagation
float dJ_de[Out], de_dym[Out];
float dym_dwif[Neuron][In], dJ_dwif[Neuron][In];
float dym_dbif[Neuron], dJ_dbif[Neuron];
float dym_dwof[Out][Neuron], dJ_dwof[Out][Neuron];
float dym_dbof[Out], dJ_dbof[Out];
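
/*
 * The arrays xminmax and yminmax above are declared but never used in this
 * listing; presumably they were meant for min-max scaling of the data.
 * A minimal sketch of such a helper, under that assumption (normalizeSample
 * is a hypothetical name, not part of the original code):
 */
void normalizeSample(float *sample)
{
	// scale each input dimension to [0, 1] using its recorded min/max
	for (int n = 0; n < In; n++)
	{
		float range = xminmax[n][1] - xminmax[n][0];
		if (range > 0)
			sample[n] = (sample[n] - xminmax[n][0]) / range;
	}
}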


// Activation function for the hidden layer
float calInspirit(float x, int mode1)
{
	float y = 0;
	if (mode1 == 0)		// sigmoid
	{
		y = 1.0f / (1.0f + expf(-x));
	}
	else if (mode1 == 1)	// tanh
	{
		y = (expf(x) - expf(-x)) / (expf(x) + expf(-x));
	}
	else if (mode1 == 2)	// "special" variant, algebraically equal to tanh(x/2)
	{
		y = (1 - expf(-x)) / (1 + expf(-x));
	}
	return y;
}
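
/*
 * For reference, the three activation modes compute:
 *   mode 0 (sigmoid):  S(x) = 1 / (1 + e^(-x)),                 range (0, 1)
 *   mode 1 (tanh):     S(x) = (e^x - e^(-x)) / (e^x + e^(-x)),  range (-1, 1)
 *   mode 2:            S(x) = (1 - e^(-x)) / (1 + e^(-x)),      range (-1, 1)
 * Mode 2 is algebraically identical to tanh(x/2): multiplying its numerator
 * and denominator by e^(x/2) turns it into the tanh quotient.
 */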


// Derivative of the activation function
float caldsInspirit(float x, int mode1)
{
	float Sx, dSx = 0;
	if (mode1 == 0)		// sigmoid: S'(x) = e^(-x) / (1 + e^(-x))^2
	{
		dSx = expf(-x) / (1 + expf(-x)) / (1 + expf(-x));
	}
	else if (mode1 == 1)	// tanh: S'(x) = 1 - tanh(x)^2
	{
		Sx = (expf(x) - expf(-x)) / (expf(x) + expf(-x));
		dSx = 1 - Sx * Sx;
	}
	else if (mode1 == 2)	// tanh(x/2) variant: S'(x) = 2e^(-x) / (1 + e^(-x))^2
	{
		dSx = 2 * expf(-x) / (1 + expf(-x)) / (1 + expf(-x));
	}
	return dSx;
}
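
/*
 * Each derivative can also be written in terms of the activation output S
 * itself, which avoids re-evaluating the exponentials:
 *   mode 0:  S'(x) = S * (1 - S)
 *   mode 1:  S'(x) = 1 - S^2
 *   mode 2:  S'(x) = (1 - S^2) / 2
 * A hypothetical variant of caldsInspirit built on these identities (not
 * part of the original code; it assumes the activation output S from
 * calInspirit is already at hand):
 */
float caldsFromOutput(float S, int mode1)
{
	if (mode1 == 0)
		return S * (1 - S);		// sigmoid
	else if (mode1 == 1)
		return 1 - S * S;		// tanh
	else if (mode1 == 2)
		return (1 - S * S) / 2;	// tanh(x/2) variant
	return 0;
}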


// Initialize network parameters with small random values in [0, 0.1].
// Note: rand() is never seeded with srand(), so every run starts from the
// same weights; the momentum copies start equal to the weights, which makes
// the first momentum term zero.
void netInit()
{
	for (j = 0; j < Neuron; j++)
	{
		for (i = 0; i < In; i++)
		{
			Weight_in[j][i] = rand() / (float)RAND_MAX * 0.1f;
			Weight_in_old[j][i] = Weight_in[j][i];
			Weight_in_zhong[j][i] = Weight_in[j][i];
		}
		Bias_in[j] = rand() / (float)RAND_MAX * 0.1f;
		Bias_in_old[j] = Bias_in[j];
		Bias_in_zhong[j] = Bias_in[j];
	}
	for (k = 0; k < Out; k++)
	{
		for (j = 0; j < Neuron; j++)
		{
			Weight_out[k][j] = rand() / (float)RAND_MAX * 0.1f;
			Weight_out_old[k][j] = Weight_out[k][j];
			Weight_out_zhong[k][j] = Weight_out[k][j];
		}
		Bias_out[k] = rand() / (float)RAND_MAX * 0.1f;
		Bias_out_old[k] = Bias_out[k];
		Bias_out_zhong[k] = Bias_out[k];
	}
}
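
/*
 * netInit() draws every parameter uniformly from [0, 0.1], so all weights
 * start with the same sign. A common alternative is a symmetric range
 * around zero; a sketch (randSymmetric is a hypothetical helper, not part
 * of the original code):
 */
float randSymmetric(void)
{
	// uniform in [-0.1, 0.1]
	return (rand() / (float)RAND_MAX - 0.5f) * 0.2f;
}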


// Forward pass: compute the network output for one input sample
void network(float *data_in, float *data_out, int data_in_col, int Hidden_col, int data_out_col, int model)
{
	// input layer to hidden layer (index i: input, j: hidden, k: output)
	for (j = 0; j < Hidden_col; j++)
	{
		Hidden_in[j] = 0;
		for (i = 0; i < data_in_col; i++)
		{
			Hidden_in[j] = Hidden_in[j] + Weight_in[j][i] * data_in[i];
		}
		Hidden_in[j] = Hidden_in[j] + Bias_in[j];
	}
	// hidden-layer activation, with its derivative cached for back-propagation
	for (j = 0; j < Hidden_col; j++)
	{
		Hidden_out[j] = calInspirit(Hidden_in[j], model);
		ds_Hidden[j] = caldsInspirit(Hidden_in[j], model);
	}
	// hidden layer to output layer (linear output neurons)
	for (k = 0; k < data_out_col; k++)
	{
		data_out[k] = 0;
		for (j = 0; j < Hidden_col; j++)
		{
			data_out[k] = data_out[k] + Weight_out[k][j] * Hidden_out[j];
		}
		data_out[k] = data_out[k] + Bias_out[k];
	}
}
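
/*
 * In terms of the globals above, the forward pass computes
 *   Hidden_in[j]  = sum_i Weight_in[j][i] * data_in[i] + Bias_in[j]
 *   Hidden_out[j] = S(Hidden_in[j])
 *   data_out[k]   = sum_j Weight_out[k][j] * Hidden_out[j] + Bias_out[k]
 * i.e. one nonlinear hidden layer followed by a linear output layer.
 */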


// Half sum-of-squared-errors for one sample: J = 0.5 * sum_k e[k]^2
// (also fills the global error vector e[] used by back-propagation)
float calSSE(float *net_output, float *data_output)
{
	J = 0;
	for (k = 0; k < Out; k++)
	{
		e[k] = net_output[k] - data_output[k];
		J = J + 0.5f * e[k] * e[k];
	}
	return J;
}


// Back-propagation: accumulate the chain-rule gradients, then update all
// weights and biases by gradient descent with a momentum term.
// a: learning rate, alfa: momentum coefficient
void backward(float *net_input, float a, float alfa)
{
	// clear the input-layer gradient accumulators (they sum over all outputs k)
	for (j = 0; j < Neuron; j++)
	{
		for (i = 0; i < In; i++)
		{
			dJ_dwif[j][i] = 0;
		}
		dJ_dbif[j] = 0;
	}
	for (k = 0; k < Out; k++)
	{
		dJ_de[k] = e[k];	// dJ/de_k = e_k, from J = 0.5 * sum e^2
		de_dym[k] = 1;		// the output neurons are linear

		for (j = 0; j < Neuron; j++)
		{
			for (i = 0; i < In; i++)
			{
				dym_dwif[j][i] = Weight_out[k][j] * ds_Hidden[j] * net_input[i];

				dJ_dwif[j][i] = dJ_dwif[j][i] + dJ_de[k] * de_dym[k] * dym_dwif[j][i];
			}
			dym_dbif[j] = Weight_out[k][j] * ds_Hidden[j] * 1;
			dJ_dbif[j] = dJ_dbif[j] + dJ_de[k] * de_dym[k] * dym_dbif[j];

			dym_dwof[k][j] = Hidden_out[j];
			dJ_dwof[k][j]= dJ_de[k] * de_dym[k] * dym_dwof[k][j];
		}
		dym_dbof[k] = 1;
		dJ_dbof[k] = dJ_de[k] * de_dym[k] * dym_dbof[k];
	}
	// apply the updates; the momentum term alfa*(zhong - old) is the previous step
	for (j = 0; j < Neuron; j++)
	{
		for (i = 0; i < In; i++)
		{
			Weight_in[j][i] = Weight_in[j][i]-a*dJ_dwif[j][i]+alfa*(Weight_in_zhong[j][i]- Weight_in_old[j][i]);

			Weight_in_old[j][i] = Weight_in_zhong[j][i];
			Weight_in_zhong[j][i] = Weight_in[j][i];
		}
		Bias_in[j] = Bias_in[j] - a*dJ_dbif[j] + alfa*(Bias_in_zhong[j] - Bias_in_old[j]);

		Bias_in_old[j] = Bias_in_zhong[j];
		Bias_in_zhong[j] = Bias_in[j];
	}
	for (k = 0; k < Out; k++)
	{
		for (j = 0; j < Neuron; j++)
		{
			Weight_out[k][j] = Weight_out[k][j] - a*dJ_dwof[k][j] + alfa*(Weight_out_zhong[k][j] - Weight_out_old[k][j]);

			Weight_out_old[k][j] = Weight_out_zhong[k][j];
			Weight_out_zhong[k][j] = Weight_out[k][j];
		}
		Bias_out[k] = Bias_out[k] - a*dJ_dbof[k] + alfa*(Bias_out_zhong[k] - Bias_out_old[k]);

		Bias_out_old[k] = Bias_out_zhong[k];
		Bias_out_zhong[k] = Bias_out[k];
	}
	
}
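
/*
 * With J = 0.5 * sum_k e[k]^2 and a linear output layer, the chain rule
 * gives exactly the gradients accumulated above:
 *   dJ/dWeight_out[k][j] = e[k] * Hidden_out[j]
 *   dJ/dBias_out[k]      = e[k]
 *   dJ/dWeight_in[j][i]  = sum_k e[k] * Weight_out[k][j] * S'(Hidden_in[j]) * net_input[i]
 *   dJ/dBias_in[j]       = sum_k e[k] * Weight_out[k][j] * S'(Hidden_in[j])
 * Each parameter w is then updated as
 *   w_new = w - a * dJ/dw + alfa * (w_zhong - w_old)
 * where (w_zhong - w_old) is the previous update step, i.e. classic momentum.
 */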


// Train the network with per-sample (online) back-propagation
void train(float data_in[In][Data], float data_out[Out][Data], int maxepoch, int model, float a, float alfa)
{
	netInit();
	printf("神经网络开始训练:\n");
	Jnum = Data;
	int epoch = 0;
	while (Jnum / Data >= 0.000001)
	{
		Jnum = 0;
		epoch = epoch + 1;
		for (int num = 0; num < Data; num++)
		{
			for (i = 0; i < In; i++)
			{
				x[i] = data_in[i][num];
			}
			for (k = 0; k < Out; k++)
			{
				y[k] = data_out[k][num];
			}

			network(x, y_pre, In, Neuron, Out, model);
			J = calSSE(y_pre, y);
			Jnum = Jnum + J;
			backward(x, a, alfa);	// online update: one gradient step per sample
		}
		printf("训练次数:%d\t", epoch);
		printf("训练误差:%f\n", Jnum / Data);
		if (epoch >= maxepoch)
		{
			break;
		}
	}
}
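
/*
 * The "Training error" printed each epoch is Jnum / Data, i.e. the mean of
 * the per-sample half-SSE:
 *   Jnum / Data = (1 / Data) * sum_num ( 0.5 * sum_k (y_pre[k] - y[k])^2 )
 * so the stopping threshold 1e-6 applies to this mean, not to the raw SSE.
 */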


int main()
{
	// training data (3 inputs, 2 outputs, 10 samples) and test data (2 samples)
	float data_in[In][Data] = { {1,		0.9,	0.8,	0.7,	0.6,	0.5,	0.4,	0.3,	0.2,	0.1},
								{0.5,	0.4,	0.3,	0.2,	0.1,	0.6,	0.7,	0.8,	0.9,	1},
								{0.1,	0.5,	0.4,	0.3,	0.2,	0.9,	0.8,	0.7,	0.6,	1} };
	float data_out[Out][Data] = {{0.8,	0.9,	0.75,	0.6,	0.45,	1,		0.95,	0.9,	0.85,	1.05 },
								{0.6,	1,		0.9,	0.8,	0.7,	0.8,	0.5,	0.2,	-0.1,	0.1 } };
	float data_in_test[In][Data2] = {	{0.5,	0.4},
										{0.3,	0.7},
										{0.2,	0.8} };
	float data_out_test[Out][Data2]= {	{0.5,	0.95},
										{0.4,	0.5  } };
	int maxepoch = 1000000;		// upper bound on training epochs
	int model = 1;				// activation: 0 = sigmoid, 1 = tanh, 2 = tanh(x/2) variant
	float a = 0.01f, alfa = 0.0f;	// learning rate and momentum coefficient (momentum disabled here)
	train(data_in, data_out, maxepoch, model, a, alfa);

	// evaluate on the test set (forward pass only)
	for (int num = 0; num < Data2; num++)
	{
		for (i = 0; i < In; i++)
		{
			x[i] = data_in_test[i][num];
		}
		for (k = 0; k < Out; k++)
		{
			y[k] = data_out_test[k][num];
		}

		network(x, y_pre, In, Neuron, Out, model);
		J = calSSE(y_pre, y);
		printf("Test sample %d:\t", num);
		printf("Test error: %f\n", J);
	}

	return 0;
}