// Deep-learning BP (backpropagation) algorithm implemented in C

#include<stdio.h>
#include <stdlib.h>
#include <math.h>
#define ETA 1//Learning_rate
#define PRECISION 0.00001

/* A single neuron. Layer-0 neurons only use `output` (set directly from
 * the network input); hidden/output neurons use all fields. */
typedef struct Neuron
{
	double input;    /* weighted sum of the previous layer's outputs */
	double output;   /* sigmoid(input); for layer 0, the raw input value */
	double* weights; /* one weight per previous-layer neuron; malloc'd in init() */
	double Error;    /* delta term computed during backprop() */

} NEURON;
/* One layer of the network: a dynamically allocated array of neurons. */
typedef struct Layer
{
	int numberOfNeurons;
	NEURON *neurons; /* malloc'd in createNetWorks() */
} LAYER;
/* A fully connected feed-forward network: layers[0] is the input layer,
 * layers[numberOfLayers - 1] is the output layer. */
typedef struct NNet {
	int numberOfLayers;
	LAYER *layers; /* malloc'd in createNetWorks() */
} NNET;
/function
double sigmoid(double v)
{
	return 1 / (1 + exp(-v));
}

/* Random weight generator, uniform in [-0.5, 0.5).
 * The original body returned values in [-1, 0) — `x/100000 - 1` with
 * x in [0, 99999] — contradicting its own comment ("between -0.5 ~ 0.5")
 * and biasing every initial weight negative. Shifted by 0.5 so weights
 * straddle zero as intended. */
double randomWeight()  //random weight generator between -0.5 ~ 0.5
{
	return (rand() % 100000) / 100000.0 - 0.5;
}

/* Allocate the layer array and each layer's neuron array.
 * nnet            : network to populate (must be non-NULL)
 * NumberOfLayers  : total layer count, including input and output layers
 * NumberOfNeurons : per-layer neuron counts, length NumberOfLayers
 * Exits the process on allocation failure (the original left malloc
 * results unchecked, risking a NULL dereference later). */
void createNetWorks(NNET *nnet,int NumberOfLayers,int* NumberOfNeurons) {
	nnet->numberOfLayers = NumberOfLayers;
	nnet->layers = (LAYER*)malloc(NumberOfLayers * sizeof(LAYER));
	if (nnet->layers == NULL) {
		fprintf(stderr, "createNetWorks: out of memory\n");
		exit(EXIT_FAILURE);
	}
	for (int i = 0; i < NumberOfLayers; i++) {
		nnet->layers[i].numberOfNeurons = NumberOfNeurons[i];
		nnet->layers[i].neurons = (NEURON*)malloc(NumberOfNeurons[i] * sizeof(NEURON));
		if (nnet->layers[i].neurons == NULL) {
			fprintf(stderr, "createNetWorks: out of memory\n");
			exit(EXIT_FAILURE);
		}
	}
}

/* Seed layer 0 with `inputs`, allocate a fresh random weight vector for
 * every hidden/output neuron, and run one forward pass so each neuron's
 * input/output fields are populated. */
void init(NNET *nnet,double * inputs) {
	LAYER *first = &nnet->layers[0];
	for (int n = 0; n < first->numberOfNeurons; n++) {
		first->neurons[n].output = inputs[n];
	}

	for (int layer = 1; layer < nnet->numberOfLayers; layer++) {
		LAYER *prev = &nnet->layers[layer - 1];
		LAYER *cur  = &nnet->layers[layer];
		for (int n = 0; n < cur->numberOfNeurons; n++) {
			NEURON *neuron = &cur->neurons[n];
			neuron->weights = (double*)malloc(prev->numberOfNeurons * sizeof(double));
			double sum = 0;
			for (int p = 0; p < prev->numberOfNeurons; p++) {
				double w = randomWeight();
				neuron->weights[p] = w;
				sum += prev->neurons[p].output * w;
			}
			neuron->input = sum;
			neuron->output = sigmoid(sum);
		}
	}
}
/* Propagate the current layer-0 outputs forward through the network,
 * recomputing every hidden/output neuron's input and output. */
void feedforward(NNET *nnet) {
	for (int layer = 1; layer < nnet->numberOfLayers; layer++) {
		LAYER *prev = &nnet->layers[layer - 1];
		LAYER *cur  = &nnet->layers[layer];
		for (int n = 0; n < cur->numberOfNeurons; n++) {
			NEURON *neuron = &cur->neurons[n];
			double sum = 0;
			for (int p = 0; p < prev->numberOfNeurons; p++) {
				sum += prev->neurons[p].output * neuron->weights[p];
			}
			neuron->input = sum;
			neuron->output = sigmoid(sum);
		}
	}
}
/* Load `input` into layer 0, then propagate forward.
 * The original duplicated feedforward()'s propagation loop verbatim;
 * it now delegates to feedforward() (declared above), so there is a
 * single copy of the forward-pass logic to maintain.
 * (Function name kept as-is, typo included, to preserve the interface.) */
void feedforwardWithiInput(NNET *nnet, double* input) {
	for (int i = 0; i < nnet->layers[0].numberOfNeurons; i++) {
		nnet->layers[0].neurons[i].output = input[i];
	}
	feedforward(nnet);
}
/* Backpropagation: compute every neuron's delta (stored in .Error) and
 * update all weights in place using the delta rule with learning rate ETA.
 * targets: desired outputs for the final layer (length = output neuron count).
 * NOTE: the statement order inside the kk loop matters — each weight is
 * read into `temp` BEFORE it is updated, so layer i-1 deltas are computed
 * from the pre-update weights, as standard BP requires. */
void backprop(NNET *nnet,double* targets) {

	/* Output-layer deltas: delta = out * (1 - out) * (target - out),
	 * i.e. the sigmoid derivative times the output error. */
	int num = nnet->layers[nnet->numberOfLayers - 1].numberOfNeurons;
	for (int i = 0; i < num; i++) {
		double out = nnet->layers[nnet->numberOfLayers - 1].neurons[i].output;
		nnet->layers[nnet->numberOfLayers - 1].neurons[i].Error =out*(1-out)*(targets[i]-out);
	}
	
	/* Walk backwards from the output layer; for each neuron jj of layer i-1,
	 * accumulate the weighted deltas of layer i, update those weights, then
	 * derive layer i-1's own delta from the accumulated sum. */
	for (int i = nnet->numberOfLayers - 1; i >= 0;) {
		if (i != 0) {
			for (int jj = 0; jj < nnet->layers[i - 1].numberOfNeurons; jj++) {
				double temp=0;
				for (int kk = 0; kk < nnet->layers[i].numberOfNeurons; kk++) {
					/* read the old weight first ... */
					temp +=  nnet->layers[i].neurons[kk].weights[jj]*nnet->layers[i].neurons[kk].Error;
					/* ... then apply the update: w += ETA * delta * upstream activation */
					nnet->layers[i].neurons[kk].weights[jj] = nnet->layers[i].neurons[kk].weights[jj] + ETA * nnet->layers[i].neurons[kk].Error *nnet-> layers[i - 1].neurons[jj].output;
				}
				double out = nnet->layers[i - 1].neurons[jj].output;
				
				nnet->layers[i-1].neurons[jj].Error= out * (1 - out)*temp;
				
			
			}
		}
		i--;
		
	}
	
}

int main() {
	
	NNET* net=(NNET*)malloc(sizeof(NNET));
	int num = 3;
	int a[4] = { 3,3,1 };
	createNetWorks(net, num, a);
	double input[4] = { 1,1,1 };
	double input1[4] = { 1,0,1 };
	double input2[4] = { 1,1,0 };
	double input3[4] = { 0,1,1 };
	
	double target0[1] = { 0.8 };
	double target1[1] = { 0.7 };
	double target2[1] = { 0.5 };

	double target3[1] = { 0.3 };
	init(net,input);
	printf("\n");
	int alpha = 0;
	int flag = 0;
	while (1) {

		feedforwardWithiInput(net, input);
		backprop(net, target0);

		feedforwardWithiInput(net, input1);
		backprop(net, target1);

		feedforwardWithiInput(net, input2);
		backprop(net, target2);

		feedforwardWithiInput(net, input3);
		backprop(net, target3);

		alpha++;
		feedforwardWithiInput(net, input);
		if (fabs(net->layers[2].neurons[0].output - target0[0]) >= PRECISION) {
			//flag = 1;
			continue;
		}
		feedforwardWithiInput(net, input1);
		if (fabs(net->layers[2].neurons[0].output - target1[0]) >= PRECISION) {
			//flag = 1;
			continue;
		}
		feedforwardWithiInput(net, input2);
		if (fabs(net->layers[2].neurons[0].output - target2[0]) >= PRECISION) {
			//flag = 1;
			continue;
		}

		feedforwardWithiInput(net, input3);
		if (fabs(net->layers[2].neurons[0].output - target3[0]) >= PRECISION) {
			//flag = 1;
			continue;
		}
		break;
		
	}

	
	printf("\n");
	printf("Numbers of iteration : %d",alpha);
	printf("\n");
	feedforwardWithiInput(net, input);
	printf(" %f  \n", net->layers[2].neurons[0].output);
	feedforwardWithiInput(net, input1);
	printf(" %f  \n", net->layers[2].neurons[0].output);
	feedforwardWithiInput(net, input2);
	printf(" %f  \n", net->layers[2].neurons[0].output);
	feedforwardWithiInput(net, input3);
	printf(" %f  \n", net->layers[2].neurons[0].output);
	getchar();
	return 0;
}
