BP Algorithm in C: a C-language implementation of back-propagation (BP) training

File input/output directory: F:\BP\

Training sample file name: 训练样本.txt (xunlianyangben.txt)

Its contents are:

1

1

-1

1

-1

1

0

1

0

1

Output file names: 阈值.txt (thresholds) and 权值.txt (weights)
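With N = 2 samples, IN = 3 inputs and ON = 2 outputs, GetTrainingData() below consumes these ten values in order: the first N*IN = 6 are the inputs (sample 0 gets 1, 1, -1; sample 1 gets 1, -1, 1) and the last N*ON = 4 are the teacher signals (0, 1 for each sample).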

=========================

#include "stdlib.h"

#include "math.h"

#include "conio.h"

#include "stdio.h"

#define N 2   /* number of training samples */
#define IN 3  /* number of input-layer neurons */
#define HN 3  /* number of hidden-layer neurons */
#define ON 2  /* number of output-layer neurons */
#define Z 20  /* history of saved weights: one snapshot per study step */

double P[IN];      /* input data of one sample */
double T[ON];      /* teacher data of one sample */
double W[HN][IN];  /* weights from the input layer to the hidden layer */
double V[ON][HN];  /* weights from the hidden layer to the output layer */
double X[HN];      /* net input of the hidden layer */
double Y[ON];      /* net input of the output layer */
double H[HN];      /* output of the hidden layer */
double O[ON];      /* output of the output layer */
double YU_HN[HN];  /* thresholds of the hidden layer */
double YU_ON[ON];  /* thresholds of the output layer */
double err_m[N];   /* total error of the m-th sample */
double a;          /* learning rate, output layer to hidden layer */
double b;          /* learning rate, hidden layer to input layer */
double alpha;      /* momentum factor, used by the improved BP algorithm */
double d_err[ON];  /* output-layer error terms (deltas) */

FILE *fp;

/* structure that holds the training samples */
struct {
    double input[IN];
    double teach[ON];
} Study_Data[N];

/* used by the improved BP algorithm to save the weights of each step */
struct {
    double old_W[HN][IN];
    double old_V[ON][HN];
} Old_WV[Z];
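/* Note: Old_WV has only Z = 20 slots. As written, main() passes the sample
   index m (m < N) to saveWV() and the Delta_* routines, so only slots 0..N-1
   are ever used; the commented-out (m,study) variants hinted at in the code
   would overflow Z after 20 study steps. */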

int Start_Show()
{
    clrscr();
    printf("\n ***********************\n");
    printf(" * Welcome to use      *\n");
    printf(" * this program of     *\n");
    printf(" * calculating the BP  *\n");
    printf(" * model!              *\n");
    printf(" * Happy every day!    *\n");
    printf(" ***********************\n");
    printf("\n\nBefore starting, please read the following carefully:\n\n");
    printf(" 1. Please ensure the path of '训练样本.txt' (xunlianyangben.txt)\nis correct, e.g. 'F:\\BP\\训练样本.txt'!\n");
    printf(" 2. The calculated results will be saved under 'F:\\BP\\'!\n");
    printf(" 3. The program loads 10 values from 'F:\\BP\\训练样本.txt' when it runs!\n");
    printf(" 4. The program studies for no more than 30000 iterations.\nIf that number is exceeded, the program stops itself\nto avoid looping forever on an error!\n");
    printf("\n\n\n");
    printf("Now press any key to start...\n");
    getch();
    clrscr();
    return 0;
}

int End_Show()
{
    printf("\n\n---------------------------------------------------\n");
    printf("The program has reached the end successfully!\n\nPress any key to exit!\n\n");
    printf("\n ***********************\n");
    printf(" * This is the end     *\n");
    printf(" * of the program which*\n");
    printf(" * can calculate the BP*\n");
    printf(" * model!              *\n");
    printf(" ***********************\n");
    printf(" * Thanks for using!   *\n");
    printf(" * Happy every day!    *\n");
    printf(" ***********************\n");
    getch();
    exit(0);
}

int GetTrainingData() /* OK */
{
    int m, i, j;
    int datr;
    if ((fp = fopen("f:\\bp\\训练样本.txt", "r")) == NULL) /* open the training samples */
    {
        printf("Cannot open file, strike any key to exit!");
        getch();
        exit(1);
    }
    m = 0;
    i = 0;
    j = 0;
    while (fscanf(fp, "%d", &datr) != EOF)
    {
        j++;
        if (j <= (N * IN)) /* the first N*IN values are the inputs */
        {
            if (i < IN)
            {
                Study_Data[m].input[i] = datr;
                /*printf("\nthe Study_Data[%d].input[%d]=%f\n",m,i,Study_Data[m].input[i]);getch();*/ /* use to check the loaded training data */
            }
            if (m == (N - 1) && i == (IN - 1))
            {
                m = 0;  /* last input read: restart at sample 0 for the teacher values */
                i = -1;
            }
            if (i == (IN - 1))
            {
                m++;
                i = -1;
            }
        }
        else if (j <= (N * (IN + ON))) /* the remaining N*ON values are the teacher signals (the original condition was always true) */
        {
            if (i < ON)
            {
                Study_Data[m].teach[i] = datr;
                /*printf("\nThe Study_Data[%d].teach[%d]=%f",m,i,Study_Data[m].teach[i]);getch();*/ /* use to check the loaded training data */
            }
            if (m == (N - 1) && i == (ON - 1))
                printf("\n");
            if (i == (ON - 1))
            {
                m++;
                i = -1;
            }
        }
        i++;
    }
    fclose(fp);
    printf("\nThere are [%d] values that have been loaded successfully!\n", j);
    /* show the data which has been loaded */
    printf("\nShow the data which has been loaded as follows:\n");
    for (m = 0; m < N; m++)
    {
        for (i = 0; i < IN; i++)
        {
            printf("\nStudy_Data[%d].input[%d]=%f", m, i, Study_Data[m].input[i]);
        }
        for (j = 0; j < ON; j++)
        {
            printf("\nStudy_Data[%d].teach[%d]=%f", m, j, Study_Data[m].teach[j]);
        }
    }
    printf("\n\nPress any key to start calculating...");
    getch();
    return 1;
}
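The index juggling above is easy to trip over when porting. For comparison, an equivalent loader written with plain nested loops might look like this (a sketch, not the author's code; LoadSamples is a name invented here):

```c
/* Hypothetical simpler loader: reads the same 10-value file in the same
   order as GetTrainingData(), first all inputs, then all teacher signals. */
int LoadSamples(const char *path)
{
    int m, i;
    FILE *f = fopen(path, "r");
    if (f == NULL) return 0;
    for (m = 0; m < N; m++)              /* first N*IN values: inputs */
        for (i = 0; i < IN; i++)
            if (fscanf(f, "%lf", &Study_Data[m].input[i]) != 1) { fclose(f); return 0; }
    for (m = 0; m < N; m++)              /* then N*ON values: teacher signals */
        for (i = 0; i < ON; i++)
            if (fscanf(f, "%lf", &Study_Data[m].teach[i]) != 1) { fclose(f); return 0; }
    fclose(f);
    return 1;
}
```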

/*****************************************/
/* initialise the weights and thresholds */
/*****************************************/
int initial()
{
    int i, ii, j, jj, k, kk;
    /* hidden-layer weights and thresholds */
    for (i = 0; i < HN; i++)
    {
        for (j = 0; j < IN; j++) /* the original started at j=1, leaving W[i][0] uninitialised */
        {
            W[i][j] = (double)((rand() / 32767.0) * 2 - 1); /* input-to-hidden weights, random in [-1, 1] */
            printf("w[%d][%d]=%f\n", i, j, W[i][j]);
        }
    }
    for (ii = 0; ii < ON; ii++)
    {
        for (jj = 0; jj < HN; jj++)
        {
            V[ii][jj] = (double)((rand() / 32767.0) * 2 - 1); /* hidden-to-output weights, random in [-1, 1] */
            printf("V[%d][%d]=%f\n", ii, jj, V[ii][jj]);
        }
    }
    for (k = 0; k < HN; k++)
    {
        YU_HN[k] = (double)((rand() / 32767.0) * 2 - 1); /* hidden-layer thresholds, random in [-1, 1] */
        printf("YU_HN[%d]=%f\n", k, YU_HN[k]);
    }
    for (kk = 0; kk < ON; kk++)
    {
        YU_ON[kk] = (double)((rand() / 32767.0) * 2 - 1); /* output-layer thresholds, random in [-1, 1] */
    }
    return 1;
} /* end of initial() */
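/* Note: the program never calls srand(), so rand() returns the same sequence
   on every run and training always starts from identical weights. To vary
   runs, seed once at the top of main(), e.g. srand((unsigned)time(NULL));
   (requires <time.h>). */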

/*********************************************/
/* load the input of the m-th training sample */
/*********************************************/
int input_P(int m)
{
    int i;
    for (i = 0; i < IN; i++)
    {
        P[i] = Study_Data[m].input[i]; /* fetch the data of the m-th sample */
        printf("P[%d]=%f\n", i, P[i]);
    }
    return 1;
} /* end of input_P(m) */

/**********************************************/
/* load the teacher signal of the m-th sample */
/**********************************************/
int input_T(int m)
{
    int k;
    for (k = 0; k < ON; k++)
        T[k] = Study_Data[m].teach[k];
    return 1;
} /* end of input_T(m) */

/* forward pass: hidden-layer input and output */
int H_I_O()
{
    double sigma;
    int i, j;
    for (j = 0; j < HN; j++)
    {
        sigma = 0.0;
        for (i = 0; i < IN; i++)
        {
            sigma += W[j][i] * P[i]; /* weighted sum into hidden unit j */
        }
        X[j] = sigma - YU_HN[j];         /* net input: subtract the hidden-layer threshold (the original indexed YU_HN[i], an out-of-range bug) */
        H[j] = 1.0 / (1.0 + exp(-X[j])); /* hidden output via the sigmoid function */
    }
    return 1;
} /* end of H_I_O() */
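/* Why subtract the threshold? The net input of hidden unit j is
   X[j] = sum_i W[j][i]*P[i] - YU_HN[j]: the threshold YU_HN[j] acts as a bias
   term (equivalent to one extra weight on a constant input of -1), shifting
   the point at which the sigmoid switches. */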

/* forward pass: output-layer input and output */
int O_I_O()
{
    int k, j;
    double sigma;
    for (k = 0; k < ON; k++)
    {
        sigma = 0.0;
        for (j = 0; j < HN; j++)
        {
            sigma += V[k][j] * H[j]; /* the original summed V[k][j]*H[k], which ignores j; fixed */
        }
        Y[k] = sigma - YU_ON[k];         /* net input minus the output-layer threshold */
        O[k] = 1.0 / (1.0 + exp(-Y[k])); /* sigmoid output */
    }
    return 1;
}

/* error between the teacher signal and the output for sample m */
int Err_O_H(int m)
{
    int k;
    double abs_err[ON];
    double sqr_err = 0;
    for (k = 0; k < ON; k++)
    {
        abs_err[k] = T[k] - O[k];
        sqr_err += (abs_err[k]) * (abs_err[k]);
        d_err[k] = abs_err[k] * O[k] * (1.0 - O[k]); /* delta rule: error times the sigmoid derivative O(1-O) */
    }
    err_m[m] = sqr_err / 2; /* E_m = (1/2) * sum of squared errors */
    return 1;
}

double e_err[HN];

/* back-propagate the error from the output layer to the hidden layer */
int Err_H_I()
{
    int j, k;
    double sigma;
    for (j = 0; j < HN; j++)
    {
        sigma = 0.0;
        for (k = 0; k < ON; k++)
        {
            sigma += d_err[k] * V[k][j]; /* the original assigned with '=', discarding all but the last term; fixed */
        }
        e_err[j] = sigma * H[j] * (1 - H[j]);
    }
    return 1;
}
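/* The hidden-layer delta computed above follows the chain rule:
   e_err[j] = H[j]*(1-H[j]) * sum_k d_err[k]*V[k][j],
   i.e. the output-layer deltas are propagated back through the weights V and
   scaled by the derivative of the hidden sigmoid, H*(1-H). */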

/* save the current weights (used by the momentum term) */
int saveWV(int m)
{
    int i, ii, j, jj;
    for (i = 0; i < HN; i++)
    {
        for (j = 0; j < IN; j++)
        {
            Old_WV[m].old_W[i][j] = W[i][j];
        }
    }
    for (ii = 0; ii < ON; ii++)
    {
        for (jj = 0; jj < HN; jj++)
        {
            Old_WV[m].old_V[ii][jj] = V[ii][jj];
        }
    }
    return 1;
}

/* update hidden-to-output weights and output-layer thresholds */
int Delta_O_H(int n) /* (int m,int n) */
{
    int k, j;
    if (n <= 1) /* first step: plain gradient update (the original tested n<1, so n==1 updated nothing) */
    {
        for (k = 0; k < ON; k++)
        {
            for (j = 0; j < HN; j++)
            {
                V[k][j] = V[k][j] + a * d_err[k] * H[j];
            }
            YU_ON[k] += a * d_err[k];
        }
    }
    else /* later steps: add the momentum term of the improved BP algorithm */
    {
        for (k = 0; k < ON; k++)
        {
            for (j = 0; j < HN; j++)
            {
                V[k][j] = V[k][j] + a * d_err[k] * H[j] + alpha * (V[k][j] - Old_WV[(n - 1)].old_V[k][j]);
            }
            YU_ON[k] += a * d_err[k];
        }
    }
    return 1;
}

/* update input-to-hidden weights and hidden-layer thresholds */
int Delta_H_I(int n) /* (int m,int n) */
{
    int i, j;
    if (n <= 1)
    {
        for (j = 0; j < HN; j++)
        {
            for (i = 0; i < IN; i++)
            {
                W[j][i] = W[j][i] + b * e_err[j] * P[i];
            }
            YU_HN[j] += b * e_err[j];
        }
    }
    else
    {
        for (j = 0; j < HN; j++)
        {
            for (i = 0; i < IN; i++)
            {
                W[j][i] = W[j][i] + b * e_err[j] * P[i] + alpha * (W[j][i] - Old_WV[(n - 1)].old_W[j][i]);
            }
            YU_HN[j] += b * e_err[j];
        }
    }
    return 1;
}
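/* Both Delta_* routines implement the improved (momentum) BP update:
   w_new = w + eta*delta*x + alpha*(w - w_old),
   where alpha*(w - w_old) replays a fraction of the previous weight change to
   damp oscillation and speed up convergence. */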

/* total error over all samples */
double Err_Sum()
{
    int m;
    double total_err = 0;
    for (m = 0; m < N; m++)
    {
        total_err += err_m[m];
    }
    return total_err;
}

void savequan()
{
    int i, j, k;
    int ii, jj, kk;
    if ((fp = fopen("f:\\bp\\权值.txt", "a")) == NULL) /* save the results under f:\bp\ */
    {
        printf("Cannot open file, strike any key to exit!");
        getch();
        exit(1);
    }
    fprintf(fp, "Save the result of “权值”(quanzhi) as follows:\n");
    for (i = 0; i < HN; i++)
    {
        for (j = 0; j < IN; j++)
            fprintf(fp, "W[%d][%d]=%f\n", i, j, W[i][j]);
    }
    fprintf(fp, "\n");
    for (ii = 0; ii < ON; ii++)
    {
        for (jj = 0; jj < HN; jj++)
            fprintf(fp, "V[%d][%d]=%f\n", ii, jj, V[ii][jj]);
    }
    fclose(fp);
    printf("\nThe result of “权值.txt”(quanzhi) has been saved successfully!\nPress any key to continue...");
    getch();
    if ((fp = fopen("f:\\bp\\阈值.txt", "a")) == NULL) /* save the results under f:\bp\ */
    {
        printf("Cannot open file, strike any key to exit!");
        getch();
        exit(1);
    }
    fprintf(fp, "Save the result of “输出层的阈值”(huozhi) as follows:\n");
    for (k = 0; k < ON; k++)
        fprintf(fp, "YU_ON[%d]=%f\n", k, YU_ON[k]);
    fprintf(fp, "\nSave the result of “隐层的阈值为”(huozhi) as follows:\n");
    for (kk = 0; kk < HN; kk++)
        fprintf(fp, "YU_HN[%d]=%f\n", kk, YU_HN[kk]);
    fclose(fp);
    printf("\nThe result of “阈值.txt”(huozhi) has been saved successfully!\nPress any key to continue...");
    getch();
}

/************************/
/* program entry: main  */
/************************/
void main()
{
    double Pre_error;
    double sum_err;
    int study;
    int flag;
    flag = 30000;       /* maximum number of learning iterations */
    a = 0.7;            /* learning rates */
    b = 0.7;
    alpha = 0.9;        /* momentum factor */
    study = 0;
    Pre_error = 0.0001; /* target error */
    Start_Show();
    GetTrainingData();
    initial();
    do
    {
        int m;
        ++study;
        for (m = 0; m < N; m++)
        {
            input_P(m);
            input_T(m);
            H_I_O();
            O_I_O();
            Err_O_H(m);
            Err_H_I();
            saveWV(m);
            Delta_O_H(m); /* (m,study) */
            Delta_H_I(m); /* (m,study) */
        }
        sum_err = Err_Sum();
        printf("sum_err=%f\n", sum_err);
        printf("Pre_error=%f\n\n", Pre_error);
        if (study > flag)
        {
            printf("\n*******************************\n");
            printf("The program is ended by itself because of error!\nThe learning limit has been surpassed!\n");
            printf("*****************************\n");
            getch();
            break;
        }
    } while (sum_err > Pre_error);
    printf("\n****************\n");
    printf("\nThe program has studied for [%d] times!\n", study);
    printf("\n****************\n");
    savequan(); /* save the results */
    End_Show();
}

==========================

权值.txt

{Save the result of “权值”(quanzhi) as follows:

W[0][0]=0.350578

W[0][1]=-1.008697

W[0][2]=-0.962250

W[1][0]=0.055661

W[1][1]=-0.372367

W[1][2]=-0.890795

W[2][0]=0.129752

W[2][1]=-0.332591

W[2][2]=-0.521561

V[0][0]=-2.932654

V[0][1]=-3.720583

V[0][2]=-2.648183

V[1][0]=2.938970

V[1][1]=1.633281

V[1][2]=1.944077

}

阈值.txt

{Save the result of “输出层的阈值”(huozhi) as follows:

YU_ON[0]=-4.226843

YU_ON[1]=1.501791

Save the result of “隐层的阈值为”(huozhi) as follows:

YU_HN[0]=-0.431459

YU_HN[1]=0.452127

YU_HN[2]=0.258449

}

==================================

The program above was adapted from a VC++ program!
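A porting note: conio.h, clrscr() and getch() are Turbo C / old VC++ extensions, not standard C. To build the listing with a modern compiler, a minimal shim such as the following can stand in (a sketch: the escape sequence assumes an ANSI-capable terminal, and getchar() waits for Enter rather than a single keystroke):

```c
/* Hypothetical stand-ins for the non-standard <conio.h> routines used above. */
#include <stdio.h>
#define clrscr() printf("\033[2J\033[H")  /* ANSI clear screen + home cursor */
#define getch()  getchar()                /* line-buffered: press Enter */
```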

The ASO optimisation algorithm is described here as an optimisation algorithm based on ant colony optimisation that can be used to solve multi-objective optimisation problems. (The velocity update used below is, in fact, the classic particle-swarm form: an inertia term plus cognitive and social terms.) A BP neural network can be used for classification, regression and similar tasks. When combining ASO with the BP algorithm, the multiple objectives can be folded into a single objective, after which the BP network is trained and used for prediction. Below are C code examples of an ASO-optimised BP algorithm.

1. Implementation of the BP neural network

```c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>

/* neuron */
typedef struct neuron {
    double input;    /* net input */
    double output;   /* activation */
    double delta;    /* error term */
    double bias;     /* bias */
    double *weights; /* incoming weights */
} neuron_t;

/* layer */
typedef struct layer {
    int num_neurons;
    neuron_t *neurons;
} layer_t;

/* network */
typedef struct neural_network {
    int num_layers;
    layer_t *layers;
} neural_network_t;

/* initialise a neuron */
void init_neuron(neuron_t *neuron, int num_weights)
{
    neuron->input = 0.0;
    neuron->output = 0.0;
    neuron->delta = 0.0;
    neuron->bias = (double)rand() / RAND_MAX; /* random bias */
    neuron->weights = (double *)malloc(num_weights * sizeof(double));
    for (int i = 0; i < num_weights; i++) {
        neuron->weights[i] = (double)rand() / RAND_MAX; /* random weights */
    }
}

/* initialise a layer */
void init_layer(layer_t *layer, int num_neurons, int num_weights)
{
    layer->num_neurons = num_neurons;
    layer->neurons = (neuron_t *)malloc(num_neurons * sizeof(neuron_t));
    for (int i = 0; i < num_neurons; i++) {
        init_neuron(&layer->neurons[i], num_weights);
    }
}

/* initialise the network: input layer, hidden layers, output layer */
void init_neural_network(neural_network_t *nn, int num_inputs, int num_outputs,
                         int num_hidden_layers, int num_hidden_neurons)
{
    nn->num_layers = 2 + num_hidden_layers;
    nn->layers = (layer_t *)malloc(nn->num_layers * sizeof(layer_t));
    init_layer(&nn->layers[0], num_inputs, 0); /* input layer */
    for (int i = 0; i < num_hidden_layers; i++) {
        if (i == 0)
            init_layer(&nn->layers[i + 1], num_hidden_neurons, num_inputs);
        else
            init_layer(&nn->layers[i + 1], num_hidden_neurons, num_hidden_neurons);
    }
    init_layer(&nn->layers[nn->num_layers - 1], num_outputs, num_hidden_neurons); /* output layer */
}

/* sigmoid activation */
double activation_function(double x)
{
    return 1.0 / (1.0 + exp(-x));
}

/* forward pass */
void feed_forward(neural_network_t *nn, double *inputs)
{
    /* input layer */
    for (int i = 0; i < nn->layers[0].num_neurons; i++) {
        nn->layers[0].neurons[i].output = inputs[i];
    }
    /* hidden and output layers */
    for (int i = 1; i < nn->num_layers; i++) {
        for (int j = 0; j < nn->layers[i].num_neurons; j++) {
            double sum = 0.0;
            for (int k = 0; k < nn->layers[i - 1].num_neurons; k++) {
                sum += nn->layers[i - 1].neurons[k].output * nn->layers[i].neurons[j].weights[k];
            }
            sum += nn->layers[i].neurons[j].bias;
            nn->layers[i].neurons[j].input = sum;
            nn->layers[i].neurons[j].output = activation_function(sum);
        }
    }
}

/* output-layer error */
void compute_output_error(neural_network_t *nn, double *targets)
{
    layer_t *output_layer = &nn->layers[nn->num_layers - 1];
    for (int i = 0; i < output_layer->num_neurons; i++) {
        double output = output_layer->neurons[i].output;
        double delta = targets[i] - output;
        output_layer->neurons[i].delta = delta * output * (1.0 - output);
    }
}

/* hidden-layer error */
void compute_hidden_error(layer_t *layer, layer_t *next_layer)
{
    for (int i = 0; i < layer->num_neurons; i++) {
        double output = layer->neurons[i].output;
        double sum = 0.0;
        for (int j = 0; j < next_layer->num_neurons; j++) {
            sum += next_layer->neurons[j].weights[i] * next_layer->neurons[j].delta;
        }
        layer->neurons[i].delta = output * (1.0 - output) * sum;
    }
}

/* backward pass: update weights and biases */
void backpropagation(neural_network_t *nn, double *targets, double learning_rate)
{
    compute_output_error(nn, targets);
    for (int i = nn->num_layers - 2; i > 0; i--) {
        compute_hidden_error(&nn->layers[i], &nn->layers[i + 1]);
    }
    for (int i = nn->num_layers - 1; i > 0; i--) {
        for (int j = 0; j < nn->layers[i].num_neurons; j++) {
            neuron_t *neuron = &nn->layers[i].neurons[j];
            for (int k = 0; k < nn->layers[i - 1].num_neurons; k++) {
                double delta_weight = learning_rate * neuron->delta * nn->layers[i - 1].neurons[k].output;
                neuron->weights[k] += delta_weight;
            }
            neuron->bias += learning_rate * neuron->delta;
        }
    }
}

/* train over all examples for a number of epochs */
void train_neural_network(neural_network_t *nn, double **inputs, double **targets,
                          int num_examples, double learning_rate, int epochs)
{
    for (int epoch = 0; epoch < epochs; epoch++) {
        double error = 0.0;
        for (int example = 0; example < num_examples; example++) {
            feed_forward(nn, inputs[example]);
            compute_output_error(nn, targets[example]);
            error += 0.5 * pow(targets[example][0] - nn->layers[nn->num_layers - 1].neurons[0].output, 2);
            backpropagation(nn, targets[example], learning_rate);
        }
        printf("Epoch %d: error = %lf\n", epoch, error);
    }
}

/* predict with the trained network */
double predict(neural_network_t *nn, double *inputs)
{
    feed_forward(nn, inputs);
    return nn->layers[nn->num_layers - 1].neurons[0].output;
}
```

2. Implementation of the ASO optimisation algorithm

The original answer used num_dimensions, global_best_fitness, global_best_position and a global_best_ant without ever declaring them; file-scope definitions are added below so the code compiles, and the shared global_best_position is used directly in the velocity update.

```c
#include <float.h>  /* DBL_MAX */
#include <string.h> /* memcpy */

/* Added declarations (missing from the original answer). */
static int num_dimensions;            /* size of an ant's position vector */
static double global_best_fitness = DBL_MAX;
static double *global_best_position;  /* must be allocated before ASO() runs */

/* ant */
typedef struct ant {
    double *position;
    double *velocity;
    double *best_position;
    double best_fitness;
} ant_t;

/* initialise an ant */
void init_ant(ant_t *ant, int n_dims)
{
    ant->position = (double *)malloc(n_dims * sizeof(double));
    ant->velocity = (double *)malloc(n_dims * sizeof(double));
    ant->best_position = (double *)malloc(n_dims * sizeof(double));
    for (int i = 0; i < n_dims; i++) {
        ant->position[i] = (double)rand() / RAND_MAX; /* random position */
        ant->velocity[i] = 0.0;
        ant->best_position[i] = ant->position[i];
    }
    ant->best_fitness = DBL_MAX;
}

/* fitness: squared error of the network over the training set.
   (Note: as in the original, the ant's position is never decoded into the
   network's weights here; that mapping is left unspecified.) */
double fitness_function(ant_t *ant, neural_network_t *nn, double **inputs,
                        double *targets, int num_examples)
{
    double error = 0.0;
    for (int example = 0; example < num_examples; example++) {
        double output = predict(nn, inputs[example]);
        error += 0.5 * pow(targets[example] - output, 2);
    }
    return error;
}

/* velocity/position update: inertia + cognitive + social terms */
void update_velocity_and_position(ant_t *ant, double inertia_weight,
                                  double cognitive_weight, double social_weight)
{
    for (int i = 0; i < num_dimensions; i++) {
        double r1 = (double)rand() / RAND_MAX;
        double r2 = (double)rand() / RAND_MAX;
        ant->velocity[i] = inertia_weight * ant->velocity[i]
                         + cognitive_weight * r1 * (ant->best_position[i] - ant->position[i])
                         + social_weight * r2 * (global_best_position[i] - ant->position[i]);
        ant->position[i] += ant->velocity[i];
        if (ant->position[i] < 0.0) ant->position[i] = 0.0;      /* clamp to [0, 1] */
        else if (ant->position[i] > 1.0) ant->position[i] = 1.0;
    }
}

/* ASO main loop */
void ASO(neural_network_t *nn, double **inputs, double *targets, int num_examples,
         int num_ants, int num_iterations, double inertia_weight,
         double cognitive_weight, double social_weight)
{
    /* initialise the ants */
    ant_t *ants = (ant_t *)malloc(num_ants * sizeof(ant_t));
    for (int i = 0; i < num_ants; i++) {
        init_ant(&ants[i], num_dimensions);
    }
    /* initial fitness */
    double *fitness = (double *)malloc(num_ants * sizeof(double));
    for (int i = 0; i < num_ants; i++) {
        fitness[i] = fitness_function(&ants[i], nn, inputs, targets, num_examples);
        if (fitness[i] < global_best_fitness) {
            global_best_fitness = fitness[i];
            memcpy(global_best_position, ants[i].position, num_dimensions * sizeof(double));
        }
    }
    /* optimisation loop */
    for (int iteration = 0; iteration < num_iterations; iteration++) {
        for (int i = 0; i < num_ants; i++) {
            update_velocity_and_position(&ants[i], inertia_weight, cognitive_weight, social_weight);
            double fitness_new = fitness_function(&ants[i], nn, inputs, targets, num_examples);
            if (fitness_new < fitness[i]) {
                fitness[i] = fitness_new;
                memcpy(ants[i].best_position, ants[i].position, num_dimensions * sizeof(double));
                if (fitness_new < global_best_fitness) {
                    global_best_fitness = fitness_new;
                    memcpy(global_best_position, ants[i].position, num_dimensions * sizeof(double));
                }
            }
        }
    }
}
```

Combining the BP neural network with the ASO routine gives the ASO-optimised BP algorithm. (The constants NUM_EXAMPLES, LEARNING_RATE and so on were assumed but never defined in the original answer; plausible values are supplied here, and the 2-D arrays are converted to the pointer arrays the functions above expect, which the original skipped.)

```c
#define NUM_EXAMPLES     4
#define NUM_INPUTS       2
#define LEARNING_RATE    0.5
#define EPOCHS           10000
#define NUM_ANTS         20
#define NUM_ITERATIONS   100
#define INERTIA_WEIGHT   0.7
#define COGNITIVE_WEIGHT 1.5
#define SOCIAL_WEIGHT    1.5

int main()
{
    srand((unsigned)time(NULL));
    /* XOR training data */
    double input_data[NUM_EXAMPLES][NUM_INPUTS] = {
        {0.0, 0.0}, {0.0, 1.0}, {1.0, 0.0}, {1.0, 1.0}
    };
    double target_data[NUM_EXAMPLES] = {0.0, 1.0, 1.0, 0.0};

    /* build the pointer arrays expected by train_neural_network() */
    double *inputs[NUM_EXAMPLES];
    double *targets[NUM_EXAMPLES];
    for (int i = 0; i < NUM_EXAMPLES; i++) {
        inputs[i] = input_data[i];
        targets[i] = &target_data[i];
    }

    /* initialise and train the network */
    neural_network_t nn;
    init_neural_network(&nn, NUM_INPUTS, 1, 1, 4);
    train_neural_network(&nn, inputs, targets, NUM_EXAMPLES, LEARNING_RATE, EPOCHS);

    /* ASO-optimised BP */
    num_dimensions = NUM_INPUTS; /* placeholder: the original never specifies how positions map to weights */
    global_best_position = (double *)malloc(num_dimensions * sizeof(double));
    ASO(&nn, inputs, target_data, NUM_EXAMPLES, NUM_ANTS, NUM_ITERATIONS,
        INERTIA_WEIGHT, COGNITIVE_WEIGHT, SOCIAL_WEIGHT);

    /* predict with the network */
    for (int i = 0; i < NUM_EXAMPLES; i++) {
        double output = predict(&nn, inputs[i]);
        printf("Input: %lf %lf, Target: %lf, Output: %lf\n",
               inputs[i][0], inputs[i][1], target_data[i], output);
    }
    return 0;
}
```