C 语言实现神经网络算法:神经网络 BP 算法(C 程序实现)[转载]

文件输入输出目录为:F:\BP\

训练样本文件名:训练样本.txt

值为:

1

1

-1

1

-1

1

0

1

0

1

输出文件名为:阈值.txt 权值.txt

=========================

#include "stdlib.h"

#include "math.h"

#include "conio.h"

#include "stdio.h"

#define N 2 /* number of training samples */

#define IN 3 /* number of input-layer neurons */

#define HN 3 /* number of hidden-layer neurons */

#define ON 2 /* number of output-layer neurons */

#define Z 20 /* weight-history depth: old weights saved after each study step */

double P[IN]; /* input vector of a single sample */

double T[ON]; /* teacher (target) vector of a single sample */

double W[HN][IN]; /* input-layer to hidden-layer weights */

double V[ON][HN]; /* hidden-layer to output-layer weights */

double X[HN]; /* net input of the hidden layer */

double Y[ON]; /* net input of the output layer */

double H[HN]; /* output of the hidden layer */

double O[ON]; /* output of the output layer */

double YU_HN[HN]; /* thresholds (biases) of the hidden layer */

double YU_ON[ON]; /* thresholds (biases) of the output layer */

double err_m[N]; /* total error of the m-th sample */

double a; /* learning rate, output layer -> hidden layer */

double b; /* learning rate, hidden layer -> input layer */

double alpha; /* momentum factor (used by the improved BP algorithm) */

double d_err[ON]; /* per-output error deltas (name suggests backprop deltas — used elsewhere in the file) */

FILE *fp; /* shared file handle; the actual file I/O happens elsewhere in the file */

/* One training sample: an input vector plus its teacher (target) vector. */

struct {

double input[IN];

double teach[ON];

}Study_Data[N];

/* Weight snapshots kept per study step, for the improved (momentum) BP algorithm. */

struct {

double old_W[HN][IN];

double old_V[ON][HN];

}Old_WV[Z];

/*
 * Print the welcome banner and usage notes, then wait for a key press
 * before clearing the screen and returning.
 *
 * NOTE(review): the scraped original was truncated here and several of
 * its string literals were broken across lines; this reconstruction
 * rejoins them and escapes the backslashes in the example path on the
 * first note (the original "'F:\BP\..." contained invalid escape
 * sequences). The trailing return value was lost in the scrape —
 * TODO confirm against the original source.
 *
 * Uses the non-standard <conio.h> functions clrscr()/getch(), as the
 * rest of this (DOS-era) program does.
 */
int Start_Show()
{
    clrscr();
    printf("\n ***********************\n");
    printf(" * Welcome to use      *\n");
    printf(" * this program of    *\n");
    printf(" * calculating the BP *\n");
    printf(" * model!             *\n");
    printf(" * Happy every day!   *\n");
    printf(" ***********************\n");
    printf("\n\nBefore starting,please read the follows carefully:\n\n");
    printf(" 1.Please ensure the Path of the '训练样本.txt'(xunlianyangben.txt) is \ncorrect,like 'F:\\BP\\训练样本.txt'!\n");
    printf(" 2.The calculating results will be saved in the Path of 'F:\\BP\\'!\n");
    printf(" 3.The program will load 10 datas when running from 'F:\\BP\\训练样本.txt'!\n");
    printf(" 4.The program of BP can study itself for no more than 30000 times.\nAnd surpassing the number,the program will be ended by itself in\npreventing running infinitely because of error!\n");
    printf("\n\n\n");
    printf("Now press any key to start...\n");
    getch();
    getch();
    clrscr();
    return 1; /* NOTE(review): original tail truncated by the scrape — TODO confirm */
}

  • 1
    点赞
  • 8
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
/*
 * BP (back-propagation) neural network demo from the article text:
 * a 2-3-1 multilayer perceptron trained by online gradient descent to
 * learn XOR. It performs forward propagation, backward propagation and
 * weight updates, then prints the trained network's output for each of
 * the four XOR samples.
 *
 * Improvement over the original: the forward pass was duplicated
 * verbatim between the training loop and the test loop; it is now a
 * single static helper.
 */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define INPUT_NUM 2
#define HIDDEN_NUM 3
#define OUTPUT_NUM 1
#define LEARNING_RATE 0.5
#define EPOCHS 10000

/* Logistic activation function. */
double sigmoid(double x) {
    return 1.0 / (1.0 + exp(-x));
}

/* Derivative of the logistic function, expressed in terms of its output. */
double sigmoid_derivative(double x) {
    return x * (1.0 - x);
}

/*
 * Forward pass shared by training and testing: fills hidden_outputs
 * with the hidden activations for sample i and returns the network
 * output. Weight layout: [..][0] is the bias, [..][k+1] multiplies
 * input k.
 */
static double forward(int i,
                      double inputs[INPUT_NUM][4],
                      double hidden_weights[HIDDEN_NUM][INPUT_NUM + 1],
                      double output_weights[OUTPUT_NUM][HIDDEN_NUM + 1],
                      double hidden_outputs[HIDDEN_NUM])
{
    for (int j = 0; j < HIDDEN_NUM; j++) {
        double sum = hidden_weights[j][0]; /* bias */
        for (int k = 0; k < INPUT_NUM; k++) {
            sum += hidden_weights[j][k + 1] * inputs[k][i];
        }
        hidden_outputs[j] = sigmoid(sum);
    }
    double sum = output_weights[0][0]; /* bias */
    for (int j = 0; j < HIDDEN_NUM; j++) {
        sum += output_weights[0][j + 1] * hidden_outputs[j];
    }
    return sigmoid(sum);
}

int main() {
    /* XOR truth table, stored one column per sample. */
    double inputs[INPUT_NUM][4] = {{0, 0, 1, 1}, {0, 1, 0, 1}};
    double outputs[OUTPUT_NUM][4] = {{0, 1, 1, 0}};
    double hidden_weights[HIDDEN_NUM][INPUT_NUM + 1];
    double output_weights[OUTPUT_NUM][HIDDEN_NUM + 1];
    double hidden_outputs[HIDDEN_NUM];
    double output;
    double error;
    double delta_output;
    double delta_hidden[HIDDEN_NUM];
    double delta_weight;

    /* Initialize all weights (including biases) uniformly in [-1, 1].
     * srand() is never called, so every run is reproducible. */
    for (int i = 0; i < HIDDEN_NUM; i++) {
        for (int j = 0; j < INPUT_NUM + 1; j++) {
            hidden_weights[i][j] = ((double) rand() / RAND_MAX) * 2.0 - 1.0;
        }
    }
    for (int i = 0; i < OUTPUT_NUM; i++) {
        for (int j = 0; j < HIDDEN_NUM + 1; j++) {
            output_weights[i][j] = ((double) rand() / RAND_MAX) * 2.0 - 1.0;
        }
    }

    /* Train: plain online (per-sample) gradient descent. */
    for (int epoch = 0; epoch < EPOCHS; epoch++) {
        for (int i = 0; i < 4; i++) {
            output = forward(i, inputs, hidden_weights, output_weights,
                             hidden_outputs);

            /* Backward pass: output delta, then hidden deltas. */
            error = outputs[0][i] - output;
            delta_output = error * sigmoid_derivative(output);
            for (int j = 0; j < HIDDEN_NUM; j++) {
                delta_hidden[j] = delta_output * output_weights[0][j + 1]
                                * sigmoid_derivative(hidden_outputs[j]);
            }

            /* Update input->hidden weights and biases. */
            for (int j = 0; j < HIDDEN_NUM; j++) {
                delta_weight = LEARNING_RATE * delta_hidden[j];
                hidden_weights[j][0] += delta_weight;
                for (int k = 0; k < INPUT_NUM; k++) {
                    hidden_weights[j][k + 1] += delta_weight * inputs[k][i];
                }
            }

            /* Update hidden->output weights and bias. */
            delta_weight = LEARNING_RATE * delta_output;
            output_weights[0][0] += delta_weight;
            for (int j = 0; j < HIDDEN_NUM; j++) {
                output_weights[0][j + 1] += delta_weight * hidden_outputs[j];
            }
        }
    }

    /* Test: print the trained network's response to each sample. */
    for (int i = 0; i < 4; i++) {
        output = forward(i, inputs, hidden_weights, output_weights,
                         hidden_outputs);
        printf("Input: %lf %lf, Output: %lf\n",
               inputs[0][i], inputs[1][i], output);
    }
    return 0;
}

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值