% Neural network code (神经网络代码)

% Demo script: load the iris dataset, train a small feed-forward
% sigmoid network (4-5-1), and report test accuracy.
clear all;
clc;

%% Data import
iris_data = csvread("iris_dataset.csv");

%% Data preprocessing
nums_train = 70;
nums_test = 30;
Dimension = 5;  % 4 feature columns + 1 label column

% split() is a project-local helper (defined elsewhere) that partitions
% the dataset into training and testing subsets.
[train_set, test_set] = split(iris_data, nums_train, nums_test);
training_x = train_set(:,1:4)';
training_y = train_set(:,5)';
testing_x = test_set(:,1:4)';
testing_y = test_set(:,5)';

%% Network initialization
% Layer sizes: [input hidden output].
Net_Scale = [4 5 1];
step = 0.01;            % learning rate
nums_iteration = 200;   % gradient-descent iterations
% Each parameter matrix is stored as [W b]: one extra column for the bias.
global Neural_Net_layer_1;
global Neural_Net_layer_2;
Neural_Net_layer_1 = rand([Net_Scale(2) Net_Scale(1)]+[0 1]);
Neural_Net_layer_2 = rand([Net_Scale(3) Net_Scale(2)]+[0 1]);

%% Network training
train_neural_net(training_x, training_y, step, nums_iteration);

%% Network evaluation
% BUG FIX 1: renamed 'error' -> 'err_tol'; 'error' shadows MATLAB's
% built-in error() function.
% BUG FIX 2: fixed the fprintf format string. The original
% "the accuracy is %.1f\n%" left a dangling '%' conversion character
% after the newline; a literal percent sign is written as '%%'.
err_tol = 0.02;
accuracy = test_neural_net(testing_x, testing_y, err_tol);
fprintf("the accuracy is %.1f%%\n", 100*accuracy);

%% Neural network training (神经网络训练)

function []= train_neural_net(training_x,training_y,step,nums_iteration)
% Train the one-hidden-layer sigmoid network stored in the globals
% Neural_Net_layer_1 / Neural_Net_layer_2 by full-batch gradient descent
% on the binary cross-entropy loss L = -[y*log(a) + (1-y)*log(1-a)].
%   training_x     - (#features x #samples) input matrix
%   training_y     - (1 x #samples) target row vector
%   step           - learning rate
%   nums_iteration - number of gradient-descent iterations
% Each global holds [W b]: the last column is the bias vector.
global Neural_Net_layer_1;
global Neural_Net_layer_2;

% Size of each layer's [W b] parameter matrix.
size_layer_1 = size(Neural_Net_layer_1);
size_layer_2 = size(Neural_Net_layer_2);

for i = 1:nums_iteration

    % ---- Forward pass (sigmoid applied elementwise, vectorized) ----
    % Hidden layer.
    W_X_1 = Neural_Net_layer_1(:,1:size_layer_1(2)-1) * training_x;
    W_X_B_1 = W_X_1 + Neural_Net_layer_1(:,size_layer_1(2));
    layer_1 = 1./(1+exp(-W_X_B_1));

    % Output layer.
    W_X_2 = Neural_Net_layer_2(:,1:size_layer_2(2)-1) * layer_1;
    W_X_B_2 = W_X_2 + Neural_Net_layer_2(:,size_layer_2(2));
    layer_2 = 1./(1+exp(-W_X_B_2));

    % ---- Backward pass ----
    % dL/da at the output for cross-entropy loss.
    a_grad_3 = (1-training_y)./(1-layer_2) - training_y./layer_2;

    % dL/dz at the output: chain through sigmoid'(z) = a.*(1-a).
    z_grad_3 = a_grad_3.*layer_2.*(1-layer_2);

    % dL/da at the hidden layer.
    % BUG FIX: back-propagate the pre-activation gradient z_grad_3, not
    % a_grad_3. The original used W2' * a_grad_3 here, which skipped the
    % output sigmoid derivative in the chain rule.
    a_grad_2 = Neural_Net_layer_2(:,1:size_layer_2(2)-1)' * z_grad_3;

    % dL/dz at the hidden layer.
    z_grad_2 = a_grad_2.*layer_1.*(1-layer_1);

    % ---- Gradient-descent updates ----
    % Weight updates (gradient summed over the batch by the matrix product).
    Neural_Net_layer_2(:,1:size_layer_2(2)-1) = Neural_Net_layer_2(:,1:size_layer_2(2)-1) - step.*z_grad_3*layer_1';
    Neural_Net_layer_1(:,1:size_layer_1(2)-1) = Neural_Net_layer_1(:,1:size_layer_1(2)-1) - step.*z_grad_2*training_x';

    % Bias updates (sum the gradient over the batch explicitly).
    Neural_Net_layer_2(:,size_layer_2(2)) = Neural_Net_layer_2(:,size_layer_2(2)) - sum(step*z_grad_3,2);
    Neural_Net_layer_1(:,size_layer_1(2)) = Neural_Net_layer_1(:,size_layer_1(2)) - sum(step*z_grad_2,2);
end
end

%% Neural network testing (神经网络测试)

function [accuracy] = test_neural_net(testing_x,testing_y,error)
% Evaluate the trained network (stored in the two globals) on a test set.
%   testing_x - (#features x #samples) input matrix
%   testing_y - (1 x #samples) target row vector
%   error     - tolerance: a sample counts as correct when
%               |prediction - target| <= error
% Returns the fraction of correctly predicted samples.
global Neural_Net_layer_1;
global Neural_Net_layer_2;

% Number of test samples (columns of testing_x).
n_samples = size(testing_x, 2);

% Split each parameter matrix [W b] into its weight and bias parts.
n_cols_1 = size(Neural_Net_layer_1, 2);
n_cols_2 = size(Neural_Net_layer_2, 2);
W1 = Neural_Net_layer_1(:, 1:n_cols_1-1);
b1 = Neural_Net_layer_1(:, n_cols_1);
W2 = Neural_Net_layer_2(:, 1:n_cols_2-1);
b2 = Neural_Net_layer_2(:, n_cols_2);

% Forward pass with elementwise sigmoid activations.
hidden = 1./(1 + exp(-(W1*testing_x + b1)));
output = 1./(1 + exp(-(W2*hidden + b2)));

% A prediction is correct when it lies within the tolerance of the target.
accuracy = sum(abs(output - testing_y) <= error) / n_samples;
end

  • 1
    点赞
  • 3
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值