
# -*- coding: UTF-8 -*-
from sklearn.datasets import load_iris
import numpy as np
import math


# Load the Iris data set; keep the class labels as a column vector so they can be
# stacked next to the four features
iris = load_iris()
data = iris.data
label = iris.target.reshape(-1, 1)   # shape (150, 1)
data_set = np.hstack((label, data))  # shape (150, 5): label followed by the 4 features

# Network architecture: 4 input features, three hidden layers (5, 5, 4 units), 3 output classes
num_DataLayer = 4
num_HiddenLayer1 = 5
num_HiddenLayer2 = 5
num_HiddenLayer3 = 4
num_OutputLayer = 3

# Pre-activation (net) value of every unit in each layer
net_DataLayer = np.zeros(num_DataLayer)
net_HiddenLayer1 = np.zeros(num_HiddenLayer1)
net_HiddenLayer2 = np.zeros(num_HiddenLayer2)
net_HiddenLayer3 = np.zeros(num_HiddenLayer3)
net_OutputLayer = np.zeros(num_OutputLayer)

# Activation value of every unit in each layer
active_DataLayer = np.zeros(num_DataLayer)
active_HiddenLayer1 = np.zeros(num_HiddenLayer1)
active_HiddenLayer2 = np.zeros(num_HiddenLayer2)
active_HiddenLayer3 = np.zeros(num_HiddenLayer3)
active_OutputLayer = np.zeros(num_OutputLayer)


# Weight matrices, initialized uniformly in [0, 1); rows index the destination layer,
# columns the source layer
WDH1 = np.random.random((num_HiddenLayer1, num_DataLayer))
WH1H2 = np.random.random((num_HiddenLayer2, num_HiddenLayer1))
WH2H3 = np.random.random((num_HiddenLayer3, num_HiddenLayer2))
WH3O = np.random.random((num_OutputLayer, num_HiddenLayer3))

# Scalar bias added to every unit in the corresponding layer
biasDH1 = 1
biasH1H2 = 1
biasH2H3 = 1
bias3O = 1


##############################################
# Sigmoid activation function
def sigmoid(x):
    return 1 / (1 + np.exp(-x))
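
##############################################
# Hypothetical helper (illustration only, not used below): the sigmoid derivative
# expressed in terms of an already-activated value a = sigmoid(x). The gradient
# code further down computes the same quantity inline as active * (1 - active).
def sigmoid_prime_from_activation(a):
    return a * (1 - a)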


##############################################
# Forward pass: propagate one sample through the network, caching both the
# pre-activation (net_*) and activated (active_*) values for backpropagation
def forward(DataLayer):
    global net_DataLayer
    global net_HiddenLayer1
    global net_HiddenLayer2
    global net_HiddenLayer3
    global net_OutputLayer
    
    global active_DataLayer
    global active_HiddenLayer1
    global active_HiddenLayer2
    global active_HiddenLayer3
    global active_OutputLayer

    net_HiddenLayer1 = np.dot(WDH1, DataLayer) + biasDH1
    active_HiddenLayer1 = sigmoid(net_HiddenLayer1)
    
    net_HiddenLayer2 = np.dot(WH1H2, active_HiddenLayer1) + biasH1H2
    active_HiddenLayer2 = sigmoid(net_HiddenLayer2)
    
    net_HiddenLayer3 = np.dot(WH2H3, active_HiddenLayer2) + biasH2H3
    active_HiddenLayer3 = sigmoid(net_HiddenLayer3)
    
    net_OutputLayer = np.dot(WH3O, active_HiddenLayer3) + bias3O
    active_OutputLayer = sigmoid(net_OutputLayer)
    
    return active_OutputLayer


##############################################
def loss(label_true, label_predict):
    # One-hot encode the true class, then take the squared error of each output unit
    Y = np.zeros(num_OutputLayer)
    Y[label_true] = 1
    loss_per_unit = ((label_predict - Y) ** 2) / 2
    total_loss = np.sum(loss_per_unit)
    return total_loss, loss_per_unit
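
##############################################
# Hypothetical helper (illustration only): the one-hot encoding that loss() builds
# inline, factored out as a sketch so the gradient functions below could reuse it.
def one_hot(label_true, num_classes=num_OutputLayer):
    Y = np.zeros(num_classes)
    Y[label_true] = 1
    return Y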

# Partial derivative of the loss with respect to each weight matrix
##############################################
def cal_delta_WH3O(label_true, label_predict):
    global active_OutputLayer
    global net_OutputLayer
    global active_HiddenLayer3

    print("cal_delta_WH3O")
    # dL/da_out = a_out - Y, where Y is the one-hot encoding of the true class
    Y = np.zeros(num_OutputLayer)
    Y[label_true] = 1
    loss_active = label_predict - Y
    # da_out/dnet_out for the sigmoid: a * (1 - a)
    active_net = active_OutputLayer * (1 - active_OutputLayer)
    # The outer product with the previous layer's activations gives a gradient with
    # the same shape as WH3O (num_OutputLayer x num_HiddenLayer3)
    delta_WH3O = np.outer(loss_active * active_net, active_HiddenLayer3)
    return delta_WH3O

##############################################
def cal_delta_H2H3():
    # TODO: gradient of the loss w.r.t. WH2H3
    print("cal_delta_H2H3")

##############################################
def cal_delta_H1H2():
    # TODO: gradient of the loss w.r.t. WH1H2
    print("cal_delta_H1H2")

##############################################
def cal_delta_DH1():
    # TODO: gradient of the loss w.r.t. WDH1
    print("cal_delta_DH1")


##############################################
def update_weights():
    # TODO: apply the computed gradients to WDH1, WH1H2, WH2H3 and WH3O
    print("update_weights")

##############################################
def backward():
    # TODO: chain the cal_delta_* functions together and call update_weights
    print("backward")



# Push one sample through the network and inspect its loss
s = 1
DataLayer = data[s, :]
print(DataLayer)
output = forward(DataLayer)

loss_total, loss_OutputLayer = loss(int(label[s]), output)
print(loss_OutputLayer)
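
# A minimal training-loop sketch with the same limitation: only WH3O receives a real
# gradient because the other derivatives are still placeholders. The epoch count and
# learning rate are arbitrary illustration values, and cal_delta_WH3O prints its name
# on every call, so the output is verbose.
for epoch in range(2):
    epoch_loss = 0.0
    for s in range(data.shape[0]):
        epoch_loss += sgd_step_sketch(data[s, :], int(label[s]), learning_rate=0.1)
    print("epoch", epoch, "mean loss", epoch_loss / data.shape[0])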
                                                                                                                                            


# Quick sanity check that the global keyword rebinds module-level variables,
# which forward() and the gradient functions above rely on
ttt = 2
def f():
    global ttt
    ttt = 5

f()
print(ttt)  # prints 5





