Some Code for Error Backpropagation (1)

A neural network has weights (W) and biases (b); the process of adjusting them so that the network fits the training data is called learning. It consists of the following four steps:

Step 1: Randomly select a mini-batch of data from the training set.

Step 2: Compute the gradient of the loss function with respect to each weight parameter (W).

Step 3: Update the weights by a small amount in the direction opposite the gradient (gradient descent).

Step 4: Repeat steps 1 through 3 (see the sketch below).
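To make the four steps concrete, here is a minimal sketch of one such training loop. It assumes a hypothetical `network` object exposing `params` (a dict of W and b arrays) and a `gradient(x, t)` method, plus pre-loaded arrays `x_train` / `t_train`; none of these names come from the code in this post:

# A minimal sketch of the four-step learning loop (gradient descent)
import numpy as np

learning_rate = 0.1
batch_size = 100

for step in range(1000):
    # Step 1: randomly pick a mini-batch from the training set
    batch_mask = np.random.choice(x_train.shape[0], batch_size)
    x_batch = x_train[batch_mask]
    t_batch = t_train[batch_mask]

    # Step 2: gradient of the loss w.r.t. every weight and bias
    grads = network.gradient(x_batch, t_batch)

    # Step 3: nudge each parameter against its gradient
    for key in network.params:
        network.params[key] -= learning_rate * grads[key]
    # Step 4: the loop itself repeats steps 1-3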

# Multiplication layer
class MulLayer:
    def __init__(self):
        self.x = None
        self.y = None

    def forward(self, x, y):
        self.x = x
        self.y = y
        out = x * y

        return out

    def backward(self, dout):
        # multiplication gate: each input's gradient is the upstream
        # gradient times the other forward input (d(xy)/dx = y)
        dx = dout * self.y
        dy = dout * self.x

        return dx, dy
# Addition layer
class AddLayer:
    def __init__(self):
        pass

    def forward(self, x, y):
        out = x + y
        return out

    def backward(self, dout):
        # addition gate: the upstream gradient passes through unchanged
        dx = dout * 1
        dy = dout * 1
        return dx, dy

Here is an example written using only the addition and multiplication layers:

# Application example: 2 apples and 3 oranges, plus 10% tax
apple = 100
apple_num = 2
orange = 150
orange_num = 3
tax = 1.1

#layer
mul_apple_layer = MulLayer()
mul_orange_layer = MulLayer()
add_apple_orange_layer = AddLayer()
mul_tax_layer = MulLayer()

#forward
apple_price = mul_apple_layer.forward(apple, apple_num)  # (1)
orange_price = mul_orange_layer.forward(orange, orange_num)#(2)
all_price = add_apple_orange_layer.forward(apple_price, orange_price)#(3)
price = mul_tax_layer.forward(all_price, tax)  # (4)

#backward
dprice = 1
dall_price, dtax = mul_tax_layer.backward(dprice)#(4)
dapple_price, dorange_price = add_apple_orange_layer.backward(dall_price)#(3)
dorange, dorange_num = mul_orange_layer.backward(dorange_price)#(2)
dapple, dapple_num = mul_apple_layer.backward(dapple_price)#(1)

print(price)  # 715 (floating point: 715.0000000000001)
print(dapple_num, dapple, dorange, dorange_num, dtax)  # 110 2.2 3.3 165 650

Activation Functions

# ReLU activation layer
import numpy as np

class Relu:
    def __init__(self):
        self.mask = None

    def forward(self, x):
        self.mask = (x <= 0)  # True where the input is non-positive
        out = x.copy()
        out[self.mask] = 0    # those elements are clamped to 0

        return out

    def backward(self, dout):
        dout[self.mask] = 0  # gradient is 0 where the forward input was <= 0
        dx = dout

        return dx

Applying the ReLU layer

# Applying the ReLU layer: demonstrating the mask
x = np.array([[1.0, -0.5], [-2.0, 3.0]])
print(x)

mask = (x <= 0)  # mask is True where x <= 0 and False elsewhere
print(mask)
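Building on the same array x, here is a small usage sketch of the Relu layer defined above (the upstream gradients in dout are arbitrary values chosen for illustration):

# Relu layer usage sketch
relu = Relu()
out = relu.forward(x)
print(out)
# [[1. 0.]
#  [0. 3.]]

dout = np.array([[5.0, 6.0], [7.0, 8.0]])  # arbitrary upstream gradients
dx = relu.backward(dout)
print(dx)
# [[5. 0.]
#  [0. 8.]]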

# Sigmoid activation layer
import numpy as np

class Sigmoid:
    def __init__(self):
        self.out = None
        
    def forward(self, x):
        out = 1 / (1 + np.exp(-x))
        self.out = out
        
        return out
    
    def backward(self, dout):
        # derivative of the sigmoid is y * (1 - y), using the cached output
        dx = dout * (1.0 - self.out) * self.out
        
        return dx
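A short usage sketch of the Sigmoid layer (the printed values are approximate):

# Sigmoid layer usage sketch
sigmoid = Sigmoid()
out = sigmoid.forward(np.array([0.0, 2.0]))
print(out)  # [0.5        0.88079708] approximately

dx = sigmoid.backward(np.array([1.0, 1.0]))  # upstream gradient of ones
print(dx)   # [0.25       0.10499359] approximately; equals out * (1 - out)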

# Affine layer (batch version)

import numpy as np
class Affine:
    def __init__(self, W, b):
        self.W = W
        self.b = b
        self.x = None
        self.dW = None
        self.db = None

    def forward(self, x):
        self.x = x
        out = np.dot(x, self.W) + self.b

        return out

    def backward(self, dout):
        dx = np.dot(dout, self.W.T)
        self.dW = np.dot(self.x.T, dout)
        self.db = np.sum(dout, axis=0)  # sum the bias gradient over the batch dimension (axis=0)

        return dx
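A usage sketch of the Affine layer with a small random batch; the point here is the shapes, not the values:

# Affine layer usage sketch
np.random.seed(0)
W = np.random.randn(2, 3)  # input dimension 2, output dimension 3
b = np.zeros(3)
affine = Affine(W, b)

x = np.random.randn(4, 2)  # a batch of 4 samples
out = affine.forward(x)
print(out.shape)           # (4, 3)

dout = np.ones((4, 3))     # dummy upstream gradient
dx = affine.backward(dout)
print(dx.shape)            # (4, 2)
print(affine.dW.shape)     # (2, 3)
print(affine.db.shape)     # (3,)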

Implementation code for the Softmax-with-Loss layer:

Cross Entropy Error (cross-entropy loss): for a one-hot label t and softmax output y, the per-sample error is E = -Σ_k t_k · log(y_k), averaged over the batch.

# Implementation of the Softmax-with-Loss layer
import numpy as np

class SoftmaxWithLoss:
    def __init__(self):
        self.loss = None #loss
        self.y = None    #output of softmax
        self.t = None    #supervision data (one-hot vectors)

#################  softmax function  ###########
    def softmax(self, a):
        c = np.max(a, axis=-1, keepdims=True)  # subtract the per-row max for numerical stability
        exp_a = np.exp(a - c)
        sum_exp_a = np.sum(exp_a, axis=-1, keepdims=True)  # normalize each sample separately
        y = exp_a / sum_exp_a

        return y
###############  cross-entropy error function  ###############
    def cross_entropy_error(self, y, t):
        if y.ndim == 1:
            t = t.reshape(1, t.size)
            y = y.reshape(1, y.size)

        # if t is one-hot, convert it to class-index labels
        if t.size == y.size:
            t = t.argmax(axis=1)

        batch_size = y.shape[0]
        return -np.sum(np.log(y[np.arange(batch_size), t] + 1e-7)) / batch_size

    def forward(self, x, t):
        self.t = t
        self.y = self.softmax(x)  # methods of this class, so call via self
        self.loss = self.cross_entropy_error(self.y, self.t)

        return self.loss

    def backward(self, dout=1):
        # with one-hot t, the combined softmax + cross-entropy gradient
        # simplifies to (y - t), averaged over the batch
        batch_size = self.t.shape[0]
        dx = (self.y - self.t) / batch_size

        return dx
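A usage sketch of SoftmaxWithLoss on a batch of two samples with one-hot labels (the scores in x are made up for illustration):

# SoftmaxWithLoss usage sketch
layer = SoftmaxWithLoss()
x = np.array([[0.3, 2.9, 4.0],
              [0.1, 0.1, 5.0]])  # raw scores: 2 samples, 3 classes
t = np.array([[0, 0, 1],
              [0, 0, 1]])        # one-hot labels: class 2 for both samples

loss = layer.forward(x, t)
print(loss)      # a small positive number; approaches 0 as predictions improve

dx = layer.backward()
print(dx.shape)  # (2, 3); each row is (y - t) / batch_size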
