BP Network (Regression) with NumPy

import numpy as np
import matplotlib.pyplot as plt

INPUT_SIZE = 2
HIDDEN_SIZE = 4
OUTPUT_SIZE = 2
LEARNING_RATE = 0.000001
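# Network: 2 inputs -> 4 hidden units (ReLU) -> 2 linear outputs; the training data maps [n, n] to [n + 1, n + 1].
# Weights and biases are initialized uniformly at random in [0, 1) by the helpers below.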

def get_weights(last, next):
    return np.random.rand(last, next)


def get_bias(size):
    return np.random.rand(size)


def sigmoid(input):
    return 1 / (1 + np.exp(-input))


def relu(input):
    return np.maximum(input, 0)


def relu_d(input):
    # Derivative of ReLU: 1 where the value is positive, 0 elsewhere.
    return (input > 0).astype(np.float32)

def get_derivatives_of_sigmoid(input):
    # Expects the sigmoid output s and returns s * (1 - s), i.e. the sigmoid derivative written in terms of its output.
    return input * (1 - input)


def loss_MSE(pre, real):
    # Mean squared error and the raw error (pre - real), which is used as the gradient signal below.
    # (The exact MSE gradient would carry an extra 2 / n factor; that constant is absorbed by the learning rate.)
    diff = pre - real
    MSE = np.mean(np.square(diff))
    return MSE, diff


# Parameter initialization
w1 = get_weights(INPUT_SIZE, HIDDEN_SIZE)
b1 = get_bias(HIDDEN_SIZE)

w2 = get_weights(HIDDEN_SIZE, OUTPUT_SIZE)
b2 = get_bias(OUTPUT_SIZE)

# train
loss_numpy = np.array([],dtype=np.float32)
for epoch in range(212):
    # Forward pass:
    input = np.array([epoch + 1, epoch + 1])
    hidden_a = np.dot(input, w1) + b1

    #hidden_h = hidden_a
    # hidden_h = sigmoid(hidden_a)
    hidden_h = relu(hidden_a)

    output_a = np.dot(hidden_h, w2) + b2
    output_h = output_a
    # output_h = sigmoid(output_a)
    target = np.array([epoch + 2, epoch + 2])

    # Backward pass:
    loss, derivatives = loss_MSE(output_h, target)
    derivatives_output = derivatives  # * get_derivatives_of_sigmoid(output_h)
    derivatives_hidden = np.dot(derivatives_output, w2.transpose()) * relu_d(hidden_h)
    # (for a sigmoid hidden layer this would instead be multiplied by hidden_h * (1 - hidden_h))
    gradient_w1 = np.zeros(w1.shape, dtype=np.float32)
    gradient_w2 = np.zeros(w2.shape, dtype=np.float32)
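    # The weight gradients are outer products, written out element-wise below:
    # dL/dw1[i][j] = input[i] * derivatives_hidden[j], dL/dw2[i][j] = hidden_h[i] * derivatives_output[j]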
    x, y = w1.shape
    for i in range(x):
        for j in range(y):
            gradient_w1[i][j] = input[i] * derivatives_hidden[j]
    w1 = w1 - gradient_w1 * LEARNING_RATE
    x, y = w2.shape
    for i in range(x):
        for j in range(y):
            gradient_w2[i][j] = hidden_h[i] * derivatives_output[j]
    w2 = w2 - gradient_w2 * LEARNING_RATE
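    # Note: the biases b1 and b2 are left at their random initial values; only the weights are updated here.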
    loss_numpy = np.insert(loss_numpy, loss_numpy.size, loss)
    print(epoch, ":", loss, "out", output_h)

plt.plot(loss_numpy, 'r-', )
plt.title('loss function (train)', fontsize='large')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.show()

loss_numpy = np.array([],dtype=np.float32)
# test data
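# Evaluation: forward passes only, no weight updates.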
for epoch in range(300):
    # Forward pass:
    input = np.array([epoch + 1, epoch + 1])
    target = np.array([epoch+2,epoch+2])
    hidden_a = np.dot(input, w1) + b1

    hidden_h = relu(hidden_a)  # use the same hidden activation as in training
    # hidden_h = sigmoid(hidden_a)

    output_a = np.dot(hidden_h, w2) + b2
    output_h = output_a
    loss, derivatives = loss_MSE(output_h, target)
    print(epoch, ": input:", input, "out", output_h)
    print(loss)
    loss_numpy = np.insert(loss_numpy, loss_numpy.size, loss)
plt.plot(loss_numpy, 'r-', )
plt.title('loss function (test)', fontsize='large')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.show()

MSE (mean squared error) on the training set

MSE (mean squared error) on the test set (note the axis scale; overfitting seems to have occurred QaQ)
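As a quick aside, the element-wise double loops that build gradient_w1 and gradient_w2 are just outer products; a minimal, self-contained sketch (with dummy values of the same shapes as in the script) showing the equivalence:

import numpy as np

# Dummy values with the shapes used in the script: input (2,), derivatives_hidden (4,).
inp = np.array([3.0, 3.0])
delta_hidden = np.array([0.1, 0.2, 0.3, 0.4])

# The double loop from the training step ...
g_loop = np.zeros((2, 4))
for i in range(2):
    for j in range(4):
        g_loop[i][j] = inp[i] * delta_hidden[j]

# ... equals a single outer product.
g_outer = np.outer(inp, delta_hidden)
print(np.allclose(g_loop, g_outer))  # prints True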

 
