ANN学习

1.0 ANN线性回归

# 参考图书《Python深度学习——算法实践》, 作者: Sudharsan Ravichandiran

import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# Prepare the data: the XOR truth table.
# BUG FIX: the original fourth sample was [0, 1] — a duplicate of the first
# sample but labelled 0, contradicting [0, 1] -> 1. The fourth XOR input
# must be [0, 0] (whose label is indeed 0), so the dataset is consistent.
X = np.array([[0, 1], [1, 0], [1, 1], [0, 0]])
y = np.array([[1], [1], [0], [0]])

# define the number of nodes in each layer:
num_input = 2
num_hidden = 5
num_output = 1

# initialize the weights randomly and the biases to zero
# input -> hidden layer weights (shape: num_input x num_hidden):
Wxh = np.random.randn(num_input, num_hidden)
bh = np.zeros((1, num_hidden))
# (removed a stray debug `print(bh)` that only echoed the zero bias row)

# hidden -> output layer weights (shape: num_hidden x num_output):
Why = np.random.randn(num_hidden, num_output)
by = np.zeros((1, num_output))


# define the sigmoid activation function:
def sigmoid(z):
    """Logistic sigmoid activation: maps z elementwise into (0, 1)."""
    exp_neg = np.exp(-z)
    return 1 / (1 + exp_neg)


# define derivative of the sigmoid function
def sigmoid_derivative(z):
    """Derivative of the sigmoid, computed directly from the pre-activation z.

    Equals sigmoid(z) * (1 - sigmoid(z)), written as exp(-z) / (1 + exp(-z))^2.
    """
    exp_neg = np.exp(-z)
    return exp_neg / (1 + exp_neg) ** 2


# define the forward propagation:
def forward_prop(X, Wxh, Why):
    """Run one forward pass through the two-layer network.

    Reads the module-level biases ``bh`` and ``by`` (they are not parameters).

    Returns:
        (z1, a1, z2, y_hat) — hidden pre-activation, hidden activation,
        output pre-activation, and the sigmoid network output.
    """
    z1 = X.dot(Wxh) + bh      # hidden-layer pre-activation
    a1 = sigmoid(z1)          # hidden-layer activation
    z2 = a1.dot(Why) + by     # output-layer pre-activation
    y_hat = sigmoid(z2)       # network prediction in (0, 1)
    return z1, a1, z2, y_hat


# define the backward propagation:
def backward_prop(y_hat, z1, a1, z2):
    """Backpropagate the squared-error cost through both layers.

    Reads the module-level training data ``X``, labels ``y`` and the
    hidden-to-output weights ``Why`` (they are not parameters).

    Returns:
        (dJ_dWxh, dJ_dWhy) — cost gradients w.r.t. the two weight matrices.

    NOTE(review): bias gradients are never computed here, so ``bh``/``by``
    stay at zero for the whole training run — confirm this is intended.
    """
    # error signal at the output layer: dJ/dz2
    output_error = -(y - y_hat) * sigmoid_derivative(z2)
    # gradient w.r.t. hidden->output weights (a1 transposed for the dot)
    grad_Why = a1.T.dot(output_error)
    # error propagated back through Why to the hidden layer: dJ/dz1
    hidden_error = output_error.dot(Why.T) * sigmoid_derivative(z1)
    # gradient w.r.t. input->hidden weights
    grad_Wxh = X.T.dot(hidden_error)
    return grad_Wxh, grad_Why


# define the cost function:
def cost_function(y, y_hat):
    """Half sum-of-squared-errors cost: 0.5 * sum((y - y_hat)^2).

    Note: the result is a length-1 array, not a scalar, because the
    column vectors are reduced along axis 0 only (matching the original,
    which used the builtin ``sum`` over the rows).
    """
    squared_errors = (y - y_hat) ** 2
    return 0.5 * squared_errors.sum(axis=0)


# set the learning rate and the number of iterations:
alpha = 0.1
num_iterations = 50000

# NOW~~~
# Start training the network with the following code:
# Plain batch gradient descent: forward pass, backward pass, weight step.
# NOTE(review): only the weight matrices are updated — backward_prop returns
# no bias gradients, so bh and by remain zero throughout training.
cost = []
for i in range(num_iterations):
    z1, a1, z2, y_hat = forward_prop(X, Wxh, Why)
    dJ_dWxh, dJ_dWhy = backward_prop(y_hat, z1, a1, z2)

    # update the weights
    Wxh = Wxh - alpha * dJ_dWxh
    Why = Why - alpha * dJ_dWhy

    # compute cost
    c = cost_function(y, y_hat)

    cost.append(c)

# plot the cost function:
plt.grid()
plt.plot(range(num_iterations), cost)
print(type(cost))

plt.title('Cost Function')
plt.xlabel('Training Iterations')
plt.ylabel('Cost')
plt.show()                  # plt.show() is required to actually display the figure

训练过程中损失函数的收敛曲线如下图所示(图为脚本运行后 matplotlib 弹出的窗口):

 

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值