The problem that drove me crazy in the neural network I wrote last time finally found its answer in this lesson.
For a given activation function, the choice of initial weights often plays a decisive role in how well learning goes:
ReLU works well with He initialization, while tanh works well with Xavier initialization.
This assignment is about seeing how different initializations affect learning.
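For reference before the full code: the two schemes differ only in the variance of the Gaussian draw. A minimal sketch (he_weights and xavier_weights are my own illustrative names; the course's Xavier variant scales by 1/fan_in, though 2/(fan_in+fan_out) is also common):
import numpy as np

def he_weights(fan_out, fan_in):
    # He: variance 2/fan_in, sized for ReLU zeroing out roughly half its inputs
    return np.random.randn(fan_out, fan_in) * np.sqrt(2.0 / fan_in)

def xavier_weights(fan_out, fan_in):
    # Xavier: variance 1/fan_in, keeps tanh pre-activations near its linear range
    return np.random.randn(fan_out, fan_in) * np.sqrt(1.0 / fan_in)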
First, He initialization, reusing the network I hand-rolled earlier:
import numpy as np
import matplotlib.pyplot as plt
import init_utils
def init(x):
    # layer widths: 20 -> 7 -> 3 hidden units, then 1 sigmoid output unit
    first_num = 20
    second_num = 7
    third_num = 3
    np.random.seed(5)
    # He initialization: Gaussian scaled by sqrt(2 / fan_in)
    w1 = np.random.randn(first_num, x.shape[0]) * np.sqrt(2 / x.shape[0])
b1 = np.zeros((first_num, 1))
w2 = np.random.randn(second_num, first_num) * np.sqrt(2 / first_num)
b2 = np.zeros((second_num, 1))
w3 = np.random.randn(third_num, second_num) * np.sqrt(2 / second_num)
b3 = np.zeros((third_num, 1))
w4 = np.random.randn(1, third_num) * np.sqrt(2 / third_num)
b4 = np.zeros((1, 1))
ini_param = {
"w1": w1,
"b1": b1,
"w2": w2,
"b2": b2,
"w3": w3,
"b3": b3,
"w4": w4,
"b4": b4
}
return ini_param
The rest is the forward and backward passes; code below:
# Forward pass
def cal_z(w, a, b):  # a: (prev layer size, m)   w: (this layer size, prev layer size)
    return np.dot(w, a) + b
def cal_sigma(z):  # sigmoid; z: (this layer size, m)
    return 1 / (1 + np.exp(-z))
def cal_tan_h(z):  # tanh; z: (this layer size, m)
    return np.tanh(z)
def cal_relu(z):  # ReLU; z: (this layer size, m)
    return np.maximum(0, z)
def forward_f(x, p):
z1 = cal_z(p["w1"], x, p["b1"])
a1 = cal_relu(z1)
z2 = cal_z(p["w2"], a1, p["b2"])
a2 = cal_relu(z2)
z3 = cal_z(p["w3"], a2, p["b3"])
a3 = cal_relu(z3)
z4 = cal_z(p["w4"], a3, p["b4"])
a4 = cal_sigma(z4)
forward_param = {
"z1": z1,
"a1": a1,
"z2": z2,
"a2": a2,
"z3": z3,
"a3": a3,
"z4": z4,
"a4": a4
}
return forward_param
# Cross-entropy cost
def cost_f(a, y):  # a: (1, m)   y: (1, m)
m = y.shape[1]
return -np.sum(y * np.log(a) + (1 - y) * np.log(1 - a)) / m
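One tweak worth keeping in the back pocket (my own addition, not part of the assignment): with a bad initialization, a can saturate to exactly 0 or 1 in floating point and np.log returns -inf. Clipping keeps the cost finite:
def cost_f_safe(a, y, eps=1e-12):
    # same cross-entropy, but with a clipped away from 0 and 1 to avoid log(0)
    m = y.shape[1]
    a = np.clip(a, eps, 1 - eps)
    return -np.sum(y * np.log(a) + (1 - y) * np.log(1 - a)) / m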
# Backward pass
def cal_dz_last(a, y):  # a: (1, m)   y: (1, m); sigmoid + cross-entropy gives dz = a - y
    return a - y
def cal_dw_db(dz, a, m):  # dz: (layer i size, m)   a: (layer i-1 size, m)
    return np.dot(dz, a.T) / m, np.sum(dz, axis=1, keepdims=True) / m
def cal_da(dz, w):  # dz: (layer i size, m)   w: (layer i size, layer i-1 size)
    return np.dot(w.T, dz)
def cal_dtanh(da, a):  # da, a: (layer i size, m); tanh'(z) = 1 - tanh(z)^2
    return da * (1 - (a * a))
def cal_drelu(da, z):  # da, z: (layer i size, m); ReLU' is 0 where z <= 0, else 1
    t = np.ones(z.shape)
    t[z <= 0] = 0
    return da * t
def back_f(p, f_p, x, y):
dz4 = cal_dz_last(f_p["a4"], y)
dw4, db4 = cal_dw_db(dz4, f_p["a3"], y.shape[1])
da3 = cal_da(dz4, p["w4"])
dz3 = cal_drelu(da3, f_p["z3"])
dw3, db3 = cal_dw_db(dz3, f_p["a2"], y.shape[1])
da2 = cal_da(dz3, p["w3"])
dz2 = cal_drelu(da2, f_p["z2"])
dw2, db2 = cal_dw_db(dz2, f_p["a1"], y.shape[1])
da1 = cal_da(dz2, p["w2"])
dz1 = cal_drelu(da1, f_p["z1"])
dw1, db1 = cal_dw_db(dz1, x, y.shape[1])
back_param = {
"dw4": dw4,
"db4": db4,
"dw3": dw3,
"db3": db3,
"dw2": dw2,
"db2": db2,
"dw1": dw1,
"db1": db1
}
return back_param
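To convince myself the chain above is right, a centered finite-difference check on a single weight is cheap (my own sanity check; grad_check_w4 is an illustrative helper, not from the assignment):
def grad_check_w4(x, y, p, eps=1e-7):
    # compare back_f's dw4[0, 0] with a centered difference of the cost
    analytic = back_f(p, forward_f(x, p), x, y)["dw4"][0, 0]
    p_hi = {k: v.copy() for k, v in p.items()}
    p_lo = {k: v.copy() for k, v in p.items()}
    p_hi["w4"][0, 0] += eps
    p_lo["w4"][0, 0] -= eps
    numeric = (cost_f(forward_f(x, p_hi)["a4"], y)
               - cost_f(forward_f(x, p_lo)["a4"], y)) / (2 * eps)
    return abs(analytic - numeric)  # should come out vanishingly small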
# Parameter update (one gradient-descent step)
def update_p(p, b_p, learning_rate):
upd_p = {
"w1": p["w1"] - learning_rate * b_p["dw1"],
"b1": p["b1"] - learning_rate * b_p["db1"],
"w2": p["w2"] - learning_rate * b_p["dw2"],
"b2": p["b2"] - learning_rate * b_p["db2"],
"w3": p["w3"] - learning_rate * b_p["dw3"],
"b3": p["b3"] - learning_rate * b_p["db3"],
"w4": p["w4"] - learning_rate * b_p["dw4"],
"b4": p["b4"] - learning_rate * b_p["db4"]
}
return upd_p
# Put the model together
def model(x, y, learning_rate, loop_num):
p = init(x)
cost_t = []
for i in range(loop_num):
f_p = forward_f(x, p)
b_p = back_f(p, f_p, x, y)
p = update_p(p, b_p, learning_rate)
        if i % 100 == 0:  # record the cost every 100 iterations
cost_t.append(cost_f(f_p["a4"], y))
return p, cost_t
Then I found a rather nice plotting function in the .py file provided with the assignment, so I shamelessly lifted it and tweaked it a bit:
def print_figure(x, y, final_p, _cost):
x_min, x_max = x[0, :].min() - 1, x[0, :].max() + 1
y_min, y_max = x[1, :].min() - 1, x[1, :].max() + 1
h = 0.01
# Generate a grid of points with distance h between them
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Predict the function value for the whole grid
Z = forward_f(np.vstack((xx.ravel(), yy.ravel())), final_p)
Z = np.round(Z["a4"]).reshape(xx.shape)
# Plot the contour and training examples
plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
plt.ylabel('x2')
plt.xlabel('x1')
    plt.scatter(x[0, :], x[1, :], c=y.ravel(), cmap=plt.cm.Spectral)  # flatten y so matplotlib accepts it as colors
plt.figure()
plt.plot(_cost)
plt.show()
Load the data and run:
train_X, train_Y, test_X, test_Y = init_utils.load_dataset(is_plot=False)
# print(test_X.shape)
# print(test_Y.shape)
model_p, cost = model(test_X, test_Y, 0.01, 12000)  # note: this fits on the test split, which is why test accuracy beats train accuracy below
test_f_p = forward_f(test_X, model_p)
train_f_p = forward_f(train_X, model_p)
print("训练集准确度:", 100 * (1 - np.sum(np.abs(np.round(train_f_p["a4"]) - train_Y)) / train_Y.shape[1]), '%')
print("测试集准确度:", 100 * (1 - np.sum(np.abs(np.round(test_f_p["a4"]) - test_Y)) / test_Y.shape[1]), '%')
print_figure(train_X,train_Y,model_p,cost)
The result is practically perfect:
train accuracy: 94.0 %
test accuracy: 99.0 %
Next, zero initialization. The init code:
import numpy as np
import matplotlib.pyplot as plt
import init_utils
def init(x):
    """
    Same 4-layer network as above (3 hidden ReLU layers + 1 sigmoid output):
    input -> (hidden1: 20 units) -> (hidden2: 7 units) -> (hidden3: 3 units) -> output
    but with every weight and bias set to zero.
    """
    first_num = 20
    second_num = 7
    third_num = 3
w1 = np.zeros((first_num, x.shape[0]))
b1 = np.zeros((first_num, 1))
w2 = np.zeros((second_num, first_num))
b2 = np.zeros((second_num, 1))
w3 = np.zeros((third_num, second_num))
b3 = np.zeros((third_num, 1))
w4 = np.zeros((1, third_num))
b4 = np.zeros((1, 1))
ini_param = {
"w1": w1,
"b1": b1,
"w2": w2,
"b2": b2,
"w3": w3,
"b3": b3,
"w4": w4,
"b4": b4
}
return ini_param
It cannot learn at all: with all-zero weights every forward pass outputs a constant 0.5, and since both the activations and ReLU's gradient are zero at z = 0, every dw comes back zero (only b4 picks up a gradient), so the weights never move and the network predicts the same class for every input.
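A quick check makes this concrete (a sketch, run with the zero-init version of init above and the same forward_f / back_f as before):
p0 = init(train_X)  # all-zero parameters
f0 = forward_f(train_X, p0)
b0 = back_f(p0, f0, train_X, train_Y)
print(f0["a4"][0, :5])  # constant 0.5 for every example
print(np.abs(b0["dw1"]).max(), np.abs(b0["dw4"]).max())  # both exactly 0.0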
Now set the initial weights very large:
def init(x):
first_num = 20
second_num = 7
third_num = 3
np.random.seed(5)
    # same Gaussian draws as before, but scaled way up instead of by sqrt(2 / fan_in)
    w1 = np.random.randn(first_num, x.shape[0]) * 5
    b1 = np.zeros((first_num, 1))
    w2 = np.random.randn(second_num, first_num) * 8
    b2 = np.zeros((second_num, 1))
    w3 = np.random.randn(third_num, second_num) * 5
b3 = np.zeros((third_num, 1))
w4 = np.random.randn(1, third_num)
b4 = np.zeros((1, 1))
ini_param = {
"w1": w1,
"b1": b1,
"w2": w2,
"b2": b2,
"w3": w3,
"b3": b3,
"w4": w4,
"b4": b4
}
return ini_param
train accuracy: 56.666666666666664 %
test accuracy: 60.0 %
The cost just won't come down: with weights this large the pre-activations explode, the output sigmoid saturates at 0 or 1, confidently wrong predictions incur a huge (even infinite, once log(0) shows up) loss, and gradient descent oscillates instead of converging.
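To see the saturation directly (a sketch, run with the large-scale init above):
p_big = init(train_X)  # the *5 / *8 version above
f_big = forward_f(train_X, p_big)
a4 = f_big["a4"]
# fraction of outputs already pinned against 0 or 1 before any training
print(((a4 < 1e-3) | (a4 > 1 - 1e-3)).mean())
print(cost_f(a4, train_Y))  # may overflow to inf/nan once an output hits exactly 0 or 1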