Reference blog (https://blog.csdn.net/u013733326/article/details/79847918)
I'd suggest reading the original instead; this is just my practice run. For the errors I hit along the way, check the comments under the original article.
- Initialization
### Initializing the model
import numpy as np
import matplotlib.pyplot as plt
import sklearn
import sklearn.datasets
import init_utils  # initialization utilities (listing below)
plt.rcParams['figure.figsize'] = (7.0, 4.0)  # set the default plot size
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
#load and plot the data
#train_X, train_Y, test_X, test_Y = init_utils.load_dataset(is_plot=True)
##plt.show()  # uncomment to show the plot
#model
def model(X, Y, learning_rate=0.01, num_iterations=5000, print_cost=True, initialization="he", is_plot=True):
    """
    Implements a four-layer neural network: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID
    Arguments:
        X - input data, of shape (2, number of training/test examples)
        Y - labels, [0 | 1], of shape (1, number of examples)
        learning_rate - learning rate
        num_iterations - number of iterations
        print_cost - whether to print the cost, once every 1000 iterations
        initialization - string, the initialization type ["zeros" | "random" | "he"]
        is_plot - whether to plot the gradient-descent cost curve
    Returns:
        parameters - the learned parameters
    """
    grads = {}
    costs = []
    m = X.shape[1]
    layers_dims = [X.shape[0], 15, 10, 5, 1]
    #choose the initialization type
if initialization == "zeros":
parameters = initialize_parameters_zeros(layers_dims)
elif initialization == "random":
parameters = initialize_parameters_random(layers_dims)
elif initialization == "he":
parameters = initialize_parameters_he(layers_dims)
    else:
        print("Invalid initialization argument! Exiting.")
        exit()
    #training loop
    for i in range(0, num_iterations):
        #forward propagation
        a4, cache = init_utils.forward_propagation(X, parameters)
        #compute the cost
        cost = init_utils.compute_loss(a4, Y)
        #backward propagation
        grads = init_utils.backward_propagation(X, Y, cache)
        #update the parameters
        parameters = init_utils.update_parameters(parameters, grads, learning_rate)
        #record the cost
        if i % 1000 == 0:
            costs.append(cost)
            #print the cost
            if print_cost:
                print("Iteration " + str(i) + ", cost: " + str(cost))
    #training finished; plot the cost curve
    if is_plot:
        plt.plot(costs)
        plt.ylabel('cost')
        plt.xlabel('iterations (x1,000)')
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()
    #return the learned parameters
    return parameters
#zero initialization
def initialize_parameters_zeros(layers_dims):
"""
    Sets all of the model's parameters to zero
    Arguments:
        layers_dims - list with the number of layers and the number of nodes in each layer
    Returns:
        parameters - dictionary containing all W and b:
            W1 - weight matrix of shape (layers_dims[1], layers_dims[0])
            b1 - bias vector of shape (layers_dims[1], 1)
            ...
            WL - weight matrix of shape (layers_dims[L], layers_dims[L-1])
            bL - bias vector of shape (layers_dims[L], 1)
"""
parameters = {}
    L = len(layers_dims)  # number of layers in the network
for l in range(1,L):
parameters["W" + str(l)] = np.zeros((layers_dims[l],layers_dims[l-1]))
parameters["b" + str(l)] = np.zeros((layers_dims[l],1))
        #use assertions to make sure the shapes are correct
assert(parameters["W" + str(l)].shape == (layers_dims[l],layers_dims[l-1]))
assert(parameters["b" + str(l)].shape == (layers_dims[l],1))
return parameters
#test the effect of zero initialization
'''
parameters = initialize_parameters_zeros([3, 2, 1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"])) '''
"""
Output with zero initialization:
W1 = [[0. 0. 0.]
[0. 0. 0.]]
b1 = [[0.]
[0.]]
W2 = [[0. 0.]]
b2 = [[0.]]
"""
#training with zero initialization
#parameters = model(train_X, train_Y, initialization="zeros", is_plot=True)
#results omitted
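#with all-zero weights the network never breaks symmetry: every hidden unit computes
#the same thing and only the output bias can learn, so it predicts one class everywhere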
#prediction results
'''
print("training set")
predictions_train = init_utils.predict(train_X, train_Y, parameters)
print("test set")
predictions_test = init_utils.predict(test_X,test_Y,parameters)
'''
#random initialization
def initialize_parameters_random(layers_dims):
"""
    :param layers_dims: list with the number of layers and the number of nodes in each layer
    :return: parameters - dictionary containing all W and b
"""
    np.random.seed(3)  # fix the random seed
parameters = {}
    L = len(layers_dims)  # number of layers
for l in range(1, L):
        #scale by 10 (deliberately too large, to show what oversized weights do)
parameters['W' + str(l)] = np.random.randn(layers_dims[l], layers_dims[l - 1]) * 10
parameters['b' + str(l)] = np.zeros((layers_dims[l], 1))
assert(parameters["W" + str(l)].shape == (layers_dims[l], layers_dims[l-1]))
assert (parameters["b" + str(l)].shape == (layers_dims[l], 1))
return parameters
#test
"""
parameters = initialize_parameters_random([3, 2, 1])
print("w1=" + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
"""
"""
Test output
w1=[[ 17.88628473 4.36509851 0.96497468]
[-18.63492703 -2.77388203 -3.54758979]]
b1 = [[0.]
[0.]]
W2 = [[-0.82741481 -6.27000677]]
b2 = [[0.]]
"""
"""
#实际运行结果
parameters = model(train_X, train_Y,initialization="random", is_polt=True)
print("训练集")
predictions_train = init_utils.predict(train_X, train_Y,parameters)
print("测试集")
pretictions_test = init_utils.predict(test_X,test_Y,parameters)
print(predictions_train)
print(pretictions_test)
#绘图
plt.title("Model with large random initialization")
axes = plt.gca()
axes.set_xlim([-1.5, 1.5])
axes.set_ylim([-1.5, 1.5])
init_utils.plot_decision_boundary(lambda x: init_utils.predict_dec(parameters, x.T), train_X, train_Y)
#结果略
"""
#He initialization (scales the weights to keep ReLU gradients from vanishing or exploding)
def initialize_parameters_he(layers_dims):
"""
    :param layers_dims: list with the number of layers and the number of nodes in each layer
    :return: parameters - dictionary containing all W and b
"""
np.random.seed(3)
parameters = {}
L = len(layers_dims)
for l in range(1, L):
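        # He initialization: scale the Gaussian weights by sqrt(2 / n_prev) so the
        # variance of the ReLU activations stays roughly constant across layers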
parameters['W' + str(l)] = np.random.randn(layers_dims[l], layers_dims[l-1]) *\
np.sqrt(2 / layers_dims[l -1])
parameters['b' + str(l)] = np.zeros((layers_dims[l], 1))
assert (parameters["W" + str(l)].shape == (layers_dims[l], layers_dims[l-1]))
assert (parameters["b" + str(l)].shape == (layers_dims[l],1))
return parameters
#test
'''
parameters = initialize_parameters_he([2, 4, 3, 1])
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))'''
#test output
'''
W1 = [[ 1.78862847 0.43650985]
[ 0.09649747 -1.8634927 ]
[-0.2773882 -0.35475898]
[-0.08274148 -0.62700068]]
b1 = [[0.]
[0.]
[0.]
[0.]]
W2 = [[-0.03098412 -0.33744411 -0.92904268 0.62552248]]
b2 = [[0.]] '''
#actual run with He initialization
'''
parameters = model(train_X, train_Y, initialization="he", is_plot=True)
print("training set:")
predictions_train = init_utils.predict(train_X, train_Y, parameters)
print("test set:")
predictions_test = init_utils.predict(test_X, test_Y, parameters)
'''
#plot
'''
plt.title("Model with He initialization")
axes = plt.gca()
axes.set_xlim([-1.5, 1.5])
axes.set_ylim([-1.5, 1.5])
init_utils.plot_decision_boundary(lambda x: init_utils.predict_dec(parameters, x.T), train_X, train_Y)
'''
#results omitted
init_utils.py
# -*- coding: utf-8 -*-
#init_utils.py
import numpy as np
import matplotlib.pyplot as plt
import sklearn
import sklearn.datasets
def sigmoid(x):
"""
Compute the sigmoid of x
Arguments:
x -- A scalar or numpy array of any size.
Return:
s -- sigmoid(x)
"""
s = 1/(1+np.exp(-x))
return s
def relu(x):
"""
Compute the relu of x
Arguments:
x -- A scalar or numpy array of any size.
Return:
s -- relu(x)
"""
s = np.maximum(0,x)
return s
def compute_loss(a3, Y):
"""
Implement the loss function
Arguments:
a3 -- post-activation, output of forward propagation
Y -- "true" labels vector, same shape as a3
Returns:
loss - value of the loss function
"""
m = Y.shape[1]
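    # cross-entropy loss: -(1/m) * sum(Y * log(a3) + (1 - Y) * log(1 - a3));
    # np.nansum below treats as zero the NaN terms produced when a3 saturates at exactly 0 or 1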
logprobs = np.multiply(-np.log(a3),Y) + np.multiply(-np.log(1 - a3), 1 - Y)
loss = 1./m * np.nansum(logprobs)
return loss
def forward_propagation(X, parameters):
"""
Implements the forward propagation (and computes the loss) presented in Figure 2.
Arguments:
X -- input dataset, of shape (input size, number of examples)
Y -- true "label" vector (containing 0 if cat, 1 if non-cat)
parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3":
W1 -- weight matrix of shape ()
b1 -- bias vector of shape ()
W2 -- weight matrix of shape ()
b2 -- bias vector of shape ()
W3 -- weight matrix of shape ()
b3 -- bias vector of shape ()
Returns:
loss -- the loss function (vanilla logistic loss)
"""
# retrieve parameters
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
W3 = parameters["W3"]
b3 = parameters["b3"]
W4 = parameters["W4"]
b4 = parameters["b4"]
    # LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID
    z1 = np.dot(W1, X) + b1
    a1 = relu(z1)
    z2 = np.dot(W2, a1) + b2
    a2 = relu(z2)
    z3 = np.dot(W3, a2) + b3
    a3 = relu(z3)
    z4 = np.dot(W4, a3) + b4
    a4 = sigmoid(z4)
cache = (z1, a1, W1, b1, z2, a2, W2, b2, z3, a3, W3, b3, z4, a4, W4, b4)
return a4, cache
def backward_propagation(X, Y, cache):
"""
Implement the backward propagation presented in figure 2.
Arguments:
X -- input dataset, of shape (input size, number of examples)
Y -- true "label" vector (containing 0 if cat, 1 if non-cat)
cache -- cache output from forward_propagation()
Returns:
gradients -- A dictionary with the gradients with respect to each parameter, activation and pre-activation variables
"""
m = X.shape[1]
(z1, a1, W1, b1, z2, a2, W2, b2, z3, a3, W3, b3, z4, a4, W4, b4) = cache
    dz4 = 1. / m * (a4 - Y)
    dW4 = np.dot(dz4, a3.T)
    db4 = np.sum(dz4, axis=1, keepdims=True)
    da3 = np.dot(W4.T, dz4)
    dz3 = np.multiply(da3, np.int64(a3 > 0))
    dW3 = np.dot(dz3, a2.T)
    db3 = np.sum(dz3, axis=1, keepdims=True)
    da2 = np.dot(W3.T, dz3)
    dz2 = np.multiply(da2, np.int64(a2 > 0))
    dW2 = np.dot(dz2, a1.T)
    db2 = np.sum(dz2, axis=1, keepdims=True)
    da1 = np.dot(W2.T, dz2)
    dz1 = np.multiply(da1, np.int64(a1 > 0))
    dW1 = np.dot(dz1, X.T)
    db1 = np.sum(dz1, axis=1, keepdims=True)
    gradients = {"dz4": dz4, "dW4": dW4, "db4": db4,
                 "da3": da3, "dz3": dz3, "dW3": dW3, "db3": db3,
                 "da2": da2, "dz2": dz2, "dW2": dW2, "db2": db2,
                 "da1": da1, "dz1": dz1, "dW1": dW1, "db1": db1}
return gradients
def update_parameters(parameters, grads, learning_rate):
"""
Update parameters using gradient descent
Arguments:
parameters -- python dictionary containing your parameters
grads -- python dictionary containing your gradients, output of n_model_backward
Returns:
parameters -- python dictionary containing your updated parameters
parameters['W' + str(i)] = ...
parameters['b' + str(i)] = ...
"""
L = len(parameters) // 2 # number of layers in the neural networks
# Update rule for each parameter
for k in range(L):
parameters["W" + str(k+1)] = parameters["W" + str(k+1)] - learning_rate * grads["dW" + str(k+1)]
parameters["b" + str(k+1)] = parameters["b" + str(k+1)] - learning_rate * grads["db" + str(k+1)]
return parameters
def predict(X, y, parameters):
"""
This function is used to predict the results of a n-layer neural network.
Arguments:
X -- data set of examples you would like to label
parameters -- parameters of the trained model
Returns:
p -- predictions for the given dataset X
"""
    m = X.shape[1]
    p = np.zeros((1, m), dtype=int)
    # Forward propagation
    a4, caches = forward_propagation(X, parameters)
    # convert probabilities to 0/1 predictions
    for i in range(0, a4.shape[1]):
        if a4[0, i] > 0.5:
            p[0, i] = 1
        else:
            p[0, i] = 0
# print results
print("Accuracy: " + str(np.mean((p[0,:] == y[0,:]))))
return p
def load_dataset(is_plot=True):
np.random.seed(1)
train_X, train_Y = sklearn.datasets.make_circles(n_samples=300, noise=.05)
np.random.seed(2)
test_X, test_Y = sklearn.datasets.make_circles(n_samples=100, noise=.05)
# Visualize the data
if is_plot:
plt.scatter(train_X[:, 0], train_X[:, 1], c=train_Y, s=40, cmap=plt.cm.Spectral);
train_X = train_X.T
train_Y = train_Y.reshape((1, train_Y.shape[0]))
test_X = test_X.T
test_Y = test_Y.reshape((1, test_Y.shape[0]))
return train_X, train_Y, test_X, test_Y
def plot_decision_boundary(model, X, y):
# Set min and max values and give it some padding
x_min, x_max = X[0, :].min() - 1, X[0, :].max() + 1
y_min, y_max = X[1, :].min() - 1, X[1, :].max() + 1
h = 0.01
# Generate a grid of points with distance h between them
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Predict the function value for the whole grid
Z = model(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# Plot the contour and training examples
plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
plt.ylabel('x2')
plt.xlabel('x1')
plt.scatter(X[0, :], X[1, :], c=np.squeeze(y), cmap=plt.cm.Spectral)
plt.show()
def predict_dec(parameters, X):
"""
Used for plotting decision boundary.
Arguments:
parameters -- python dictionary containing your parameters
X -- input data of size (m, K)
Returns
predictions -- vector of predictions of our model (red: 0 / blue: 1)
"""
    # Predict using forward propagation and a classification threshold of 0.5
    a4, cache = forward_propagation(X, parameters)
    predictions = (a4 > 0.5)
return predictions
- Regularization
#regularization
import numpy as np
import matplotlib.pyplot as plt
import sklearn
import sklearn.datasets
import reg_utils  # regularization utilities (listing below)
train_X, train_Y, test_X, test_Y = reg_utils.load_2D_dataset(is_plot=True)
#model
def model(X,Y,learning_rate=0.3,num_iterations=30000,print_cost=True,is_plot=True,lambd=0,keep_prob=1):
"""
实现一个三层的神经网络
:param X:输入的数据,维度为(2,要训练/测试的数量)
:param Y: 标签,【0(蓝色) | 1(红色)】,维度为(1,对应的是输入的数据的标签)
:param learning_rate:学习速率
:param num_iterations:迭代的次数
:param print_cost:是否打印成本值,每迭代10000次打印一次,但是每1000次记录一个成本值
:param is_plot:是否绘制梯度下降的曲线图
:param lambd:正则化的超参数,实数
:param keep_prob:随机删除节点的概率
:return:parameters - 学习后的参数
"""
grads = {}
costs = []
m = X.shape[1]
layer_dims = [X.shape[0],20,3,1]
    #initialize parameters
parameters = reg_utils.initialize_parameters(layer_dims)
    #training loop
    for i in range(0, num_iterations):
        #forward propagation
        ##with or without dropout
        if keep_prob == 1:
            #no dropout
            a3, cache = reg_utils.forward_propagation(X, parameters)
        elif keep_prob < 1:
            #with dropout
            a3, cache = forward_propagation_with_dropout(X, parameters, keep_prob)
        else:
            print("Invalid keep_prob, exiting")
            exit()
        #compute the cost
        ##with or without L2 regularization
        if lambd == 0:
            ###without L2 regularization
            cost = reg_utils.compute_cost(a3, Y)
        else:
            ###with L2 regularization
            cost = compute_cost_with_regularization(a3, Y, parameters, lambd)
        #backward propagation
        ##L2 regularization and dropout could be combined,
        ##but here each branch handles one of the two at a time
        if (lambd == 0 and keep_prob == 1):
            ###neither L2 regularization nor dropout
            grads = reg_utils.backward_propagation(X, Y, cache)
        elif lambd != 0 and keep_prob == 1:
            ###L2 regularization without dropout
            grads = backward_propagation_with_regularization(X, Y, cache, lambd)
        elif lambd == 0 and keep_prob < 1:
            ###dropout without L2 regularization
            grads = backward_propagation_with_dropout(X, Y, cache, keep_prob)
        else:
            pass
        #update the parameters
        parameters = reg_utils.update_parameters(parameters, grads, learning_rate)
        #record and print the cost
        if i % 1000 == 0:
            ##record the cost
            costs.append(cost)
            if print_cost and i % 10000 == 0:
                print("Iteration " + str(i) + ", cost: " + str(cost))
    #plot the cost curve
    if is_plot:
        plt.plot(costs)
        plt.ylabel('cost')
        plt.xlabel('iterations (x1,000)')
        plt.title("learning_rate = " + str(learning_rate))
        plt.show()
    return parameters
'''
#check the model without regularization
parameters = model(train_X, train_Y, is_plot=True)
print("training set")
predictions_train = reg_utils.predict(train_X, train_Y, parameters)
print("test set")
predictions_test = reg_utils.predict(test_X, test_Y, parameters)
plt.title("Model without regularization")
axes = plt.gca()
axes.set_xlim([-0.75,0.40])
axes.set_ylim([-0.75,0.65])
reg_utils.plot_decision_boundary(lambda x: reg_utils.predict_dec(parameters, x.T), train_X, train_Y)
'''
#L2 regularization
def compute_cost_with_regularization(A3, Y, parameters, lambd):
"""
实现L2正则化计算成本
:param a3: 正向传播的输出结果,维度为(输出节点数量,训练/测试数量)
:param Y: 标签向量,与数据一一对齐,维度为(输出节点数量,训练/测试的数量)
:param parameters: - 包含模型学习后的参数字典
:param lambd:
:return:cost - 使用L2正则化计算出的正则化损失的值
"""
m = Y.shape[1]
W1 = parameters["W1"]
W2 = parameters["W2"]
W3 = parameters["W3"]
    cross_entropy_cost = reg_utils.compute_cost(A3, Y)
    L2_regularization_cost = lambd * (np.sum(np.square(W1)) + np.sum(np.square(W2)) + np.sum(np.square(W3))) / (2 * m)
    cost = cross_entropy_cost + L2_regularization_cost
return cost
#Of course, because we changed the cost function, we must also change backward propagation: all of the gradients have to be computed with respect to the new cost.
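#the penalty's derivative is d/dW[(lambd / (2 * m)) * sum(W ** 2)] = (lambd / m) * W,
#so each dWl below simply gains an extra (lambd / m) * Wl term over the unregularized gradient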
def backward_propagation_with_regularization(X, Y, cache, lambd):
"""
实现添加了L2正则化的模型的后向传播
:param X: 输入数据集,维度为(输入节点数量,数据集里面的数量)
:param Y: 标签,维度为(输出节点数量,数据集里面的数量)
:param cache:来自forward_propagation()的cache输出
:param lambd:regularization超参数 实数
:return:gradients 一个包含每个参数,激活值和预激活变量的梯度的字典
"""
m = X.shape[1]
(Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache
dZ3 = A3 - Y
dW3 = (1 / m) * np.dot(dZ3, A2.T) + ((lambd * W3) / m)
db3 = (1 / m) * np.sum(dZ3, axis=1, keepdims=True)
dA2 = np.dot(W3.T, dZ3)
dZ2 = np.multiply(dA2, np.int64(A2 > 0))
dW2 = (1 / m) * np.dot(dZ2, A1.T) + ((lambd * W2) / m)
db2 = (1 / m) * np.sum(dZ2, axis=1, keepdims=True)
dA1 = np.dot(W2.T, dZ2)
dZ1 = np.multiply(dA1, np.int64(A1 > 0))
dW1 = (1 / m) * np.dot(dZ1, X.T) + ((lambd * W1) / m)
db1 = (1 / m) * np.sum(dZ1, axis=1, keepdims=True)
gradients = {"dZ3": dZ3, "dW3": dW3, "db3": db3, "dA2": dA2,
"dZ2": dZ2, "dW2": dW2, "db2": db2, "dA1": dA1,
"dZ1": dZ1, "dW1": dW1, "db1": db1}
return gradients
#results of the model with L2 regularization
'''
parameters = model(train_X, train_Y, lambd=0.7, is_plot=True)
print("with L2 regularization, training set:")
predictions_train = reg_utils.predict(train_X, train_Y, parameters)
print("with L2 regularization, test set:")
predictions_test = reg_utils.predict(test_X, test_Y, parameters)
plt.title("Model with L2-regularization")
axes = plt.gca()
axes.set_xlim([-0.75,0.40])
axes.set_ylim([-0.75,0.65])
reg_utils.plot_decision_boundary(lambda x: reg_utils.predict_dec(parameters, x.T), train_X, train_Y)
'''
#dropout
def forward_propagation_with_dropout(X, parameters, keep_prob):
"""
实现具有随机舍弃节点的前向传播
:param X: 输入数据集,维度为(2,示例数)
:param parameters: 包含参数的字典
:param keep_prob: 随即删除的概率
:return: A3 - 最后的激活值,维度为(1,1),正向传播的输出
cache - 储存了一些用于计算反向传播的数值的元组
"""
np.random.seed(1)
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
W3 = parameters["W3"]
b3 = parameters["b3"]
Z1 = np.dot(W1, X) + b1
A1 = reg_utils.relu(Z1)
    #steps 1-4 below correspond to steps 1-4 described above
    D1 = np.random.rand(A1.shape[0], A1.shape[1])  #step 1: initialize the mask D1
    D1 = D1 < keep_prob                            #step 2: convert D1 to 0/1 (using keep_prob as the threshold)
    A1 = A1 * D1                                   #step 3: shut down some nodes of A1 (set them to 0 or False)
    A1 = A1 / keep_prob                            #step 4: scale the surviving (non-zero) nodes
Z2 = np.dot(W2, A1) + b2
A2 = reg_utils.relu(Z2)
    #the same steps 1-4 for the second hidden layer
    D2 = np.random.rand(A2.shape[0], A2.shape[1])  #step 1: initialize the mask D2
    D2 = D2 < keep_prob                            #step 2: convert D2 to 0/1 (using keep_prob as the threshold)
    A2 = A2 * D2                                   #step 3: shut down some nodes of A2 (set them to 0 or False)
    A2 = A2 / keep_prob                            #step 4: scale the surviving (non-zero) nodes
Z3 = np.dot(W3, A2) + b3
A3 = reg_utils.sigmoid(Z3)
cache = (Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3)
return A3, cache
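#a minimal standalone sketch (my own illustration, not part of the assignment) of the
#inverted-dropout mask on a toy activation matrix:
'''
A = np.random.randn(3, 4)
D = np.random.rand(A.shape[0], A.shape[1]) < 0.8  # keep each unit with probability 0.8
A = A * D / 0.8  # zero the dropped units and rescale the rest so the expected value is unchanged
'''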
#backward propagation has to change accordingly
def backward_propagation_with_dropout(X, Y, cache, keep_prob):
"""
实现随即删除模型的后向传播
:param X: 输入数据集,维度为(2,示例数)
:param Y: 标签,维度为(输出节点数量,示例数量)
:param cache: 前向传播的cache输出
:param keep_prob: 随机删除概率
:return: gradients 一个关于每个参数、激活值和预激活变量的梯度值的字典
"""
m = X.shape[1]
(Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3) = cache
dZ3 = A3 - Y
dW3 = (1 / m) * np.dot(dZ3, A2.T)
db3 = 1. / m * np.sum(dZ3, axis=1, keepdims=True)
dA2 = np.dot(W3.T, dZ3)
    dA2 *= D2         #step 1: apply the same mask D2 as in forward propagation, shutting down the same nodes (anything times 0 or False is 0 or False)
    dA2 /= keep_prob  #step 2: scale the surviving (non-zero) nodes
dZ2 = np.multiply(dA2, np.int64(A2 > 0))
dW2 = 1./ m * np.dot(dZ2, A1.T)
db2 = 1. / m * np.sum(dZ2, axis=1, keepdims=True)
dA1 = np.dot(W2.T, dZ2)
    dA1 *= D1              #step 1: apply the same mask D1 as in forward propagation, shutting down the same nodes
    dA1 = dA1 / keep_prob  #step 2: scale the surviving (non-zero) nodes
dZ1 = np.multiply(dA1, np.int64(A1 > 0))
dW1 = 1. / m * np.dot(dZ1, X.T)
db1 = 1. / m * np.sum(dZ1, axis=1, keepdims=True)
gradients = {"dZ3": dZ3, "dW3":dW3, "db3":db3, "dA2":dA2,
"dZ2":dZ2, "dW2":dW2, "db2":db2, "dA1":dA1,
"dZ1":dZ1, "dW1":dW1, "db1":db1}
return gradients
#test the model with dropout
parameters = model(train_X, train_Y, keep_prob=0.86, learning_rate=0.3, is_plot=True)
print("with dropout, training set")
predictions_train = reg_utils.predict(train_X, train_Y, parameters)
print("with dropout, test set")
predictions_test = reg_utils.predict(test_X, test_Y, parameters)
plt.title("Model with dropout")
axes = plt.gca()
axes.set_xlim([-0.75, 0.4])
axes.set_ylim([-0.75, 0.65])
reg_utils.plot_decision_boundary(lambda x:reg_utils.predict_dec(parameters, x.T), train_X, train_Y)
reg_utils.py
# -*- coding: utf-8 -*-
#reg_utils.py
import numpy as np
import matplotlib.pyplot as plt
import scipy.io as sio
def sigmoid(x):
"""
Compute the sigmoid of x
Arguments:
x -- A scalar or numpy array of any size.
Return:
s -- sigmoid(x)
"""
s = 1/(1+np.exp(-x))
return s
def relu(x):
"""
Compute the relu of x
Arguments:
x -- A scalar or numpy array of any size.
Return:
s -- relu(x)
"""
s = np.maximum(0,x)
return s
def initialize_parameters(layer_dims):
"""
Arguments:
layer_dims -- python array (list) containing the dimensions of each layer in our network
Returns:
parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
W1 -- weight matrix of shape (layer_dims[l], layer_dims[l-1])
b1 -- bias vector of shape (layer_dims[l], 1)
                    Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1])
                    bl -- bias vector of shape (layer_dims[l], 1)
Tips:
    - For example: the layer_dims for the "Planar Data classification model" would have been [2,2,1].
    This means W1's shape was (2,2), b1 was (2,1), W2 was (1,2) and b2 was (1,1). Now you have to generalize it!
- In the for loop, use parameters['W' + str(l)] to access Wl, where l is the iterative integer.
"""
np.random.seed(3)
parameters = {}
L = len(layer_dims) # number of layers in the network
for l in range(1, L):
parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l-1]) / np.sqrt(layer_dims[l-1])
parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))
        #assert(parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l-1]))
        #assert(parameters['b' + str(l)].shape == (layer_dims[l], 1))
return parameters
def forward_propagation(X, parameters):
"""
Implements the forward propagation (and computes the loss) presented in Figure 2.
Arguments:
X -- input dataset, of shape (input size, number of examples)
Y -- true "label" vector (containing 0 if cat, 1 if non-cat)
parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3":
W1 -- weight matrix of shape ()
b1 -- bias vector of shape ()
W2 -- weight matrix of shape ()
b2 -- bias vector of shape ()
W3 -- weight matrix of shape ()
b3 -- bias vector of shape ()
Returns:
loss -- the loss function (vanilla logistic loss)
"""
# retrieve parameters
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
W3 = parameters["W3"]
b3 = parameters["b3"]
# LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID
z1 = np.dot(W1, X) + b1
a1 = relu(z1)
z2 = np.dot(W2, a1) + b2
a2 = relu(z2)
z3 = np.dot(W3, a2) + b3
a3 = sigmoid(z3)
cache = (z1, a1, W1, b1, z2, a2, W2, b2, z3, a3, W3, b3)
return a3, cache
def compute_cost(a3, Y):
"""
Implement the cost function
Arguments:
a3 -- post-activation, output of forward propagation
Y -- "true" labels vector, same shape as a3
Returns:
cost - value of the cost function
"""
m = Y.shape[1]
logprobs = np.multiply(-np.log(a3),Y) + np.multiply(-np.log(1 - a3), 1 - Y)
cost = 1./m * np.nansum(logprobs)
return cost
def backward_propagation(X, Y, cache):
"""
Implement the backward propagation presented in figure 2.
Arguments:
X -- input dataset, of shape (input size, number of examples)
Y -- true "label" vector (containing 0 if cat, 1 if non-cat)
cache -- cache output from forward_propagation()
Returns:
gradients -- A dictionary with the gradients with respect to each parameter, activation and pre-activation variables
"""
m = X.shape[1]
(z1, a1, W1, b1, z2, a2, W2, b2, z3, a3, W3, b3) = cache
dz3 = 1./m * (a3 - Y)
dW3 = np.dot(dz3, a2.T)
db3 = np.sum(dz3, axis=1, keepdims = True)
da2 = np.dot(W3.T, dz3)
dz2 = np.multiply(da2, np.int64(a2 > 0))
dW2 = np.dot(dz2, a1.T)
db2 = np.sum(dz2, axis=1, keepdims = True)
da1 = np.dot(W2.T, dz2)
dz1 = np.multiply(da1, np.int64(a1 > 0))
dW1 = np.dot(dz1, X.T)
db1 = np.sum(dz1, axis=1, keepdims = True)
gradients = {"dz3": dz3, "dW3": dW3, "db3": db3,
"da2": da2, "dz2": dz2, "dW2": dW2, "db2": db2,
"da1": da1, "dz1": dz1, "dW1": dW1, "db1": db1}
return gradients
def update_parameters(parameters, grads, learning_rate):
"""
Update parameters using gradient descent
Arguments:
parameters -- python dictionary containing your parameters
    grads -- python dictionary containing your gradients, output of n_model_backward
Returns:
parameters -- python dictionary containing your updated parameters
parameters['W' + str(i)] = ...
parameters['b' + str(i)] = ...
"""
L = len(parameters) // 2 # number of layers in the neural networks
# Update rule for each parameter
for k in range(L):
parameters["W" + str(k+1)] = parameters["W" + str(k+1)] - learning_rate * grads["dW" + str(k+1)]
parameters["b" + str(k+1)] = parameters["b" + str(k+1)] - learning_rate * grads["db" + str(k+1)]
return parameters
def load_2D_dataset(is_plot=True):
data = sio.loadmat('data.mat')
train_X = data['X'].T
train_Y = data['y'].T
test_X = data['Xval'].T
test_Y = data['yval'].T
if is_plot:
plt.scatter(train_X[0, :], train_X[1, :], c=np.squeeze(train_Y) , s=40, cmap=plt.cm.Spectral)
return train_X, train_Y, test_X, test_Y
def predict(X, y, parameters):
"""
This function is used to predict the results of a n-layer neural network.
Arguments:
X -- data set of examples you would like to label
parameters -- parameters of the trained model
Returns:
p -- predictions for the given dataset X
"""
m = X.shape[1]
    p = np.zeros((1, m), dtype=int)
# Forward propagation
a3, caches = forward_propagation(X, parameters)
# convert probas to 0/1 predictions
for i in range(0, a3.shape[1]):
if a3[0,i] > 0.5:
p[0,i] = 1
else:
p[0,i] = 0
# print results
print("Accuracy: " + str(np.mean((p[0,:] == y[0,:]))))
return p
def plot_decision_boundary(model, X, y):
# Set min and max values and give it some padding
x_min, x_max = X[0, :].min() - 1, X[0, :].max() + 1
y_min, y_max = X[1, :].min() - 1, X[1, :].max() + 1
h = 0.01
# Generate a grid of points with distance h between them
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Predict the function value for the whole grid
Z = model(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# Plot the contour and training examples
plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
plt.ylabel('x2')
plt.xlabel('x1')
plt.scatter(X[0, :], X[1, :], c=np.squeeze(y), cmap=plt.cm.Spectral)
plt.show()
def predict_dec(parameters, X):
"""
Used for plotting decision boundary.
Arguments:
parameters -- python dictionary containing your parameters
X -- input data of size (m, K)
Returns
predictions -- vector of predictions of our model (red: 0 / blue: 1)
"""
# Predict using forward propagation and a classification threshold of 0.5
a3, cache = forward_propagation(X, parameters)
predictions = (a3>0.5)
return predictions
- Gradient checking
#gradient checking
import numpy as np
import matplotlib.pyplot as plt
import sklearn
import sklearn.datasets
import gc_utils  # part 3: gradient-checking utilities (listing below)
#%matplotlib inline  # uncomment if you are using a Jupyter Notebook
plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
#1-D linear case
def forward_propagation(x, theta):
"""
实现图中呈现的线性前向传播(计算J)(j(theta) = theta * x)
:param x:一个实值的输入
:param theta: 参数,也是一个实数
:return: J -- 函数J的值
"""
J = np.dot(theta, x)
return J
def backward_propagation(x, theta):
    """
    Computes the derivative of J with respect to theta
    :param x: a real-valued input
    :param theta: the parameter, also a real number
    :return: dtheta - the gradient of the cost with respect to theta
    """
    dtheta = x
    return dtheta
def gradient_check(x, theta, epsilon=1e-7):
"""
实现反向传播
:param x: 实数输入
:param theta: 参数
:param epsilon: 使用公式计算输入的微小偏移以计算近似梯度
:return:difference -- 近似梯度和后向传播梯度之间的差异
"""
#使用导数定义公式计算gradapprox
thetaplus = theta + epsilon
thetaminus = theta - epsilon
J_plus = forward_propagation(x, thetaplus)
J_minus = forward_propagation(x, thetaminus)
gradapprox = (J_plus - J_minus) / (2 * epsilon)
    #check whether gradapprox is close enough to the output of backward_propagation()
    grad = backward_propagation(x, theta)
numerator = np.linalg.norm(grad - gradapprox)
denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox)
difference = numerator / denominator
    if difference < 1e-7:
        print("Gradient check: the gradient is fine")
    else:
        print("Gradient check: the gradient exceeds the threshold")
return difference
#test gradient_check
'''
x, theta = 2, 4
difference = gradient_check(x, theta)
print(str(difference)) '''
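#for x = 2, theta = 4 the printed difference comes out on the order of 1e-10, well below the 1e-7 threshold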
#n-dimensional case
def forward_propagation_n(X, Y, parameters):
"""
实现高维的前向传播 并计算成本
:param X:训练集为m个例子
:param Y:m个示例的标签
:param parameters:包含参数的字典
:return:cost - 成本函数(logistic)
"""
m = X.shape[1]
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
W3 = parameters["W3"]
b3 = parameters["b3"]
Z1 = np.dot(W1, X) + b1
A1 = gc_utils.relu(Z1)
Z2 = np.dot(W2, A1) + b2
A2 = gc_utils.relu(Z2)
Z3 = np.dot(W3, A2) + b3
A3 = gc_utils.sigmoid(Z3)
    #compute the cost
logprobs = np.multiply(-np.log(A3), Y) + np.multiply(-np.log(1 - A3), 1 - Y)
cost = (1 / m) * np.sum(logprobs)
cache = (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3)
return cost, cache
def backward_propagation_n(X, Y, cache):
"""
实现反向传播
:param X: 输入数据点(输入节点数量,1)
:param Y: 标签
:param cache: 前向传播的cache
:return: gradients - 一个字典,其中包含与每个参数、激活和激活前变量相关的成本梯度。
"""
m = X.shape[1]
(Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache
dZ3 = A3 - Y
dW3 = (1. / m) * np.dot(dZ3, A2.T)
db3 = 1. / m * np.sum(dZ3, axis=1, keepdims=True)
dA2 = np.dot(W3.T, dZ3)
dZ2 = np.multiply(dA2, np.int64(A2 > 0))
dW2 = 1. / m * np.dot(dZ2, A1.T)
db2 = 1. / m * np.sum(dZ2, axis=1, keepdims=True)
dA1 = np.dot(W2.T, dZ2)
dZ1 = np.multiply(dA1, np.int64(A1 > 0))
    dW1 = 1. / m * np.dot(dZ1, X.T)
db1 = 1. / m * np.sum(dZ1, axis=1, keepdims=True)
gradients = {
"dZ3":dZ3, "dW3":dW3, "db3":db3,
"dA2":dA2, "dZ2":dZ2, "dW2":dW2, "db2":db2,
"dA1": dA1, "dZ1": dZ1, "dW1": dW1, "db1": db1
}
return gradients
def gradient_check_n(parameters, gradients, X, Y, epsilon=1e-7):
"""
检查backward_propagation_n是否正确计算forward_propagation_n输出的成本梯度
:param parameters:包含参数“W1”,“b1”,“W2”,“b2”,“W3”,“b3”的python字典
:param gradients:反向传播返回的字典
:param X:输入数据点,维度为(输入节点数量, 1)
:param Y:标签
:param epsilon:计算输入的微小偏移计算近似梯度
:return:difference - 近似梯度和后向传播梯度之间的差异
"""
    #set up the parameter and gradient vectors
parameters_values, keys = gc_utils.dictionary_to_vector(parameters)
grad = gc_utils.gradients_to_vector(gradients)
num_parameters = parameters_values.shape[0]
J_plus = np.zeros((num_parameters, 1))
J_minus = np.zeros((num_parameters, 1))
gradapprox = np.zeros((num_parameters, 1))
    #compute gradapprox
    for i in range(num_parameters):
        #compute J_plus[i]; inputs: "parameters_values, epsilon"; output: "J_plus[i]"
        thetaplus = np.copy(parameters_values)
        thetaplus[i][0] = thetaplus[i][0] + epsilon
        J_plus[i], cache = forward_propagation_n(X, Y, gc_utils.vector_to_dictionary(thetaplus))
        #compute J_minus[i]; inputs: "parameters_values, epsilon"; output: "J_minus[i]"
        thetaminus = np.copy(parameters_values)
        thetaminus[i][0] = thetaminus[i][0] - epsilon
        J_minus[i], cache = forward_propagation_n(X, Y, gc_utils.vector_to_dictionary(thetaminus))
        #compute gradapprox[i]
        gradapprox[i] = (J_plus[i] - J_minus[i]) / (2 * epsilon)
    #compare gradapprox to the backward-propagation gradients by computing the difference
    numerator = np.linalg.norm(grad - gradapprox)
    denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox)
    difference = numerator / denominator
    if difference < 1e-7:
        print("Gradient check: the gradient is fine")
    else:
        print("Gradient check: the gradient exceeds the threshold")
return difference
gc_utils.py
import numpy as np
import matplotlib.pyplot as plt
def sigmoid(x):
s = 1/(1 + np.exp(-x))
return s
def relu(x):
s = np.maximum(0, x)
return s
def dictionary_to_vector(parameters):
    #roll all of the parameters in the dictionary into a single column vector
keys = []
count = 0
for key in ["W1", "b1", "W2", "b2", "W3", "b3"]:
        # flatten the parameter
new_vector = np.reshape(parameters[key], (-1, 1))
keys = keys + [key] * new_vector.shape[0]
if count == 0:
theta = new_vector
else:
theta = np.concatenate((theta, new_vector), axis=0)
count = count + 1
return theta, keys
def vector_to_dictionary(theta):
"""
从满足特定形状要求的单个向量展开所有参数字典。
"""
parameters = {}
parameters["W1"] = theta[:20].reshape((5,4))
parameters["b1"] = theta[20:25].reshape((5,1))
parameters["W2"] = theta[25:40].reshape((3,5))
parameters["b2"] = theta[40:43].reshape((3,1))
parameters["W3"] = theta[43:46].reshape((1,3))
parameters["b3"] = theta[46:47].reshape((1,1))
return parameters
def gradients_to_vector(gradients):
"""
    Rolls all of the gradients into a single vector to satisfy the required shape.
"""
count = 0
for key in ["dW1", "db1", "dW2", "db2", "dW3", "db3"]:
# flatten parameter
new_vector = np.reshape(gradients[key], (-1,1))
if count == 0:
theta = new_vector
else:
theta = np.concatenate((theta, new_vector), axis=0)
count = count + 1
return theta
The reference article ends here, so the final n-dimensional verification is missing, which is a shame.
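As a stopgap, here is my own sketch of that missing check (left commented out; the shapes follow the hypothetical [4, 5, 3, 1] network that vector_to_dictionary assumes, so X has 4 features and the labels are made up):
'''
np.random.seed(1)
X = np.random.randn(4, 3)   # 3 toy examples with 4 features each
Y = np.array([[1, 1, 0]])
parameters = {}
parameters["W1"] = np.random.randn(5, 4)
parameters["b1"] = np.zeros((5, 1))
parameters["W2"] = np.random.randn(3, 5)
parameters["b2"] = np.zeros((3, 1))
parameters["W3"] = np.random.randn(1, 3)
parameters["b3"] = np.zeros((1, 1))
cost, cache = forward_propagation_n(X, Y, parameters)
gradients = backward_propagation_n(X, Y, cache)
difference = gradient_check_n(parameters, gradients, X, Y)
'''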