Reference: Building a Multi-Layer Neural Network Step by Step and Its Application. It took me two days to finish coding this; the parts I could not figure out I mostly worked out with the help of CSDN. I hope I will come back and review this often.
Directory structure
main.py
import numpy as np
import matplotlib.pyplot as plt
import Deep_Learning.test4.testCases
from Deep_Learning.test4.dnn_utils import sigmoid, sigmoid_backward, relu, relu_backward
import Deep_Learning.test4.lr_utils
np.random.seed(1)  # fix the random seed
# Parameter initialization
# For a two-layer network the model structure is LINEAR -> ReLU -> LINEAR -> SIGMOID
def initialize_parameters(n_x, n_h, n_y):
"""
    Initialize the parameters of a two-layer neural network.
    :param n_x: number of units in the input layer
    :param n_h: number of units in the hidden layer
    :param n_y: number of units in the output layer
    :return: parameters - a python dictionary containing:
                W1 - weight matrix of shape (n_h, n_x)
                b1 - bias vector of shape (n_h, 1)
                W2 - weight matrix of shape (n_y, n_h)
                b2 - bias vector of shape (n_y, 1)
"""
W1 = np.random.rand(n_h, n_x) * 0.01
b1 = np.zeros((n_h, 1))
W2 = np.random.rand(n_y, n_h) * 0.01
b2 = np.zeros((n_y, 1))
    # Use assertions to make sure the shapes are correct
assert (W1.shape == (n_h, n_x))
assert (b1.shape == (n_h, 1))
assert (W2.shape == (n_y, n_h))
assert (b2.shape == (n_y, 1))
parameters = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2}
return parameters
# Test initialize_parameters
print("==================== Testing initialize_parameters ====================")
parameters = initialize_parameters(3, 2, 1)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
"""
Output:
W1 = [[4.17022005e-03 7.20324493e-03 1.14374817e-06]
[3.02332573e-03 1.46755891e-03 9.23385948e-04]]
b1 = [[0.]
[0.]]
W2 = [[0.0018626 0.00345561]]
b2 = [[0.]]
"""
# L-layer neural network
def initialize_parameters_deep(layers_dims):
"""
    Initialize the parameters of an L-layer neural network.
    :param layers_dims: list containing the number of units in each layer of the network
    :return: parameters - a dictionary containing "W1", "b1", ..., "WL", "bL":
                Wl - weight matrix of shape (layers_dims[l], layers_dims[l-1])
                bl - bias vector of shape (layers_dims[l], 1)
"""
np.random.seed(3)
parameters = {}
L = len(layers_dims)
for l in range(1, L):
        # Dividing by np.sqrt(layers_dims[l - 1]) (discussed in Course 2, video 1.11) keeps the signal at a sensible scale and helps prevent vanishing or exploding gradients
parameters["W" + str(l)] = np.random.randn(layers_dims[l], layers_dims[l - 1]) / np.sqrt(layers_dims[l - 1])
parameters["b" + str(l)] = np.zeros((layers_dims[l], 1))
        # Use assertions to make sure the shapes are correct
assert(parameters["W" + str(l)].shape == (layers_dims[l], layers_dims[l - 1]))
assert(parameters["b" + str(l)].shape == (layers_dims[l], 1))
return parameters
# Test initialize_parameters_deep
print("==================== Testing initialize_parameters_deep ====================")
layer_dims = [5, 4, 3]
parameters = initialize_parameters_deep(layer_dims)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
"""
Output:
W1 = [[ 0.79989897 0.19521314 0.04315498 -0.83337927 -0.12405178]
[-0.15865304 -0.03700312 -0.28040323 -0.01959608 -0.21341839]
[-0.58757818 0.39561516 0.39413741 0.76454432 0.02237573]
[-0.18097724 -0.24389238 -0.69160568 0.43932807 -0.49241241]]
b1 = [[0.]
[0.]
[0.]
[0.]]
W2 = [[-0.59252326 -0.10282495 0.74307418 0.11835813]
[-0.51189257 -0.3564966 0.31262248 -0.08025668]
[-0.38441818 -0.11501536 0.37252813 0.98805539]]
b2 = [[0.]
[0.]
[0.]]
"""
# Forward propagation functions
# Linear part
def linear_forward(A, W, b):
"""
    Implement the linear part of a layer's forward propagation.
    :param A: activations from the previous layer (or the input data), shape (size of previous layer, number of examples)
    :param W: weight matrix, numpy array of shape (size of current layer, size of previous layer)
    :param b: bias vector, numpy array of shape (size of current layer, 1)
    :return: Z - the input of the activation function, also called the pre-activation parameter
             cache - a tuple containing "A", "W" and "b", stored for computing the backward pass efficiently
"""
Z = np.dot(W, A) + b
    # Check the output shape explicitly (the original tuple-style assert was always True and raised a SyntaxWarning)
    assert Z.shape == (W.shape[0], A.shape[1])
cache = (A, W, b)
return Z, cache
# Test linear_forward
print("==================== Testing linear_forward ====================")
A, W, b = Deep_Learning.test4.testCases.linear_forward_test_case()
Z, liner_cache = linear_forward(A, W, b)
print("Z = " + str(Z))
"""
Output:
Z = [[ 3.26295337 -1.23429987]]
"""
# Activation part
def linear_activation_forward(A_prev, W, b, activation):
"""
    Implement the forward propagation for the LINEAR -> ACTIVATION layer.
    :param A_prev: activations from the previous layer (or the input layer), shape (size of previous layer, number of examples)
    :param W: weight matrix, numpy array of shape (size of current layer, size of previous layer)
    :param b: bias vector, numpy array of shape (size of current layer, 1)
    :param activation: the activation to be used in this layer, stored as a string: "sigmoid" | "relu"
    :return: A - the output of the activation function, also called the post-activation value
             cache - a tuple containing "linear_cache" and "activation_cache", stored for the backward pass
"""
if activation == "sigmoid":
Z, liner_cache = linear_forward(A_prev, W, b)
A, activation_cache = sigmoid(Z)
elif activation == "relu":
        Z, liner_cache = linear_forward(A_prev, W, b)
        A, activation_cache = relu(Z)
assert (A.shape == (W.shape[0], A_prev.shape[1]))
cache = (liner_cache, activation_cache)
return A, cache
# Test linear_activation_forward
print("==================== Testing linear_activation_forward ====================")
A_prev, W, b = Deep_Learning.test4.testCases.linear_activation_forward_test_case()
A, linear_activaton_cache = linear_activation_forward(A_prev, W, b, activation="sigmoid")
print("sigmoid, A = " + str(A))
A, linear_activaton_cache = linear_activation_forward(A_prev, W, b, activation="relu")
print("ReLU, A = " + str(A))
"""
Output:
sigmoid, A = [[0.96890023 0.11013289]]
ReLU, A = [[3.43896131 0. ]]
"""
# Forward propagation for the full L-layer model
def L_model_forward(X, parameters):
"""
    Implement forward propagation for the [LINEAR -> RELU] * (L - 1) -> LINEAR -> SIGMOID model,
    running LINEAR and ACTIVATION for every layer of the network.
    :param X: data, numpy array of shape (input size, number of examples)
    :param parameters: output of initialize_parameters_deep()
    :return: AL - the last post-activation value
             caches - list of caches containing:
                 every cache of linear_activation_forward() with "relu" (there are L - 1 of them, indexed 0 to L - 2)
                 the cache of linear_activation_forward() with "sigmoid" (there is exactly one, indexed L - 1)
"""
caches = []
A = X
    # // 2 because parameters stores two entries (Wl and bl) per layer, so the number of layers equals len(parameters) / 2
L = len(parameters) // 2
for l in range(1, L):
A_prev = A
A, cache = linear_activation_forward(A_prev, parameters['W' + str(l)], parameters['b' + str(l)], "relu")
caches.append(cache)
AL, cache = linear_activation_forward(A, parameters['W' + str(L)], parameters['b' + str(L)], "sigmoid")
caches.append(cache)
assert (AL.shape == (1, X.shape[1]))
return AL, caches
# Test L_model_forward
print("==================== Testing L_model_forward ====================")
X, parameters = Deep_Learning.test4.testCases.L_model_forward_test_case()
AL, caches = L_model_forward(X, parameters)
print("AL = " + str(AL))
print("cache的长度为 = " + str(len(caches)))
"""
Output:
AL = [[0.17007265 0.2524272 ]]
Length of caches = 2
"""
# Compute the cost
def compute_cost(AL, Y):
"""
    :param AL: probability vector corresponding to the label predictions, shape (1, number of examples)
    :param Y: true label vector (e.g. 0 if non-cat, 1 if cat), shape (1, number of examples)
    :return: cost - the cross-entropy cost
"""
m = Y.shape[1]
cost = - np.sum(np.multiply(np.log(AL), Y) + np.multiply(np.log(1 - AL), 1 - Y)) / m
cost = np.squeeze(cost)
assert (cost.shape == ())
return cost
# Test compute_cost
print("==================== Testing compute_cost ====================")
Y, AL = Deep_Learning.test4.testCases.compute_cost_test_case()
print("cost = " + str(compute_cost(AL, Y)))
"""
Output:
cost = 0.414931599615397
"""
# Backward propagation
# Linear part
def linear_backward(dZ, cache):
"""
    Implement the linear portion of backward propagation for a single layer (layer l).
    :param dZ: gradient of the cost with respect to the linear output of the current layer l
    :param cache: tuple of values (A_prev, W, b) coming from the forward propagation of the current layer
    :return: dA_prev - gradient of the cost with respect to the activation of the previous layer (l-1), same shape as A_prev
             dW - gradient of the cost with respect to W (current layer l), same shape as W
             db - gradient of the cost with respect to b (current layer l), same shape as b
"""
A_prev, W, b = cache
m = A_prev.shape[1]
dW = np.dot(dZ, A_prev.T) / m
db = np.sum(dZ, axis=1, keepdims=True) / m
dA_prev = np.dot(W.T, dZ)
assert (dA_prev.shape == A_prev.shape)
assert (dW.shape == W.shape)
assert (db.shape == b.shape)
return dA_prev, dW, db
# Test linear_backward
print("==================== Testing linear_backward ====================")
dZ, liner_cache = Deep_Learning.test4.testCases.linear_backward_test_case()
dA_prev, dW, db = linear_backward(dZ, liner_cache)
print("dA_prev = " + str(dA_prev))
print("dW = " + str(dW))
print("db = " + str(db))
"""
Output:
dA_prev = [[ 0.51822968 -0.19517421]
[-0.40506361 0.15255393]
[ 2.37496825 -0.89445391]]
dW = [[-0.10076895 1.40685096 1.64992505]]
db = [[0.50629448]]
"""
# Linear -> activation backward
def linear_activation_backward(dA, cache, activation="relu"):
"""
    Implement the backward propagation for the LINEAR -> ACTIVATION layer.
    :param dA: post-activation gradient for the current layer l
    :param cache: tuple of values (linear_cache, activation_cache) stored for computing the backward pass
    :param activation: the activation used in this layer, stored as a string: "sigmoid" | "relu"
    :return: dA_prev - gradient of the cost with respect to the activation of the previous layer (l-1), same shape as A_prev
             dW - gradient of the cost with respect to W (current layer l), same shape as W
             db - gradient of the cost with respect to b (current layer l), same shape as b
"""
liner_cache, activation_cache = cache
if activation == "relu":
dZ = relu_backward(dA, activation_cache)
dA_prev, dW, db = linear_backward(dZ, liner_cache)
elif activation == "sigmoid":
dZ = sigmoid_backward(dA, activation_cache)
dA_prev, dW, db = linear_backward(dZ, liner_cache)
return dA_prev, dW, db
# Test linear_activation_backward
print("==================== Testing linear_activation_backward ====================")
AL, linear_activaton_cache = Deep_Learning.test4.testCases.linear_activation_backward_test_case()
dA_prev, dW, db = linear_activation_backward(AL, linear_activaton_cache, activation="sigmoid")
print("sigmoid:")
print("dA_prev: " + str(dA_prev))
print("dW: " + str(dW))
print("db: " + str(db) + "\n")
dA_prev, dW, db = linear_activation_backward(AL, linear_activaton_cache, activation="relu")
print("relu:")
print("dA_prev: " + str(dA_prev))
print("dW: " + str(dW))
print("db: " + str(db))
"""
Output:
sigmoid:
dA_prev: [[ 0.11017994 0.01105339]
[ 0.09466817 0.00949723]
[-0.05743092 -0.00576154]]
dW: [[ 0.10266786 0.09778551 -0.01968084]]
db: [[-0.05729622]]
relu:
dA_prev: [[ 0.44090989 0. ]
[ 0.37883606 0. ]
[-0.2298228 0. ]]
dW: [[ 0.44513824 0.37371418 -0.10478989]]
db: [[-0.20837892]]
"""
# Backward propagation for the full L-layer model
def L_model_backward(AL, Y, caches):
"""
    Implement backward propagation for the [LINEAR -> RELU] * (L-1) -> LINEAR -> SIGMOID model,
    i.e. the backward pass of the whole L-layer network.
    :param AL: probability vector, output of the forward propagation (L_model_forward())
    :param Y: true label vector (e.g. 0 if non-cat, 1 if cat), shape (1, number of examples)
    :param caches: list of caches containing:
                 the caches of linear_activation_forward() with "relu" (every layer except the output layer)
                 the cache of linear_activation_forward() with "sigmoid" (the output layer)
    :return: grads - a dictionary with the gradients
                 grads["dA" + str(l)] = ...
                 grads["dW" + str(l)] = ...
                 grads["db" + str(l)] = ...
"""
grads = {}
L = len(caches)
m = AL.shape[1]
Y = Y.reshape(AL.shape)
    dAL = -(np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))  # see notes 2.9: dL(a, y)/da = -y/a + (1 - y)/(1 - a)
    current_cache = caches[L - 1]  # the last cache stored in caches
    grads["dA" + str(L - 1)], grads["dW" + str(L)], grads["db" + str(L)] = linear_activation_backward(dAL, current_cache, "sigmoid")  # returns dA[L-1], dW[L], db[L]
    for l in reversed(range(L - 1)):  # reversed(range(L - 1)) walks backwards from the second-to-last layer down to the first
        current_cache = caches[l]  # on the first pass this is caches[L - 2], the second-to-last cache in caches
        dA_prev_temp, dW_temp, db_temp = linear_activation_backward(grads["dA" + str(l + 1)], current_cache, "relu")  # e.g. dA_prev_temp = dA[L-2]
grads["dA" + str(l)] = dA_prev_temp
grads["dW" + str(l + 1)] = dW_temp
grads["db" + str(l + 1)] = db_temp
return grads
# Test L_model_backward
print("==================== Testing L_model_backward ====================")
AL, Y_assess, caches = Deep_Learning.test4.testCases.L_model_backward_test_case()
grads = L_model_backward(AL, Y_assess, caches)
print("dW1 = " + str(grads["dW1"]))
print("db1 = " + str(grads["db1"]))
print("dA0 = " + str(grads["dA0"]))
"""
Output:
dW1 = [[0.41010002 0.07807203 0.13798444 0.10502167]
[0. 0. 0. 0. ]
[0.05283652 0.01005865 0.01777766 0.0135308 ]]
db1 = [[-0.22007063]
[ 0. ]
[-0.02835349]]
dA0 = [[ 0. 0.52257901]
[ 0. -0.3269206 ]
[ 0. -0.32070404]
[ 0. -0.74079187]]
"""
# Update parameters
def update_parameters(parameters, grads, learning_rate):
"""
    Update the parameters using gradient descent.
    :param parameters: dictionary containing the parameters
    :param grads: dictionary containing the gradients, output of L_model_backward
    :param learning_rate: the learning rate
    :return: parameters - dictionary containing the updated parameters
                 parameters["W" + str(l)] = ...
                 parameters["b" + str(l)] = ...
"""
    L = len(parameters) // 2  # floor division: number of layers
for l in range(L):
parameters["W" + str(l + 1)] = parameters["W" + str(l + 1)] - learning_rate * grads["dW" + str(l + 1)]
parameters["b" + str(l + 1)] = parameters["b" + str(l + 1)] - learning_rate * grads["db" + str(l + 1)]
return parameters
# Test update_parameters
print("==================== Testing update_parameters ====================")
parameters, grads = Deep_Learning.test4.testCases.update_parameters_test_case()
parameters = update_parameters(parameters, grads, 0.1)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
"""
Output:
W1 = [[-0.59562069 -0.09991781 -2.14584584 1.82662008]
[-1.76569676 -0.80627147 0.51115557 -1.18258802]
[-1.0535704 -0.86128581 0.68284052 2.20374577]]
b1 = [[-0.04659241]
[-1.28888275]
[ 0.53405496]]
W2 = [[-0.55569196 0.0354055 1.32964895]]
b2 = [[-0.84610769]]
"""
# Now build the two-layer neural network
def two_layer_model(X, Y, layers_dims, learning_rate=0.0075, num_iterations=3000, print_cost=False, isPlot=True):
"""
    Implement a two-layer neural network: [LINEAR -> RELU] -> [LINEAR -> SIGMOID].
    :param X: input data, shape (n_x, number of examples)
    :param Y: true label vector (0 for non-cat, 1 for cat), shape (1, number of examples)
    :param layers_dims: dimensions of the layers, (n_x, n_h, n_y)
    :param learning_rate: the learning rate
    :param num_iterations: number of iterations of the optimization loop
    :param print_cost: whether to print the cost every 100 iterations
    :param isPlot: whether to plot the cost curve
    :return: parameters - a dictionary containing W1, b1, W2 and b2
"""
np.random.seed(1)
grads = {}
costs = []
(n_x, n_h, n_y) = layers_dims
    # 1. Initialize parameters
parameters = initialize_parameters(n_x, n_h, n_y)
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
    # 2. Start iterating
for i in range(0, num_iterations):
        # 2.1 Forward propagation
A1, cache1 = linear_activation_forward(X, W1, b1, "relu")
A2, cache2 = linear_activation_forward(A1, W2, b2, "sigmoid")
        # 2.2 Compute the cost
cost = compute_cost(A2, Y)
        # 2.3 Backward propagation
        # 2.3.1 Initialize the backward pass
dA2 = -(np.divide(Y, A2) - np.divide(1 - Y, 1 - A2))
        # 2.3.2 Backward pass: inputs "dA2, cache2, cache1", outputs "dA1, dW2, db2" and "dA0 (unused), dW1, db1"
dA1, dW2, db2 = linear_activation_backward(dA2, cache2, "sigmoid")
dA0, dW1, db1 = linear_activation_backward(dA1, cache1, "relu")
        # 2.3.3 Save the results of the backward pass in grads
grads["dW1"] = dW1
grads["db1"] = db1
grads["dW2"] = dW2
grads["db2"] = db2
        # 2.4 Update the parameters
parameters = update_parameters(parameters, grads, learning_rate)
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
        # Print the cost; nothing is printed when print_cost=False
        if i % 100 == 0:
            # record the cost
            costs.append(cost)
            # optionally print the cost
            if print_cost:
                print("Iteration", i, "cost:", np.squeeze(cost))
    # 3. Training finished; plot the cost curve if requested
if isPlot:
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title("Learning rate = " + str(learning_rate))
plt.show()
return parameters
# Load the dataset (the same data used in the week-2 assignment)
"""
TODO: the dataset loading still needs more study on my part; it is not entirely clear to me yet.
"""
train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = Deep_Learning.test4.lr_utils.load_dataset()
train_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T
test_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T
train_x = train_x_flatten / 255
train_y = train_set_y
test_x = test_x_flatten / 255
test_y = test_set_y
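# Aside (mine): what the preprocessing above actually does. load_dataset() returns images of
# shape (m, 64, 64, 3); reshape(m, -1).T flattens each image into one column, giving
# (64*64*3, m) = (12288, m), and dividing by 255 rescales the pixels from [0, 255] to [0, 1].
print(train_set_x_orig.shape, "->", train_x.shape)  # e.g. (209, 64, 64, 3) -> (12288, 209)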
# Data loaded; start training
n_x = 12288
n_h = 7
n_y = 1
layers_dims = (n_x, n_h, n_y)
parameters = two_layer_model(train_x, train_set_y, layers_dims=(n_x, n_h, n_y), num_iterations=2500, print_cost=True, isPlot=True)
# Prediction function
def predict(X, y, parameters):
"""
    Predict the results of an L-layer neural network.
    :param X: the data set to predict on
    :param y: the true labels
    :param parameters: parameters of the trained model
    :return: p - predictions for the given data set X
"""
m = X.shape[1]
    n = len(parameters) // 2  # number of layers in the network
p = np.zeros((1, m))
    # Forward propagation with the trained parameters
AL, caches = L_model_forward(X, parameters)
for i in range(0, AL.shape[1]):
if AL[0, i] > 0.5:
p[0, i] = 1
else:
p[0, i] = 0
print("准确度为:" + str(float(np.sum((p == y)) / m)))
return p
# Check accuracy on the training and test sets
print("==================== Two-layer neural network ====================")
predictions_train = predict(train_x, train_y, parameters)
predictions_test = predict(test_x, test_y, parameters)
# Now build the L-layer neural network
def L_layer_model(X, Y, layers_dims, learning_rate=0.0075, num_iterations=3000, print_cost=False, isPlot=True):
"""
    Implement an L-layer neural network: [LINEAR -> RELU] * (L-1) -> LINEAR -> SIGMOID.
    :param X: input data, shape (n_x, number of examples)
    :param Y: true label vector (0 for non-cat, 1 for cat), shape (1, number of examples)
    :param layers_dims: list with the dimensions of the layers, e.g. [n_x, ..., n_y]
    :param learning_rate: the learning rate
    :param num_iterations: number of iterations of the optimization loop
    :param print_cost: whether to print the cost every 100 iterations
    :param isPlot: whether to plot the cost curve
    :return: parameters - the parameters learnt by the model; they can be used to predict
"""
np.random.seed(1)
costs = []
parameters = initialize_parameters_deep(layers_dims)
for i in range(0, num_iterations):
AL, caches = L_model_forward(X, parameters)
cost = compute_cost(AL, Y)
grads = L_model_backward(AL, Y, caches)
parameters = update_parameters(parameters, grads, learning_rate)
        # Print the cost; ignored when print_cost=False
        if i % 100 == 0:
            # record the cost
            costs.append(cost)
            # optionally print the cost
            if print_cost:
                print("Iteration", i, "cost:", np.squeeze(cost))
    # Training finished; plot the cost curve if requested
if isPlot:
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title("Learning rate = " + str(learning_rate))
plt.show()
return parameters
# Load the dataset
train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = Deep_Learning.test4.lr_utils.load_dataset()
train_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T
test_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T
train_x = train_x_flatten / 255
train_y = train_set_y
test_x = test_x_flatten / 255
test_y = test_set_y
# Data loading done; start training
layers_dims = [12288, 20, 7, 5, 1] # 5-layer model
parameters = L_layer_model(train_x, train_y, layers_dims, num_iterations=2500, print_cost=True, isPlot=True)
# Check the accuracy of the predictions
print("=========================对于L层神经网络而言=======================")
pred_train = predict(train_x, train_y, parameters)
pred_test = predict(test_x, test_y, parameters)
# Analysis: look at the images the L-layer model mislabelled
def print_mislabeled_images(classes, X, y, p):
"""
    Plot the images where the prediction and the true label differ.
    :param classes: the two class-name strings, stored as bytes objects
    :param X: the data set
    :param y: the true labels
    :param p: the predictions
    :return:
"""
a = p + y
mislabeled_indices = np.asarray(np.where(a == 1))
    plt.rcParams['figure.figsize'] = (40.0, 40.0)  # set the default figure size
num_images = len(mislabeled_indices[0])
for i in range(num_images):
index = mislabeled_indices[1][i]
        plt.subplot(2, num_images, i + 1)  # subplot(rows, columns, index); the index starts at 1
        plt.imshow(X[:, index].reshape(64, 64, 3), interpolation='nearest')  # nearest-neighbour interpolation
        plt.axis('off')  # hide the axes
plt.title("Prediction:" + classes[int(p[0, index])].decode("utf-8") + "\n Class:"
+ classes[y[0, index]].decode("utf-8"))
plt.show()
print_mislabeled_images(classes, test_x, test_y, pred_test)
"""
Reasons the model tends to do poorly on these images include:
    The cat's body is in an unusual position
    The cat appears against a background of a similar colour
    Unusual cat colours and breeds
    Camera angle
    Brightness of the picture
    Scale variation (the cat appears very large or very small in the image)
"""
# Optional: use an image of your own - put it in the expected location and classify it
from PIL import Image
num_px = 64
my_label_y = [1] # the true class of your image(1 -> cat, 0 -> non_cat)
# Read the image, convert it to 3-channel RGB (the convert call can be dropped if it is already RGB), and resize it to 64x64
my_image = np.array(Image.open('img/my_image.jpg').convert("RGB").resize((num_px, num_px)))
predict_image = my_image.reshape((num_px * num_px * 3, 1)) / 255  # normalize the same way as the training data
print("=======================预测自己准备图片的准确性======================")
predict_my_image = predict(predict_image, my_label_y, parameters)
plt.imshow(my_image)
plt.show()
print("y = " + str(np.squeeze(predict_my_image)) + ", your L-layer model predicts a \"" + classes[int(np.squeeze(predict_my_image))].decode("utf-8") + "\" picture.")
"""
Output:
y = 1.0, your L-layer model predicts a "cat" picture.
"""
testCases.py
import numpy as np
def linear_forward_test_case():
np.random.seed(1)
"""
X = np.array([[-1.02387576, 1.12397796],
[-1.62328545, 0.64667545],
[-1.74314104, -0.59664964]])
W = np.array([[ 0.74505627, 1.97611078, -1.24412333]])
b = np.array([[1]])
"""
A = np.random.randn(3,2)
W = np.random.randn(1,3)
b = np.random.randn(1,1)
return A, W, b
def linear_activation_forward_test_case():
"""
X = np.array([[-1.02387576, 1.12397796],
[-1.62328545, 0.64667545],
[-1.74314104, -0.59664964]])
W = np.array([[ 0.74505627, 1.97611078, -1.24412333]])
b = 5
"""
np.random.seed(2)
A_prev = np.random.randn(3,2)
W = np.random.randn(1,3)
b = np.random.randn(1,1)
return A_prev, W, b
def L_model_forward_test_case():
"""
X = np.array([[-1.02387576, 1.12397796],
[-1.62328545, 0.64667545],
[-1.74314104, -0.59664964]])
parameters = {'W1': np.array([[ 1.62434536, -0.61175641, -0.52817175],
[-1.07296862, 0.86540763, -2.3015387 ]]),
'W2': np.array([[ 1.74481176, -0.7612069 ]]),
'b1': np.array([[ 0.],
[ 0.]]),
'b2': np.array([[ 0.]])}
"""
np.random.seed(1)
X = np.random.randn(4,2)
W1 = np.random.randn(3,4)
b1 = np.random.randn(3,1)
W2 = np.random.randn(1,3)
b2 = np.random.randn(1,1)
parameters = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2}
return X, parameters
def compute_cost_test_case():
Y = np.asarray([[1, 1, 1]])
aL = np.array([[.8,.9,0.4]])
return Y, aL
def linear_backward_test_case():
"""
z, linear_cache = (np.array([[-0.8019545 , 3.85763489]]), (np.array([[-1.02387576, 1.12397796],
[-1.62328545, 0.64667545],
[-1.74314104, -0.59664964]]), np.array([[ 0.74505627, 1.97611078, -1.24412333]]), np.array([[1]]))
"""
np.random.seed(1)
dZ = np.random.randn(1,2)
A = np.random.randn(3,2)
W = np.random.randn(1,3)
b = np.random.randn(1,1)
linear_cache = (A, W, b)
return dZ, linear_cache
def linear_activation_backward_test_case():
"""
aL, linear_activation_cache = (np.array([[ 3.1980455 , 7.85763489]]), ((np.array([[-1.02387576, 1.12397796], [-1.62328545, 0.64667545], [-1.74314104, -0.59664964]]), np.array([[ 0.74505627, 1.97611078, -1.24412333]]), 5), np.array([[ 3.1980455 , 7.85763489]])))
"""
np.random.seed(2)
dA = np.random.randn(1,2)
A = np.random.randn(3,2)
W = np.random.randn(1,3)
b = np.random.randn(1,1)
Z = np.random.randn(1,2)
linear_cache = (A, W, b)
activation_cache = Z
linear_activation_cache = (linear_cache, activation_cache)
return dA, linear_activation_cache
def L_model_backward_test_case():
"""
X = np.random.rand(3,2)
Y = np.array([[1, 1]])
parameters = {'W1': np.array([[ 1.78862847, 0.43650985, 0.09649747]]), 'b1': np.array([[ 0.]])}
aL, caches = (np.array([[ 0.60298372, 0.87182628]]), [((np.array([[ 0.20445225, 0.87811744],
[ 0.02738759, 0.67046751],
[ 0.4173048 , 0.55868983]]),
np.array([[ 1.78862847, 0.43650985, 0.09649747]]),
np.array([[ 0.]])),
np.array([[ 0.41791293, 1.91720367]]))])
"""
np.random.seed(3)
AL = np.random.randn(1, 2)
Y = np.array([[1, 0]])
A1 = np.random.randn(4,2)
W1 = np.random.randn(3,4)
b1 = np.random.randn(3,1)
Z1 = np.random.randn(3,2)
linear_cache_activation_1 = ((A1, W1, b1), Z1)
A2 = np.random.randn(3,2)
W2 = np.random.randn(1,3)
b2 = np.random.randn(1,1)
Z2 = np.random.randn(1,2)
linear_cache_activation_2 = ( (A2, W2, b2), Z2)
caches = (linear_cache_activation_1, linear_cache_activation_2)
return AL, Y, caches
def update_parameters_test_case():
"""
parameters = {'W1': np.array([[ 1.78862847, 0.43650985, 0.09649747],
[-1.8634927 , -0.2773882 , -0.35475898],
[-0.08274148, -0.62700068, -0.04381817],
[-0.47721803, -1.31386475, 0.88462238]]),
'W2': np.array([[ 0.88131804, 1.70957306, 0.05003364, -0.40467741],
[-0.54535995, -1.54647732, 0.98236743, -1.10106763],
[-1.18504653, -0.2056499 , 1.48614836, 0.23671627]]),
'W3': np.array([[-1.02378514, -0.7129932 , 0.62524497],
[-0.16051336, -0.76883635, -0.23003072]]),
'b1': np.array([[ 0.],
[ 0.],
[ 0.],
[ 0.]]),
'b2': np.array([[ 0.],
[ 0.],
[ 0.]]),
'b3': np.array([[ 0.],
[ 0.]])}
grads = {'dW1': np.array([[ 0.63070583, 0.66482653, 0.18308507],
[ 0. , 0. , 0. ],
[ 0. , 0. , 0. ],
[ 0. , 0. , 0. ]]),
'dW2': np.array([[ 1.62934255, 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. ]]),
'dW3': np.array([[-1.40260776, 0. , 0. ]]),
'da1': np.array([[ 0.70760786, 0.65063504],
[ 0.17268975, 0.15878569],
[ 0.03817582, 0.03510211]]),
'da2': np.array([[ 0.39561478, 0.36376198],
[ 0.7674101 , 0.70562233],
[ 0.0224596 , 0.02065127],
[-0.18165561, -0.16702967]]),
'da3': np.array([[ 0.44888991, 0.41274769],
[ 0.31261975, 0.28744927],
[-0.27414557, -0.25207283]]),
'db1': 0.75937676204411464,
'db2': 0.86163759922811056,
'db3': -0.84161956022334572}
"""
np.random.seed(2)
W1 = np.random.randn(3,4)
b1 = np.random.randn(3,1)
W2 = np.random.randn(1,3)
b2 = np.random.randn(1,1)
parameters = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2}
np.random.seed(3)
dW1 = np.random.randn(3,4)
db1 = np.random.randn(3,1)
dW2 = np.random.randn(1,3)
db2 = np.random.randn(1,1)
grads = {"dW1": dW1,
"db1": db1,
"dW2": dW2,
"db2": db2}
return parameters, grads
dnn_utils.py
import numpy as np
def sigmoid(Z):
"""
Implements the sigmoid activation in numpy
Arguments:
Z -- numpy array of any shape
Returns:
A -- output of sigmoid(z), same shape as Z
cache -- returns Z as well, useful during backpropagation
"""
A = 1/(1+np.exp(-Z))
cache = Z
return A, cache
def sigmoid_backward(dA, cache):
"""
Implement the backward propagation for a single SIGMOID unit.
Arguments:
dA -- post-activation gradient, of any shape
cache -- 'Z' where we store for computing backward propagation efficiently
Returns:
dZ -- Gradient of the cost with respect to Z
"""
Z = cache
s = 1/(1+np.exp(-Z))
dZ = dA * s * (1-s)
assert (dZ.shape == Z.shape)
return dZ
def relu(Z):
"""
Implement the RELU function.
Arguments:
Z -- Output of the linear layer, of any shape
Returns:
A -- Post-activation parameter, of the same shape as Z
cache -- a python dictionary containing "A" ; stored for computing the backward pass efficiently
"""
A = np.maximum(0,Z)
assert(A.shape == Z.shape)
cache = Z
return A, cache
def relu_backward(dA, cache):
"""
Implement the backward propagation for a single RELU unit.
Arguments:
dA -- post-activation gradient, of any shape
cache -- 'Z' where we store for computing backward propagation efficiently
Returns:
dZ -- Gradient of the cost with respect to Z
"""
Z = cache
dZ = np.array(dA, copy=True) # just converting dz to a correct object.
# When z <= 0, you should set dz to 0 as well.
dZ[Z <= 0] = 0
assert (dZ.shape == Z.shape)
return dZ
lr_utils.py
import numpy as np
import h5py
def load_dataset():
train_dataset = h5py.File('datasets/train_catvnoncat.h5', "r")
train_set_x_orig = np.array(train_dataset["train_set_x"][:]) # your train set features
train_set_y_orig = np.array(train_dataset["train_set_y"][:]) # your train set labels
test_dataset = h5py.File('datasets/test_catvnoncat.h5', "r")
test_set_x_orig = np.array(test_dataset["test_set_x"][:]) # your test set features
test_set_y_orig = np.array(test_dataset["test_set_y"][:]) # your test set labels
classes = np.array(test_dataset["list_classes"][:]) # the list of classes
train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes
Output
==================== Testing initialize_parameters ====================
W1 = [[4.17022005e-03 7.20324493e-03 1.14374817e-06]
[3.02332573e-03 1.46755891e-03 9.23385948e-04]]
b1 = [[0.]
[0.]]
W2 = [[0.0018626 0.00345561]]
b2 = [[0.]]
==================== Testing initialize_parameters_deep ====================
W1 = [[ 0.79989897 0.19521314 0.04315498 -0.83337927 -0.12405178]
[-0.15865304 -0.03700312 -0.28040323 -0.01959608 -0.21341839]
[-0.58757818 0.39561516 0.39413741 0.76454432 0.02237573]
[-0.18097724 -0.24389238 -0.69160568 0.43932807 -0.49241241]]
b1 = [[0.]
[0.]
[0.]
[0.]]
W2 = [[-0.59252326 -0.10282495 0.74307418 0.11835813]
[-0.51189257 -0.3564966 0.31262248 -0.08025668]
[-0.38441818 -0.11501536 0.37252813 0.98805539]]
b2 = [[0.]
[0.]
[0.]]
==================== Testing linear_forward ====================
Z = [[ 3.26295337 -1.23429987]]
==================== Testing linear_activation_forward ====================
sigmoid, A = [[0.96890023 0.11013289]]
ReLU, A = [[3.43896131 0. ]]
==================== Testing L_model_forward ====================
AL = [[0.17007265 0.2524272 ]]
Length of caches = 2
==================== Testing compute_cost ====================
cost = 0.414931599615397
==================== Testing linear_backward ====================
dA_prev = [[ 0.51822968 -0.19517421]
[-0.40506361 0.15255393]
[ 2.37496825 -0.89445391]]
dW = [[-0.10076895 1.40685096 1.64992505]]
db = [[0.50629448]]
==================== Testing linear_activation_backward ====================
sigmoid:
dA_prev: [[ 0.11017994 0.01105339]
[ 0.09466817 0.00949723]
[-0.05743092 -0.00576154]]
dW: [[ 0.10266786 0.09778551 -0.01968084]]
db: [[-0.05729622]]
relu:
dA_prev: [[ 0.44090989 0. ]
[ 0.37883606 0. ]
[-0.2298228 0. ]]
dW: [[ 0.44513824 0.37371418 -0.10478989]]
db: [[-0.20837892]]
==================== Testing L_model_backward ====================
dW1 = [[0.41010002 0.07807203 0.13798444 0.10502167]
[0. 0. 0. 0. ]
[0.05283652 0.01005865 0.01777766 0.0135308 ]]
db1 = [[-0.22007063]
[ 0. ]
[-0.02835349]]
dA0 = [[ 0. 0.52257901]
[ 0. -0.3269206 ]
[ 0. -0.32070404]
[ 0. -0.74079187]]
==================== Testing update_parameters ====================
W1 = [[-0.59562069 -0.09991781 -2.14584584 1.82662008]
[-1.76569676 -0.80627147 0.51115557 -1.18258802]
[-1.0535704 -0.86128581 0.68284052 2.20374577]]
b1 = [[-0.04659241]
[-1.28888275]
[ 0.53405496]]
W2 = [[-0.55569196 0.0354055 1.32964895]]
b2 = [[-0.84610769]]
Iteration 0 cost: 0.829942437000611
Iteration 100 cost: 0.7818573247017775
Iteration 200 cost: 0.6539324137759398
Iteration 300 cost: 0.6514620469500948
Iteration 400 cost: 0.6479215647343921
Iteration 500 cost: 0.6370394959826985
Iteration 600 cost: 0.6232298722708977
Iteration 700 cost: 0.5898122359183274
Iteration 800 cost: 0.5494888383394619
Iteration 900 cost: 0.5081456895132139
Iteration 1000 cost: 0.4684114873896508
Iteration 1100 cost: 0.43101822373691157
Iteration 1200 cost: 0.39592674052199744
Iteration 1300 cost: 0.36297399801937474
Iteration 1400 cost: 0.3322907935221445
Iteration 1500 cost: 0.3016329118998794
Iteration 1600 cost: 0.27214637093651933
Iteration 1700 cost: 0.24269206419611508
Iteration 1800 cost: 0.19083073982532248
Iteration 1900 cost: 0.11637694704551979
Iteration 2000 cost: 0.09759870407998461
Iteration 2100 cost: 0.08281830207274377
Iteration 2200 cost: 0.07098612498105607
Iteration 2300 cost: 0.06142718125431676
Iteration 2400 cost: 0.053635364487233765
==================== Two-layer neural network ====================
Accuracy: 0.9952153110047847
Accuracy: 0.68
Iteration 0 cost: 0.715731513413713
Iteration 100 cost: 0.6747377593469114
Iteration 200 cost: 0.6603365433622127
Iteration 300 cost: 0.6462887802148751
Iteration 400 cost: 0.6298131216927773
Iteration 500 cost: 0.606005622926534
Iteration 600 cost: 0.5690041263975134
Iteration 700 cost: 0.519796535043806
Iteration 800 cost: 0.46415716786282285
Iteration 900 cost: 0.40842030048298916
Iteration 1000 cost: 0.37315499216069026
Iteration 1100 cost: 0.30572374573047106
Iteration 1200 cost: 0.26810152847740837
Iteration 1300 cost: 0.23872474827672654
Iteration 1400 cost: 0.20632263257914718
Iteration 1500 cost: 0.17943886927493605
Iteration 1600 cost: 0.1579873581880163
Iteration 1700 cost: 0.14240413012274492
Iteration 1800 cost: 0.12865165997888675
Iteration 1900 cost: 0.1124431499816437
Iteration 2000 cost: 0.08505631034982422
Iteration 2100 cost: 0.05758391198616691
Iteration 2200 cost: 0.044567534546991264
Iteration 2300 cost: 0.03808275166600256
Iteration 2400 cost: 0.034410749018419895
==================== L-layer neural network ====================
Accuracy: 0.9952153110047847
Accuracy: 0.78
==================== Prediction on my own image ====================
Accuracy: 1.0
y = 1.0, your L-layer model predicts a "cat" picture.
Figures displayed, in order:
Cost curve of the two-layer neural network
Cost curve of the L-layer neural network
The mislabelled images
My own cat picture after preprocessing