import numpy as np
from datasets.testCases import *
from datasets.planar_utils import sigmoid
# GRADED FUNCTION: layer_sizes
def layer_sizes(X, Y):
    """
    Arguments:
    X -- input dataset of shape (input size, number of examples)
    Y -- labels of shape (output size, number of examples)

    Returns:
    n_x -- the size of the input layer
    n_h -- the size of the hidden layer
    n_y -- the size of the output layer
    """
    n_x = X.shape[0]
    n_h = 4
    n_y = Y.shape[0]
    return n_x, n_h, n_y
# GRADED FUNCTION: initialize_parameters
def initialize_parameters(n_x, n_h, n_y):
    """
    Argument:
    n_x -- size of the input layer
    n_h -- size of the hidden layer
    n_y -- size of the output layer

    Returns:
    params -- python dictionary containing your parameters:
        W1 -- weight matrix of shape (n_h, n_x)
        b1 -- bias vector of shape (n_h, 1)
        W2 -- weight matrix of shape (n_y, n_h)
        b2 -- bias vector of shape (n_y, 1)
    """
    np.random.seed(2)  # fix the seed so the random initialization is reproducible
    W1 = np.random.randn(n_h, n_x) * 0.01  # rows of W = units in this layer, columns = units in the previous layer
    b1 = np.zeros((n_h, 1))  # np.zeros(shape, dtype=float, order='C'): the shape (n_h, 1) needs its own parentheses, otherwise 1 would be passed as dtype
    W2 = np.random.randn(n_y, n_h) * 0.01
    b2 = np.zeros((n_y, 1))

    assert (W1.shape == (n_h, n_x))
    assert (b1.shape == (n_h, 1))
    assert (W2.shape == (n_y, n_h))
    assert (b2.shape == (n_y, 1))

    parameters = {"W1": W1, "b1": b1, "W2": W2, "b2": b2}
    return parameters
# GRADED FUNCTION: forward_propagation
def forward_propagation(X, parameters):
    """
    Argument:
    X -- input data of size (n_x, m)
    parameters -- python dictionary containing your parameters (output of initialization function)

    Returns:
    A2 -- The sigmoid output of the second activation
    cache -- a dictionary containing "Z1", "A1", "Z2" and "A2"
    """
    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]

    Z1 = np.dot(W1, X) + b1
    A1 = np.tanh(Z1)
    Z2 = np.dot(W2, A1) + b2
    A2 = sigmoid(Z2)

    assert (A2.shape == (1, X.shape[1]))

    cache = {"Z1": Z1, "A1": A1, "Z2": Z2, "A2": A2}
    return A2, cache
# GRADED FUNCTION: compute_cost
def compute_cost(A2, Y, parameters):
    """
    Computes the cross-entropy cost given in equation (13)

    Arguments:
    A2 -- The sigmoid output of the second activation, of shape (1, number of examples)
    Y -- "true" labels vector of shape (1, number of examples)
    parameters -- python dictionary containing your parameters W1, b1, W2 and b2

    Returns:
    cost -- cross-entropy cost given equation (13)
    """
    m = Y.shape[1]  # number of examples
    loss = -(np.dot(np.log(A2), Y.T) + np.dot(np.log(1 - A2), (1 - Y).T))
    cost = loss / m
    cost = float(np.squeeze(cost))  # makes sure cost is the dimension we expect
    assert (isinstance(cost, float))
    return cost
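For reference, the quantity this function computes (the assignment's equation (13)) is the cross-entropy cost:

$$J = -\frac{1}{m} \sum_{i=1}^{m} \Bigl[ y^{(i)} \log a^{[2](i)} + \bigl(1 - y^{(i)}\bigr) \log\bigl(1 - a^{[2](i)}\bigr) \Bigr]$$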
# GRADED FUNCTION: backward_propagation
def backward_propagation(parameters, cache, X, Y):
    """
    Implement the backward propagation using the instructions above.

    Arguments:
    parameters -- python dictionary containing our parameters
    cache -- a dictionary containing "Z1", "A1", "Z2" and "A2"
    X -- input data of shape (2, number of examples)
    Y -- "true" labels vector of shape (1, number of examples)

    Returns:
    grads -- python dictionary containing your gradients with respect to different parameters
    """
    m = X.shape[1]
    W1 = parameters["W1"]
    W2 = parameters["W2"]
    A1 = cache["A1"]
    A2 = cache["A2"]

    dZ2 = A2 - Y
    dW2 = np.dot(dZ2, A1.T) / m
    db2 = np.sum(dZ2, axis=1, keepdims=True) / m
    dZ1 = np.multiply(np.dot(W2.T, dZ2), 1 - np.power(A1, 2))
    dW1 = np.dot(dZ1, X.T) / m
    db1 = np.sum(dZ1, axis=1, keepdims=True) / m

    grads = {"dW1": dW1, "db1": db1, "dW2": dW2, "db2": db2}
    return grads
# GRADED FUNCTION: update_parameters
def update_parameters(parameters, grads, learning_rate=1.5):
    """
    Updates parameters using the gradient descent update rule given above

    Arguments:
    parameters -- python dictionary containing your parameters
    grads -- python dictionary containing your gradients

    Returns:
    parameters -- python dictionary containing your updated parameters
    """
    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]

    dW1 = grads["dW1"]
    db1 = grads["db1"]
    dW2 = grads["dW2"]
    db2 = grads["db2"]

    W1 -= learning_rate * dW1
    b1 -= learning_rate * db1
    W2 -= learning_rate * dW2
    b2 -= learning_rate * db2

    parameters = {"W1": W1, "b1": b1, "W2": W2, "b2": b2}
    return parameters
# GRADED FUNCTION: nn_model
def nn_model(X, Y, n_h, num_iterations=10000, print_cost=False):
    """
    Arguments:
    X -- dataset of shape (2, number of examples)
    Y -- labels of shape (1, number of examples)
    n_h -- size of the hidden layer
    num_iterations -- Number of iterations in gradient descent loop
    print_cost -- if True, print the cost every 1000 iterations

    Returns:
    parameters -- parameters learnt by the model. They can then be used to predict.
    """
    np.random.seed(3)
    n_x = layer_sizes(X, Y)[0]
    n_y = layer_sizes(X, Y)[2]

    parameters = initialize_parameters(n_x, n_h, n_y)
    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]

    # Loop (gradient descent)
    for i in range(num_iterations):
        A2, cache = forward_propagation(X, parameters)         # Forward propagation
        cost = compute_cost(A2, Y, parameters)                 # Cost function
        grads = backward_propagation(parameters, cache, X, Y)  # Backpropagation
        parameters = update_parameters(parameters, grads)      # Gradient descent parameter update

        # Print the cost every 1000 iterations
        if print_cost and i % 1000 == 0:
            print("Cost after iteration %i: %f" % (i, cost))

    return parameters
# GRADED FUNCTION: predict
def predict(parameters, X):
    """
    Using the learned parameters, predicts a class for each example in X

    Arguments:
    parameters -- python dictionary containing your parameters
    X -- input data of size (n_x, m)

    Returns:
    predictions -- vector of predictions of our model (red: 0 / blue: 1)
    """
    A2, cache = forward_propagation(X, parameters)
    predictions = np.round(A2)  # round the sigmoid output to get 0/1 predictions
    return predictions
runwork.py: run the model on the data
import numpy as np
import matplotlib.pyplot as plt
from network import nn_model, predict
from datasets.planar_utils import plot_decision_boundary, load_planar_dataset
X, Y = load_planar_dataset()

"""
Alternatively, test one of the other datasets
and get X, Y for the chosen dataset
(load_extra_datasets also lives in datasets.planar_utils):

noisy_circles, noisy_moons, blobs, gaussian_quantiles, no_structure = load_extra_datasets()
datasets = {"noisy_circles": noisy_circles,
            "noisy_moons": noisy_moons,
            "blobs": blobs,
            "gaussian_quantiles": gaussian_quantiles}

# Choose a dataset
dataset = "blobs"
X, Y = datasets[dataset]
X, Y = X.T, Y.reshape(1, Y.shape[0])

# make blobs binary
if dataset == "blobs":
    Y = Y % 2

# Visualize the data
plt.scatter(X[0, :], X[1, :], c=np.squeeze(Y), s=40, cmap=plt.cm.Spectral)
plt.title("Decision Boundary for hidden layer size " + str(4))
"""

parameters = nn_model(X, Y, n_h=4, num_iterations=10000, print_cost=True)

# Plot the decision boundary
plot_decision_boundary(lambda x: predict(parameters, x.T), X, Y)
plt.title("Decision Boundary for hidden layer size " + str(4))

predictions = predict(parameters, X)
# np.dot(Y, predictions.T) counts examples where label and prediction are both 1,
# np.dot(1 - Y, 1 - predictions.T) counts examples where both are 0;
# their sum divided by Y.size is the fraction of correct predictions.
print('Accuracy: %d' % float((np.dot(Y, predictions.T) + np.dot(1 - Y, 1 - predictions.T)) / float(Y.size) * 100) + '%')
planar_utils.py: helper functions
import matplotlib.pyplot as plt
import numpy as np
import sklearn
import sklearn.datasets
import sklearn.linear_model
def plot_decision_boundary(model, X, y):
    # Set min and max values and give it some padding
    x_min, x_max = X[0, :].min() - 1, X[0, :].max() + 1
    y_min, y_max = X[1, :].min() - 1, X[1, :].max() + 1
    h = 0.01
    # Generate a grid of points with distance h between them
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    # Predict the function value for the whole grid
    Z = model(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    # Plot the contour and training examples
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.ylabel('x2')
    plt.xlabel('x1')
    plt.scatter(X[0, :], X[1, :], c=np.squeeze(y), cmap=plt.cm.Spectral)

def sigmoid(x):
    s = 1 / (1 + np.exp(-x))
    return s
def load_planar_dataset():
    np.random.seed(1)
    m = 400  # number of examples
    N = int(m / 2)  # number of points per class
    D = 2  # dimensionality
    X = np.zeros((m, D))  # data matrix where each row is a single example
    Y = np.zeros((m, 1), dtype='uint8')  # labels vector (0 for red, 1 for blue)
    a = 4  # maximum ray of the flower

    for j in range(2):
        ix = range(N * j, N * (j + 1))
        t = np.linspace(j * 3.12, (j + 1) * 3.12, N) + np.random.randn(N) * 0.2  # theta
        r = a * np.sin(4 * t) + np.random.randn(N) * 0.2  # radius
        X[ix] = np.c_[r * np.sin(t), r * np.cos(t)]
        Y[ix] = j

    X = X.T
    Y = Y.T
    return X, Y
def load_extra_datasets():
    N = 200
    noisy_circles = sklearn.datasets.make_circles(n_samples=N, factor=.5, noise=.3)
    noisy_moons = sklearn.datasets.make_moons(n_samples=N, noise=.2)
    blobs = sklearn.datasets.make_blobs(n_samples=N, random_state=5, n_features=2, centers=6)
    gaussian_quantiles = sklearn.datasets.make_gaussian_quantiles(mean=None, cov=0.5, n_samples=N, n_features=2,
                                                                  n_classes=2, shuffle=True, random_state=None)
    no_structure = np.random.rand(N, 2), np.random.rand(N, 2)
    return noisy_circles, noisy_moons, blobs, gaussian_quantiles, no_structure
The script below trains the deep (L-layer) network on the cat vs. non-cat dataset loaded by lr_utils.py:

import numpy as np
from network_model import L_layer_model, L_model_forward
from lr_utils import load_dataset

def predict(X, y, parameters):
    m = X.shape[1]
    L = len(parameters) // 2
    p = np.zeros((1, m))

    probas, caches = L_model_forward(X, parameters)
    for i in range(0, probas.shape[1]):
        if probas[0, i] > 0.5:
            p[0, i] = 1
        else:
            p[0, i] = 0

    print("Accuracy: " + str(float(np.sum((p == y)) / m)))
    return p

train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()

# Flatten each image into a column vector and scale pixel values to [0, 1]
train_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T
test_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T
train_x = train_x_flatten / 255
train_y = train_set_y
test_x = test_x_flatten / 255
test_y = test_set_y

layers_dims = [12288, 20, 7, 5, 1]
parameters = L_layer_model(train_x, train_y, layers_dims, num_iterations=2500, print_cost=True)
predictions_train = predict(train_x, train_y, parameters)  # training set
predictions_test = predict(test_x, test_y, parameters)     # test set
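A small aside on where the 12288 comes from, assuming the standard 64x64 RGB images shipped with train_catvnoncat.h5 (num_px and input_size below are illustrative names, not part of the original script):

num_px = 64                        # assumed image side length
input_size = num_px * num_px * 3   # 3 colour channels -> 12288
assert input_size == layers_dims[0]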
network_model.py: the neural network model

Functions (input -> output):
- initialize_parameters_deep(): layers_dims -> parameters
- compute_cost(): AL, Y -> cost
- update_parameters(): parameters, grads, learning_rate -> parameters
- L_layer_model(): X, Y, layers_dims, learning_rate=0.0075, num_iterations=3000, print_cost=False -> parameters
Parameters:
- layers_dims: list holding the number of units in each layer of the network
- AL: activation values of the final layer
- Y: label vector corresponding to the training set
- cost: the computed cost
- parameters: dictionary of the W and b parameters
- grads: dictionary of the dW and db gradients
- learning_rate: the learning rate
- num_iterations: number of iterations
- print_cost: whether to print the cost value
import numpy as np
import matplotlib.pyplot as plt
from forward_model import L_model_forward
from backward_model import L_model_backward
def initialize_parameters_deep(layers_dims):
    np.random.seed(3)
    parameters = {}  # dictionary of the W[l] and b[l] parameters
    L = len(layers_dims)  # number of layers in the network (including the input layer)

    for l in range(1, L):
        # dividing by np.sqrt(layers_dims[l - 1]) keeps activations well scaled and helps avoid exploding/vanishing gradients
        parameters['W' + str(l)] = np.random.randn(layers_dims[l], layers_dims[l - 1]) / np.sqrt(layers_dims[l - 1])
        parameters['b' + str(l)] = np.zeros((layers_dims[l], 1))

        assert (parameters['W' + str(l)].shape == (layers_dims[l], layers_dims[l - 1]))
        assert (parameters['b' + str(l)].shape == (layers_dims[l], 1))

    return parameters
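The scaling by np.sqrt(layers_dims[l - 1]) is often described as Xavier-style initialization: each weight entry is drawn from a standard normal and shrunk by the square root of the fan-in, while the biases start at zero,

$$W^{[l]}_{ij} \sim \frac{\mathcal{N}(0,\,1)}{\sqrt{n^{[l-1]}}}, \qquad b^{[l]} = 0.$$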
def compute_cost(AL, Y):
    m = Y.shape[1]
    cost = -np.sum(np.multiply(np.log(AL), Y) + np.multiply(np.log(1 - AL), 1 - Y)) / m
    cost = np.squeeze(cost)
    assert (cost.shape == ())
    return cost
def update_parameters(parameters, grads, learning_rate):
    L = len(parameters) // 2
    for l in range(L):
        parameters["W" + str(l + 1)] = parameters["W" + str(l + 1)] - learning_rate * grads["dW" + str(l + 1)]
        parameters["b" + str(l + 1)] = parameters["b" + str(l + 1)] - learning_rate * grads["db" + str(l + 1)]
    return parameters
def L_layer_model(X, Y, layers_dims, learning_rate=0.0075, num_iterations=3000, print_cost=False):
    np.random.seed(1)
    costs = []
    acc_dev = []  # placeholder for dev-set accuracy (not filled in this script)
    parameters = initialize_parameters_deep(layers_dims)

    for i in range(0, num_iterations):
        AL, caches = L_model_forward(X, parameters)
        cost = compute_cost(AL, Y)
        grads = L_model_backward(AL, Y, caches)
        parameters = update_parameters(parameters, grads, learning_rate)

        if i % 100 == 0:
            # record the cost
            costs.append(cost)
            # optionally print the cost value
            if print_cost:
                print("Cost after iteration", i, ":", np.squeeze(cost))

    plt.plot(np.squeeze(costs))
    plt.plot(np.squeeze(acc_dev))
    plt.legend(["costs", "acc_dev"], loc=0)
    plt.ylabel('cost')
    plt.xlabel('iterations (per hundreds)')
    plt.title("Learning rate = " + str(learning_rate))
    plt.show()

    return parameters
forward_model.py: the forward propagation model

Functions (input -> output):
- linear_forward(): A_prev, W, b -> Z, cache
- linear_activation_forward(): A_prev, W, b, activation -> A, cache
- L_model_forward(): X, parameters -> AL, caches
Parameters:
- A_prev: activation values from the previous layer
- W: this layer's W parameter
- b: this layer's b parameter
- activation: the chosen activation function
- X: the training set matrix
- Z: input to the activation function, i.e. the pre-activation value
- cache: tuple containing linear_cache and activation_cache
- linear_cache: tuple containing A_prev, W and b
- activation_cache: the cached pre-activation Z
- AL: activation values of the final layer
- caches: list of every layer's cache
import numpy as np
from dnn_utils import sigmoid, relu
def linear_forward(A_prev, W, b):
    Z = np.dot(W, A_prev) + b
    assert (Z.shape == (W.shape[0], A_prev.shape[1]))
    cache = (A_prev, W, b)
    return Z, cache
def linear_activation_forward(A_prev, W, b, activation):
    # linear_cache = (A_prev, W, b)
    # activation_cache = Z
    Z, linear_cache = linear_forward(A_prev, W, b)
    if activation == "sigmoid":
        A, activation_cache = sigmoid(Z)
    elif activation == "relu":
        A, activation_cache = relu(Z)

    assert (A.shape == (W.shape[0], A_prev.shape[1]))
    cache = (linear_cache, activation_cache)
    return A, cache
def L_model_forward(X, parameters):
    caches = []
    A = X  # A[0] == X
    L = len(parameters) // 2

    # ReLU for hidden layers 1 .. L-1
    for l in range(1, L):
        A_prev = A
        A, cache = linear_activation_forward(A_prev, parameters['W' + str(l)], parameters['b' + str(l)], "relu")
        caches.append(cache)

    # Sigmoid for the output layer L
    AL, cache = linear_activation_forward(A, parameters['W' + str(L)], parameters['b' + str(L)], "sigmoid")
    caches.append(cache)

    assert (AL.shape == (1, X.shape[1]))
    return AL, caches
backward_model.py: the backward propagation model

Functions (input -> output):
- linear_backward(): dZ, linear_cache -> dA_prev, dW, db
- linear_activation_backward(): dA, cache, activation -> dA_prev, dW, db
- L_model_backward(): AL, Y, caches -> grads
Parameters:
- dZ: gradient of the cost with respect to the current layer's linear output
- dA: gradient of the cost with respect to the current layer's activation
- dW: gradient of the cost with respect to this layer's W
- db: gradient of the cost with respect to this layer's b
- activation: the chosen activation function
- cache: tuple containing linear_cache and activation_cache
- linear_cache: tuple containing A_prev, W and b
- AL: activation values of the final layer
- Y: label vector corresponding to the training set
- caches: list of every layer's cache
- grads: dictionary of the dA, dW and db gradients
import numpy as np
from dnn_utils import sigmoid_backward, relu_backward
def linear_backward(dZ, linear_cache):
    A_prev, W, b = linear_cache
    m = A_prev.shape[1]

    dW = np.dot(dZ, A_prev.T) / m
    db = np.sum(dZ, axis=1, keepdims=True) / m
    dA_prev = np.dot(W.T, dZ)

    assert (dA_prev.shape == A_prev.shape)
    assert (dW.shape == W.shape)
    assert (db.shape == b.shape)
    return dA_prev, dW, db
def linear_activation_backward(dA, cache, activation):
    linear_cache, activation_cache = cache
    if activation == "sigmoid":
        dZ = sigmoid_backward(dA, activation_cache)
    elif activation == "relu":
        dZ = relu_backward(dA, activation_cache)

    dA_prev, dW, db = linear_backward(dZ, linear_cache)
    return dA_prev, dW, db
def L_model_backward(AL, Y, caches):
    grads = {}
    L = len(caches)
    m = AL.shape[1]
    Y = Y.reshape(AL.shape)

    # derivative of the cross-entropy cost with respect to AL
    dAL = -(np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))

    # output layer (sigmoid); caches is indexed from 0, so the last layer's cache is caches[L - 1]
    current_cache = caches[L - 1]
    grads["dA" + str(L)], grads["dW" + str(L)], grads["db" + str(L)] = linear_activation_backward(dAL, current_cache, "sigmoid")

    # hidden layers (relu); reversed() walks back from layer L-1 down to layer 1
    for l in reversed(range(L - 1)):
        current_cache = caches[l]
        grads["dA" + str(l + 1)], grads["dW" + str(l + 1)], grads["db" + str(l + 1)] = linear_activation_backward(grads["dA" + str(l + 2)], current_cache, "relu")

    return grads
dnn_utils.py: activation functions

Functions (input -> output):
- sigmoid(): Z -> A, activation_cache
- sigmoid_backward(): dA, activation_cache -> dZ
- relu(): Z -> A, activation_cache
- relu_backward(): dA, activation_cache -> dZ
Parameters:
- dZ: gradient of the cost with respect to the linear output Z
- dA: gradient of the cost with respect to the activation A
- Z: input to the activation function, i.e. the pre-activation value
- A: output of the activation function
- activation_cache: the cached pre-activation Z
import numpy as np
def sigmoid(Z):
    activation_cache = Z
    A = 1 / (1 + np.exp(-Z))
    return A, activation_cache
def sigmoid_backward(dA, activation_cache):
    Z = activation_cache
    s = 1 / (1 + np.exp(-Z))
    dZ = dA * s * (1 - s)
    assert (dZ.shape == Z.shape)
    return dZ
def relu(Z):
    activation_cache = Z
    A = np.maximum(0, Z)
    assert (A.shape == Z.shape)
    return A, activation_cache
def relu_backward(dA, activation_cache):
    Z = activation_cache
    dZ = np.array(dA, copy=True)
    dZ[Z <= 0] = 0
    assert (dZ.shape == Z.shape)
    return dZ
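Both backward helpers apply the chain rule $dZ = dA \cdot g'(Z)$ with the familiar derivatives of their activations:

$$\sigma'(z) = \sigma(z)\bigl(1-\sigma(z)\bigr), \qquad \mathrm{ReLU}'(z) = \begin{cases} 1 & z > 0 \\ 0 & z \le 0 \end{cases}$$

which is why relu_backward simply copies dA and zeroes it wherever Z <= 0.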
lr_utils.py: dataset loading
import numpy as np
import h5py
def load_dataset():
    train_dataset = h5py.File('datasets/train_catvnoncat.h5', "r")
    train_set_x_orig = np.array(train_dataset["train_set_x"][:])  # your train set features
    train_set_y_orig = np.array(train_dataset["train_set_y"][:])  # your train set labels

    test_dataset = h5py.File('datasets/test_catvnoncat.h5', "r")
    test_set_x_orig = np.array(test_dataset["test_set_x"][:])  # your test set features
    test_set_y_orig = np.array(test_dataset["test_set_y"][:])  # your test set labels

    classes = np.array(test_dataset["list_classes"][:])  # the list of classes

    # reshape the label vectors into row vectors of shape (1, m)
    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))

    return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes