转载过程中,图片丢失,代码显示错乱。
为了更好的学习内容,请访问原创版本:
http://www.missshi.cn/api/view/blog/59bbcacae519f50d04000202
Ps:初次访问由于js文件较大,请耐心等候(8s左右)
本文中,我们将以代码实战的方式来学习神经网络中的初始化、正则化以及进行梯度检查。
初始化
在我们模型训练的第一步中,我们首先需要给我们的模型参数一个初始值。
一个好的初始值有助于我们整个训练过程,一方面,可以加快梯度下降的收敛速度;另一方面,使得误差更有可能减小到一个更小的值。
接下来,让我们从实践中学习一下吧:
首先,是引入相关的库:
# Imports and plotting setup for the initialization experiments.
import numpy as np
import matplotlib.pyplot as plt
import sklearn
import sklearn.datasets
from init_utils import sigmoid, relu, compute_loss, forward_propagation, backward_propagation
from init_utils import update_parameters, predict, load_dataset, plot_decision_boundary, predict_dec

# IPython magic: render figures inline in the notebook.
%matplotlib inline
plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'

# load image dataset: blue/red dots in circles
train_X, train_Y, test_X, test_Y = load_dataset()
其中,一些函数的实现如下:
def sigmoid(x):
    """
    Apply the logistic sigmoid function element-wise.

    Arguments:
    x -- a scalar or numpy array of any size

    Return:
    the sigmoid of x, with the same shape as the input
    """
    return 1.0 / (1.0 + np.exp(-x))
-
def relu(x):
    """
    Element-wise rectified linear unit: max(0, x).

    Arguments:
    x -- a scalar or numpy array of any size

    Return:
    the relu of x, with the same shape as the input
    """
    return np.maximum(x, 0)
-
def compute_loss(a3, Y):
    """
    Cross-entropy loss averaged over the mini-batch.

    Arguments:
    a3 -- post-activation, output of forward propagation, shape (1, m)
    Y -- "true" labels vector, same shape as a3

    Returns:
    loss -- scalar value of the loss function
    """
    m = Y.shape[1]

    # np.nansum ignores NaN terms arising from 0 * log(0)-style products.
    per_example = -(np.log(a3) * Y) - (np.log(1 - a3) * (1 - Y))
    loss = 1. / m * np.nansum(per_example)

    return loss
-
def forward_propagation(X, parameters):
    """
    Implements the forward propagation for the 3-layer model:
    LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID.

    Arguments:
    X -- input dataset, of shape (input size, number of examples)
    parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3"

    Returns:
    a3 -- sigmoid output of the last layer, shape (1, number of examples)
    cache -- tuple of every intermediate value, consumed by backward_propagation()
    """

    # retrieve parameters
    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]
    W3 = parameters["W3"]
    b3 = parameters["b3"]

    # LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID
    z1 = np.dot(W1, X) + b1
    a1 = relu(z1)
    z2 = np.dot(W2, a1) + b2
    a2 = relu(z2)
    z3 = np.dot(W3, a2) + b3
    a3 = sigmoid(z3)

    # Cached in this exact order; backward_propagation unpacks positionally.
    cache = (z1, a1, W1, b1, z2, a2, W2, b2, z3, a3, W3, b3)

    return a3, cache
-
def backward_propagation(X, Y, cache):
    """
    Backward pass for the 3-layer LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID model.

    Arguments:
    X -- input dataset, of shape (input size, number of examples)
    Y -- true "label" vector (containing 0 if cat, 1 if non-cat)
    cache -- cache output from forward_propagation()

    Returns:
    gradients -- dictionary mapping each parameter, activation and
                 pre-activation name to its gradient
    """
    m = X.shape[1]
    (z1, a1, W1, b1, z2, a2, W2, b2, z3, a3, W3, b3) = cache

    # The 1/m batch-average factor is folded into dz3 once, so every
    # downstream gradient inherits it.
    dz3 = 1. / m * (a3 - Y)
    dW3 = dz3 @ a2.T
    db3 = dz3.sum(axis=1, keepdims=True)

    da2 = W3.T @ dz3
    dz2 = da2 * (a2 > 0)          # ReLU gate: gradient flows only where a2 > 0
    dW2 = dz2 @ a1.T
    db2 = dz2.sum(axis=1, keepdims=True)

    da1 = W2.T @ dz2
    dz1 = da1 * (a1 > 0)
    dW1 = dz1 @ X.T
    db1 = dz1.sum(axis=1, keepdims=True)

    return {"dz3": dz3, "dW3": dW3, "db3": db3,
            "da2": da2, "dz2": dz2, "dW2": dW2, "db2": db2,
            "da1": da1, "dz1": dz1, "dW1": dW1, "db1": db1}
-
def update_parameters(parameters, grads, learning_rate):
    """
    Apply one step of plain gradient descent to every parameter.

    Arguments:
    parameters -- python dictionary with entries "W1", "b1", ..., "WL", "bL"
    grads -- python dictionary with matching gradients "dW1", "db1", ...
    learning_rate -- step size, scalar

    Returns:
    parameters -- the same dictionary, updated in place
    """
    n_layers = len(parameters) // 2  # each layer contributes one W and one b

    for layer in range(1, n_layers + 1):
        for prefix in ("W", "b"):
            key = prefix + str(layer)
            parameters[key] = parameters[key] - learning_rate * grads["d" + key]

    return parameters
-
def predict(X, y, parameters):
    """
    Predict binary labels with a trained n-layer network and print accuracy.

    Arguments:
    X -- data set of examples you would like to label, shape (features, m)
    y -- true labels, shape (1, m); used only for the printed accuracy
    parameters -- parameters of the trained model

    Returns:
    p -- 0/1 predictions for the given dataset X, shape (1, m)
    """

    m = X.shape[1]
    # np.int was removed in NumPy 1.24; use the builtin int instead.
    p = np.zeros((1, m), dtype=int)

    # Forward propagation
    a3, caches = forward_propagation(X, parameters)

    # convert probabilities to 0/1 predictions with a 0.5 threshold
    # (vectorized version of the original element-wise loop)
    p[0, :] = (a3[0, :] > 0.5).astype(int)

    # print results
    print("Accuracy: " + str(np.mean((p[0, :] == y[0, :]))))

    return p
-
def load_dataset():
    """Generate the concentric-circles train/test sets and scatter-plot the training data.

    Returns:
    train_X, test_X -- arrays of shape (2, number of examples)
    train_Y, test_Y -- label arrays of shape (1, number of examples)
    """
    # Fixed seeds make the train/test splits reproducible across runs.
    np.random.seed(1)
    train_X, train_Y = sklearn.datasets.make_circles(n_samples=300, noise=.05)
    np.random.seed(2)
    test_X, test_Y = sklearn.datasets.make_circles(n_samples=100, noise=.05)
    # Visualize the data
    plt.scatter(train_X[:, 0], train_X[:, 1], c=train_Y, s=40, cmap=plt.cm.Spectral);
    # Transpose to the (features, examples) layout used by the network code.
    train_X = train_X.T
    train_Y = train_Y.reshape((1, train_Y.shape[0]))
    test_X = test_X.T
    test_Y = test_Y.reshape((1, test_Y.shape[0]))
    return train_X, train_Y, test_X, test_Y
-
def plot_decision_boundary(model, X, y):
    """Plot a model's decision regions over a 2-D dataset together with the data points.

    Arguments:
    model -- callable mapping an (n_points, 2) array of coordinates to predictions
    X -- data of shape (2, number of examples)
    y -- labels of shape (1, number of examples), used for the point colors
    """
    # Set min and max values and give it some padding
    x_min, x_max = X[0, :].min() - 1, X[0, :].max() + 1
    y_min, y_max = X[1, :].min() - 1, X[1, :].max() + 1
    h = 0.01
    # Generate a grid of points with distance h between them
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    # Predict the function value for the whole grid
    Z = model(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    # Plot the contour and training examples
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.ylabel('x2')
    plt.xlabel('x1')
    plt.scatter(X[0, :], X[1, :], c=y, cmap=plt.cm.Spectral)
    plt.show()
-
def predict_dec(parameters, X):
    """
    Threshold the network output at 0.5; used for plotting the decision boundary.

    Arguments:
    parameters -- python dictionary containing the trained parameters
    X -- input data, shape (features, number of examples)

    Returns:
    predictions -- boolean vector of predictions (red: 0 / blue: 1)
    """
    probabilities, _ = forward_propagation(X, parameters)
    return probabilities > 0.5
我们接下来的任务就是将图中的蓝色和红色点进行分割开来。
接下来,我们需要使用一个三层的神经网络模型来对该数据集进行分类。
模型如下:
def model(X, Y, learning_rate = 0.01, num_iterations = 15000, print_cost = True, initialization = "he"):
    """
    Implements a three-layer neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID.

    Arguments:
    X -- input data, of shape (2, number of examples)
    Y -- true "label" vector (containing 0 for red dots; 1 for blue dots), of shape (1, number of examples)
    learning_rate -- learning rate for gradient descent
    num_iterations -- number of iterations to run gradient descent
    print_cost -- if True, print the cost every 1000 iterations
    initialization -- flag to choose which initialization to use ("zeros","random" or "he")

    Returns:
    parameters -- parameters learnt by the model
    """

    grads = {}
    costs = [] # to keep track of the loss
    m = X.shape[1] # number of examples
    layers_dims = [X.shape[0], 10, 5, 1]  # fixed 3-layer architecture

    # Initialize parameters dictionary.
    if initialization == "zeros":
        parameters = initialize_parameters_zeros(layers_dims)
    elif initialization == "random":
        parameters = initialize_parameters_random(layers_dims)
    elif initialization == "he":
        parameters = initialize_parameters_he(layers_dims)

    # Loop (gradient descent)
    for i in range(0, num_iterations):

        # Forward propagation: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID.
        a3, cache = forward_propagation(X, parameters)

        # Loss
        cost = compute_loss(a3, Y)

        # Backward propagation.
        grads = backward_propagation(X, Y, cache)

        # Update parameters.
        parameters = update_parameters(parameters, grads, learning_rate)

        # Print and record the loss every 1000 iterations
        if print_cost and i % 1000 == 0:
            print("Cost after iteration {}: {}".format(i, cost))
            costs.append(cost)

    # plot the loss; one point is recorded every 1000 iterations, so the
    # x-axis label was corrected from the original "per hundreds".
    plt.plot(costs)
    plt.ylabel('cost')
    plt.xlabel('iterations (per thousands)')
    plt.title("Learning rate =" + str(learning_rate))
    plt.show()

    return parameters
在这个模型中,我们支持了三种不同的初始化方法:
1.零初始化
2.随机初始化
3.He初始化
三种初始化方法的实现如下:
零初始化:
def initialize_parameters_zeros(layers_dims):
    """
    Initialize every weight matrix and bias vector to all zeros.

    Arguments:
    layers_dims -- python array (list) containing the size of each layer.

    Returns:
    parameters -- python dictionary with entries "W1", "b1", ..., "WL", "bL" where
                  Wl has shape (layers_dims[l], layers_dims[l-1]) and
                  bl has shape (layers_dims[l], 1)
    """
    parameters = {}

    # Pair each layer size with the previous one; layer numbering starts at 1.
    for l, (n_prev, n_curr) in enumerate(zip(layers_dims, layers_dims[1:]), start=1):
        parameters["W" + str(l)] = np.zeros((n_curr, n_prev))
        parameters["b" + str(l)] = np.zeros((n_curr, 1))

    return parameters
训练一下吧:
# Train with all-zero initialization and report train/test accuracy.
parameters = model(train_X, train_Y, initialization = "zeros")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
观察结果,我们可以很容易的发现,在使用零初始化时,训练过程完全无效。
对于训练集和测试集的预测结果如下:
# Inspect the (all-zero) predictions and plot the decision boundary.
print ("predictions_train = " + str(predictions_train))
print ("predictions_test = " + str(predictions_test))
plt.title("Model with Zeros initialization")
axes = plt.gca()
axes.set_xlim([-1.5,1.5])
axes.set_ylim([-1.5,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
从结果和图中,我们可以看出所有的样本的预测结果全部为0。
通常来说,零初始化都会导致神经网络无法打破对称性,最终导致的结果就是无论网络有多少层,最终只能得到和Logistic回归相同的效果。
随机初始化:
上面的分析中,我们已经可以看出零初始化无法得到一个很好的结果,接下来,我们使用随机初始化来看一下效果吧:
def initialize_parameters_random(layers_dims):
    """
    Initialize weights from a scaled standard normal distribution; biases at zero.

    The deliberately large factor of 10 is used in this exercise to show how
    oversized initial weights hurt training.

    Arguments:
    layers_dims -- python array (list) containing the size of each layer.

    Returns:
    parameters -- python dictionary with entries "W1", "b1", ..., "WL", "bL" where
                  Wl has shape (layers_dims[l], layers_dims[l-1]) and
                  bl has shape (layers_dims[l], 1)
    """
    np.random.seed(3)  # fixed seed so the "random" numbers are the same as ours
    parameters = {}

    for l in range(1, len(layers_dims)):
        fan_out, fan_in = layers_dims[l], layers_dims[l - 1]
        parameters["W" + str(l)] = np.random.randn(fan_out, fan_in) * 10
        parameters["b" + str(l)] = np.zeros((fan_out, 1))

    return parameters
训练一下吧:
# Train with large random initialization and report train/test accuracy.
parameters = model(train_X, train_Y, initialization = "random")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
从上面的结果可以看出,样本的训练已经有了效果,模型的预测结果已经不再是全部为零啦!
# Inspect the predictions and plot the decision boundary.
print (predictions_train)
print (predictions_test)
plt.title("Model with large random initialization")
axes = plt.gca()
axes.set_xlim([-1.5,1.5])
axes.set_ylim([-1.5,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
从上面的练习中,我们可以观察到:
1.代价函数的初始值很高,这是由于我们选择了较大的初始值。
2.初始化选择的不恰当可能会导致梯度消失或梯度爆炸,这些都会导致优化算法的性能下降。
3.随着迭代次数的增加,最终的分离结果还能有一定的提升。但是如果初始值选择更大的话,性能也会随之降低。
He初始化:
He初始化与Xavier初始化十分类似。
Xavier初始化中,权重的缩放系数为 sqrt(1/layers_dims[l-1])。
He初始化中,权重的缩放系数为 sqrt(2/layers_dims[l-1])。
def initialize_parameters_he(layers_dims):
    """
    He initialization: weights drawn from N(0, 1) and scaled by sqrt(2 / fan_in);
    biases at zero. Works well with ReLU activations.

    Arguments:
    layers_dims -- python array (list) containing the size of each layer.

    Returns:
    parameters -- python dictionary with entries "W1", "b1", ..., "WL", "bL" where
                  Wl has shape (layers_dims[l], layers_dims[l-1]) and
                  bl has shape (layers_dims[l], 1)
    """
    np.random.seed(3)  # fixed seed for reproducibility
    parameters = {}
    num_layers = len(layers_dims) - 1

    for l in range(1, num_layers + 1):
        fan_in = layers_dims[l - 1]
        scale = np.sqrt(2. / fan_in)
        parameters["W" + str(l)] = np.random.randn(layers_dims[l], fan_in) * scale
        parameters["b" + str(l)] = np.zeros((layers_dims[l], 1))

    return parameters
训练一下吧:
# Train with He initialization, report accuracy and plot the boundary.
parameters = model(train_X, train_Y, initialization = "he")
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
plt.title("Model with He initialization")
axes = plt.gca()
axes.set_xlim([-1.5,1.5])
axes.set_ylim([-1.5,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
从上图可以看出,使用He初始时,可以得到一个相对较好的分类结果。
在上面的学习中,我们尝试了三种不同的初始化方法,在相同的超参数和迭代次数的情况下,比较结果如下:
总结一下:
1. 不同的初始化方法可能导致最终不同的性能
2. 随机初始化有助于打破对称,使得不同隐藏层的单元可以学习到不同的参数。
3. 初始化时,初始值不宜过大。
4. He初始化搭配ReLU激活函数常常可以得到不错的效果。
正则化
在深度学习中,如果数据集没有足够大的话,可能会导致一些过拟合的问题。
过拟合导致的结果就是在训练集上有着很高的精确度,但是在遇到新的样本时,精确度下降严重。
为了避免过拟合的问题,接下来我们要讲解的方式就是正则化。
首先,引入相关的库:
# Imports and plotting setup for the regularization experiments.
import numpy as np
import matplotlib.pyplot as plt
from reg_utils import sigmoid, relu, plot_decision_boundary, initialize_parameters, load_2D_dataset, predict_dec
from reg_utils import compute_cost, predict, forward_propagation, backward_propagation, update_parameters
import sklearn
import sklearn.datasets
import scipy.io
from testCases import *

# IPython magic: render figures inline in the notebook.
%matplotlib inline
plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
其中,一些库中的函数如下:
def initialize_parameters(layer_dims):
    """
    Xavier-style initialization: weights drawn from N(0, 1) / sqrt(fan_in),
    biases at zero.

    Arguments:
    layer_dims -- python array (list) containing the dimensions of each layer in our network

    Returns:
    parameters -- python dictionary containing your parameters "W1", "b1", ..., "WL", "bL":
                  Wl -- weight matrix of shape (layer_dims[l], layer_dims[l-1])
                  bl -- bias vector of shape (layer_dims[l], 1)
    """

    np.random.seed(3)
    parameters = {}
    L = len(layer_dims) # number of layers in the network

    for l in range(1, L):
        parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l-1]) / np.sqrt(layer_dims[l-1])
        parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))

        # The original asserts compared against a bare tuple expression
        # (always truthy, so they never fired) and checked 'W' twice with
        # the wrong shape; assert the actual shapes instead.
        assert parameters['W' + str(l)].shape == (layer_dims[l], layer_dims[l-1])
        assert parameters['b' + str(l)].shape == (layer_dims[l], 1)

    return parameters
-
def compute_cost(a3, Y):
    """
    Cross-entropy cost averaged over the batch.

    Arguments:
    a3 -- post-activation, output of forward propagation
    Y -- "true" labels vector, same shape as a3

    Returns:
    cost -- scalar value of the cost function
    """
    m = Y.shape[1]

    # nansum skips NaN entries produced by 0 * log(0)-style products.
    losses = np.multiply(Y, -np.log(a3)) + np.multiply(1 - Y, -np.log(1 - a3))
    cost = 1. / m * np.nansum(losses)

    return cost
-
def load_2D_dataset():
    """Load the football-position dataset from datasets/data.mat and scatter-plot the training points.

    Returns:
    train_X, test_X -- arrays of shape (2, number of examples)
    train_Y, test_Y -- label arrays of shape (1, number of examples)
    """
    data = scipy.io.loadmat('datasets/data.mat')
    # Transpose to the (features, examples) layout used by the network code.
    train_X = data['X'].T
    train_Y = data['y'].T
    test_X = data['Xval'].T
    test_Y = data['yval'].T

    plt.scatter(train_X[0, :], train_X[1, :], c=train_Y, s=40, cmap=plt.cm.Spectral);

    return train_X, train_Y, test_X, test_Y
Ps:其中,load_2D_dataset()中的数据集下载地址如下:
请访问http://www.missshi.cn/#/books搜索data.mat进行下载,首次访问Js可能加载稍慢,请耐心等候(约10s)。
如果感觉不错希望大家推广下网站哈!不建议大家把训练集直接在QQ群或CSDN上直接分享。
问题描述:
假设你现在是一个AI专家,你需要设计一个模型,可以用于推荐在足球场中守门员将球发至哪个位置可以让本队的球员抢到球的可能性更大。
# Load the 2D football dataset (this call also scatter-plots it).
train_X, train_Y, test_X, test_Y = load_2D_dataset()
上图中,每一个点对应一个足球落下的位置。
对于蓝色的点,表示我方足球队员抢到球;对于红色的点,则表示对方球员抢到球。
我们的目标是建立一个模型,来找到适合我方球员能抢到球的位置。
模型如下:
def model(X, Y, learning_rate = 0.3, num_iterations = 30000, print_cost = True, lambd = 0, keep_prob = 1):
    """
    Implements a three-layer neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID.

    Arguments:
    X -- input data, of shape (input size, number of examples)
    Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (output size, number of examples)
    learning_rate -- learning rate of the optimization
    num_iterations -- number of iterations of the optimization loop
    print_cost -- If True, print the cost every 10000 iterations
    lambd -- L2 regularization hyperparameter, scalar; 0 disables L2
    keep_prob -- probability of keeping a neuron active during drop-out, scalar; 1 disables dropout

    Returns:
    parameters -- parameters learned by the model. They can then be used to predict.
    """

    grads = {}
    costs = [] # to keep track of the cost
    m = X.shape[1] # number of examples
    layers_dims = [X.shape[0], 20, 3, 1]  # fixed 3-layer architecture

    # Initialize parameters dictionary.
    parameters = initialize_parameters(layers_dims)

    # Loop (gradient descent)

    for i in range(0, num_iterations):

        # Forward propagation: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID.
        # Dropout needs its own forward pass so the masks end up in the cache.
        if keep_prob == 1:
            a3, cache = forward_propagation(X, parameters)
        elif keep_prob < 1:
            a3, cache = forward_propagation_with_dropout(X, parameters, keep_prob)

        # Cost function (L2 adds a weight-norm penalty term)
        if lambd == 0:
            cost = compute_cost(a3, Y)
        else:
            cost = compute_cost_with_regularization(a3, Y, parameters, lambd)

        # Backward propagation.
        assert(lambd==0 or keep_prob==1)    # L2 and dropout could be combined in principle,
                                            # but this assignment explores only one at a time
        if lambd == 0 and keep_prob == 1:
            grads = backward_propagation(X, Y, cache)
        elif lambd != 0:
            grads = backward_propagation_with_regularization(X, Y, cache, lambd)
        elif keep_prob < 1:
            grads = backward_propagation_with_dropout(X, Y, cache, keep_prob)

        # Update parameters.
        parameters = update_parameters(parameters, grads, learning_rate)

        # Print the loss every 10000 iterations, record it every 1000
        if print_cost and i % 10000 == 0:
            print("Cost after iteration {}: {}".format(i, cost))
        if print_cost and i % 1000 == 0:
            costs.append(cost)

    # plot the cost
    plt.plot(costs)
    plt.ylabel('cost')
    plt.xlabel('iterations (x1,000)')
    plt.title("Learning rate =" + str(learning_rate))
    plt.show()

    return parameters
首先,我们使用无正则化的模型进行训练:
# Baseline: train without any regularization and report accuracy.
parameters = model(train_X, train_Y)
print ("On the training set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
我们可以看到,对于训练集,精确度为94%;而对于测试集,精确度为91.5%。
接下来,我们将分割曲线画出来:
# Plot the (overfitted) decision boundary of the unregularized model.
plt.title("Model without regularization")
axes = plt.gca()
axes.set_xlim([-0.75,0.40])
axes.set_ylim([-0.75,0.65])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
从图中可以看出,在无正则化时,分割曲线有了明显的过拟合特性。
接下来,我们使用L2方法进行正则化,相关的实现函数如下:
def compute_cost_with_regularization(A3, Y, parameters, lambd):
    """
    Cross-entropy cost plus the L2 penalty on all three weight matrices.

    Arguments:
    A3 -- post-activation, output of forward propagation, of shape (output size, number of examples)
    Y -- "true" labels vector, of shape (output size, number of examples)
    parameters -- python dictionary containing parameters of the model
    lambd -- regularization hyperparameter, scalar

    Returns:
    cost -- value of the regularized loss function
    """
    m = Y.shape[1]

    cross_entropy_cost = compute_cost(A3, Y)  # unregularized part of the cost

    # L2 penalty: (lambda / 2m) * sum of squared weights over W1, W2, W3.
    weight_norms = sum(np.sum(np.square(parameters["W" + str(l)])) for l in (1, 2, 3))
    L2_regularization_cost = 1.0 / m * lambd / 2 * weight_norms

    return cross_entropy_cost + L2_regularization_cost
-
def backward_propagation_with_regularization(X, Y, cache, lambd):
    """
    Backward pass of the baseline model with the L2 penalty's gradient added.

    Arguments:
    X -- input dataset, of shape (input size, number of examples)
    Y -- "true" labels vector, of shape (output size, number of examples)
    cache -- cache output from forward_propagation()
    lambd -- regularization hyperparameter, scalar

    Returns:
    gradients -- dictionary of gradients with respect to each parameter,
                 activation and pre-activation variable
    """
    m = X.shape[1]
    (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache
    reg = lambd / m  # each dW picks up a (lambd/m) * W term from the L2 penalty

    dZ3 = A3 - Y
    dW3 = 1. / m * np.dot(dZ3, A2.T) + reg * W3
    db3 = 1. / m * np.sum(dZ3, axis=1, keepdims=True)

    dA2 = np.dot(W3.T, dZ3)
    dZ2 = dA2 * (A2 > 0)   # ReLU derivative: pass gradient only where A2 > 0
    dW2 = 1. / m * np.dot(dZ2, A1.T) + reg * W2
    db2 = 1. / m * np.sum(dZ2, axis=1, keepdims=True)

    dA1 = np.dot(W2.T, dZ2)
    dZ1 = dA1 * (A1 > 0)
    dW1 = 1. / m * np.dot(dZ1, X.T) + reg * W1
    db1 = 1. / m * np.sum(dZ1, axis=1, keepdims=True)

    return {"dZ3": dZ3, "dW3": dW3, "db3": db3, "dA2": dA2,
            "dZ2": dZ2, "dW2": dW2, "db2": db2, "dA1": dA1,
            "dZ1": dZ1, "dW1": dW1, "db1": db1}
接下来,我们用L2正则模型进行训练:
# Train with L2 regularization (lambda = 0.7) and report accuracy.
parameters = model(train_X, train_Y, lambd = 0.7)
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
分割曲线如下:
# Plot the decision boundary of the L2-regularized model.
plt.title("Model with L2-regularization")
axes = plt.gca()
axes.set_xlim([-0.75,0.40])
axes.set_ylim([-0.75,0.65])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
最后,我们使用Dropout来进行正则化,Dropout的原理就是每次迭代过程中随机将其中的一些神经元失效。
Dropout的实现如下:
def forward_propagation_with_dropout(X, parameters, keep_prob = 0.5):
    """
    Implements the forward propagation: LINEAR -> RELU + DROPOUT -> LINEAR -> RELU + DROPOUT -> LINEAR -> SIGMOID.

    Arguments:
    X -- input dataset, of shape (2, number of examples)
    parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3":
                    W1 -- weight matrix of shape (20, 2)
                    b1 -- bias vector of shape (20, 1)
                    W2 -- weight matrix of shape (3, 20)
                    b2 -- bias vector of shape (3, 1)
                    W3 -- weight matrix of shape (1, 3)
                    b3 -- bias vector of shape (1, 1)
    keep_prob - probability of keeping a neuron active during drop-out, scalar

    Returns:
    A3 -- last activation value, output of the forward propagation, of shape (1,1)
    cache -- tuple, information stored for computing the backward propagation
             (includes the dropout masks D1 and D2)
    """

    # Fixed seed so the dropout masks are reproducible; the two
    # np.random.rand calls below must stay in this order to preserve
    # the random stream.
    np.random.seed(1)

    # retrieve parameters
    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]
    W3 = parameters["W3"]
    b3 = parameters["b3"]

    # LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID
    Z1 = np.dot(W1, X) + b1
    A1 = relu(Z1)
    D1 = np.random.rand(A1.shape[0], A1.shape[1])     # Step 1: initialize matrix D1 = np.random.rand(..., ...)
    D1 = (D1 < keep_prob)                             # Step 2: convert entries of D1 to 0 or 1 (using keep_prob as the threshold)
    A1 = A1 * D1                                      # Step 3: shut down some neurons of A1
    A1 = A1 / keep_prob                               # Step 4: scale the value of neurons that haven't been shut down (inverted dropout)
    Z2 = np.dot(W2, A1) + b2
    A2 = relu(Z2)
    D2 = np.random.rand(A2.shape[0], A2.shape[1])     # Step 1: initialize matrix D2 = np.random.rand(..., ...)
    D2 = (D2 < keep_prob)                             # Step 2: convert entries of D2 to 0 or 1 (using keep_prob as the threshold)
    A2 = A2 * D2                                      # Step 3: shut down some neurons of A2
    A2 = A2 / keep_prob                               # Step 4: scale the value of neurons that haven't been shut down (inverted dropout)
    Z3 = np.dot(W3, A2) + b3
    A3 = sigmoid(Z3)

    # Masks D1/D2 are cached so the backward pass can drop the same neurons.
    cache = (Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3)

    return A3, cache
-
def backward_propagation_with_dropout(X, Y, cache, keep_prob):
    """
    Backward pass of the baseline model with the dropout masks re-applied.

    Arguments:
    X -- input dataset, of shape (2, number of examples)
    Y -- "true" labels vector, of shape (output size, number of examples)
    cache -- cache output from forward_propagation_with_dropout()
    keep_prob -- probability of keeping a neuron active during drop-out, scalar

    Returns:
    gradients -- dictionary of gradients with respect to each parameter,
                 activation and pre-activation variable
    """
    m = X.shape[1]
    (Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3) = cache

    dZ3 = A3 - Y
    dW3 = 1. / m * np.dot(dZ3, A2.T)
    db3 = 1. / m * np.sum(dZ3, axis=1, keepdims=True)

    # A unit dropped in the forward pass receives no gradient, and the
    # surviving units are rescaled by the same 1/keep_prob factor.
    dA2 = np.dot(W3.T, dZ3)
    dA2 = dA2 * D2 / keep_prob
    dZ2 = dA2 * (A2 > 0)   # ReLU derivative
    dW2 = 1. / m * np.dot(dZ2, A1.T)
    db2 = 1. / m * np.sum(dZ2, axis=1, keepdims=True)

    dA1 = np.dot(W2.T, dZ2)
    dA1 = dA1 * D1 / keep_prob
    dZ1 = dA1 * (A1 > 0)
    dW1 = 1. / m * np.dot(dZ1, X.T)
    db1 = 1. / m * np.sum(dZ1, axis=1, keepdims=True)

    return {"dZ3": dZ3, "dW3": dW3, "db3": db3, "dA2": dA2,
            "dZ2": dZ2, "dW2": dW2, "db2": db2, "dA1": dA1,
            "dZ1": dZ1, "dW1": dW1, "db1": db1}
接下来,使用带有Dropout的模型来训练吧:
# Train with dropout (keep_prob = 0.86) and report accuracy.
parameters = model(train_X, train_Y, keep_prob = 0.86, learning_rate = 0.3)

print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
查看结果,我们可以发现添加Dropout后,对于测试集,精确度提升到了95%。
接下来,我们来看一下分割曲线图吧:
# Plot the decision boundary of the dropout-regularized model.
plt.title("Model with dropout")
axes = plt.gca()
axes.set_xlim([-0.75,0.40])
axes.set_ylim([-0.75,0.65])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
对比三种方法,结果如下:
梯度检查
在接下来的内容,我们将学习怎么样进行梯度检查。
假设你现在是一个全球移动支付团队中的一员,现在需要建立一个深度学习模型去判断用户账户在进行付款的时候是否是被黑客入侵的。
但是,在我们执行反向传播的计算过程中,反向传播函数的计算过程是比较复杂的。为了验证我们得到的反向传播函数是否正确,现在你需要编写一些代码来验证反向传播函数的正确性。
首先,引入相关的库:
- import numpy as np
- from testCases import *
- from gc_utils import sigmoid, relu, dictionary_to_vector, vector_to_dictionary, gradients_to_vector
其中,涉及到的一些函数如下:
def dictionary_to_vector(parameters):
    """
    Flatten the parameter dictionary into one column vector.

    Arguments:
    parameters -- dictionary with entries "W1", "b1", "W2", "b2", "W3", "b3"

    Returns:
    theta -- column vector of shape (total parameter count, 1) holding the
             values of W1, b1, W2, b2, W3, b3 concatenated in that fixed order
    keys -- list with one key name per row of theta
    """
    columns = []
    keys = []
    for name in ["W1", "b1", "W2", "b2", "W3", "b3"]:
        flat = np.reshape(parameters[name], (-1, 1))
        columns.append(flat)
        keys.extend([name] * flat.shape[0])

    theta = np.concatenate(columns, axis=0)
    return theta, keys
-
def vector_to_dictionary(theta):
    """
    Unroll a (47, 1) vector back into the parameter dictionary for the fixed
    4-5-3-1 network used in this gradient-check exercise.
    """
    # (name, shape) in the same order dictionary_to_vector emits them
    layout = [("W1", (5, 4)), ("b1", (5, 1)),
              ("W2", (3, 5)), ("b2", (3, 1)),
              ("W3", (1, 3)), ("b3", (1, 1))]
    parameters = {}
    offset = 0
    for name, shape in layout:
        size = shape[0] * shape[1]
        parameters[name] = theta[offset:offset + size].reshape(shape)
        offset += size

    return parameters
-
def gradients_to_vector(gradients):
    """
    Flatten the gradient dictionary into one column vector, in the same fixed
    order used by dictionary_to_vector.
    """
    pieces = [np.reshape(gradients[name], (-1, 1))
              for name in ["dW1", "db1", "dW2", "db2", "dW3", "db3"]]
    return np.concatenate(pieces, axis=0)
接下来,我们首先看下一维线性模型的梯度检查:
前向传播过程:
def forward_propagation(x, theta):
    """
    Linear forward propagation: J(theta) = theta * x.

    Arguments:
    x -- a real-valued input
    theta -- our parameter, a real number as well

    Returns:
    J -- the value of the function J(theta) = theta * x
    """
    return theta * x
反向传播过程:
def backward_propagation(x, theta):
    """
    Derivative of J(theta) = theta * x with respect to theta, which is simply x.

    Arguments:
    x -- a real-valued input
    theta -- our parameter, a real number as well

    Returns:
    dtheta -- the gradient of the cost with respect to theta
    """
    return x
接下来,梯度检查的步骤如下:
接下来,计算梯度的反向传播值。
最后计算误差:
当difference小于10^-7(即1e-7)时,我们通常认为我们计算的结果是正确的。
def gradient_check(x, theta, epsilon = 1e-7):
    """
    Check backward_propagation's analytic gradient of J(theta) = theta * x
    against a centered finite-difference approximation.

    Arguments:
    x -- a real-valued input
    theta -- our parameter, a real number as well
    epsilon -- tiny shift to the input to compute the approximated gradient

    Returns:
    difference -- relative difference between the approximated gradient and
                  the backward propagation gradient
    """

    # Two-sided difference: (J(theta+eps) - J(theta-eps)) / (2*eps).
    # The original computed thetaplus/thetaminus but never used them and
    # inlined J = x * theta instead of calling forward_propagation.
    thetaplus = theta + epsilon                       # Step 1
    thetaminus = theta - epsilon                      # Step 2
    J_plus = forward_propagation(x, thetaplus)        # Step 3
    J_minus = forward_propagation(x, thetaminus)      # Step 4
    gradapprox = (J_plus - J_minus) / 2 / epsilon     # Step 5

    # Analytic gradient from backprop
    grad = backward_propagation(x, theta)

    # Relative difference: ||grad - gradapprox|| / (||grad|| + ||gradapprox||)
    numerator = np.linalg.norm(grad - gradapprox)                     # Step 1'
    denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox)   # Step 2'
    difference = numerator / denominator                              # Step 3'

    if difference < 1e-7:
        print ("The gradient is correct!")
    else:
        print ("The gradient is wrong!")

    return difference
测试一下吧:
# Quick sanity check of the 1-D gradient check.
x, theta = 2, 4
difference = gradient_check(x, theta)
print("difference = " + str(difference))
接下来,我们来看下对于N维的梯度检查吧:
前向传播如下:
def forward_propagation_n(X, Y, parameters):
    """
    Forward pass (and cost) for the 3-layer model used in gradient checking:
    LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID.

    Arguments:
    X -- training set for m examples
    Y -- labels for m examples
    parameters -- python dictionary containing your parameters "W1", "b1",
                  "W2", "b2", "W3", "b3" with shapes (5,4), (5,1), (3,5),
                  (3,1), (1,3), (1,1)

    Returns:
    cost -- the logistic cost averaged over the m examples
    cache -- tuple of intermediates consumed by backward_propagation_n()
    """
    m = X.shape[1]
    W1, b1 = parameters["W1"], parameters["b1"]
    W2, b2 = parameters["W2"], parameters["b2"]
    W3, b3 = parameters["W3"], parameters["b3"]

    # LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID
    Z1 = np.dot(W1, X) + b1
    A1 = relu(Z1)
    Z2 = np.dot(W2, A1) + b2
    A2 = relu(Z2)
    Z3 = np.dot(W3, A2) + b3
    A3 = sigmoid(Z3)

    # Logistic cost averaged over the batch
    logprobs = np.multiply(-np.log(A3), Y) + np.multiply(-np.log(1 - A3), 1 - Y)
    cost = 1. / m * np.sum(logprobs)

    cache = (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3)

    return cost, cache
反向传播如下:
def backward_propagation_n(X, Y, cache):
    """
    Backward pass for the 3-layer network of forward_propagation_n.

    Arguments:
    X -- input data, of shape (input size, m)
    Y -- true "label" vector, of shape (1, m)
    cache -- tuple (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3)
             produced by forward_propagation_n

    Returns:
    gradients -- dictionary with the gradients of the cost with respect to
                 each parameter, activation and pre-activation variable
    """
    m = X.shape[1]
    (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache
    inv_m = 1. / m

    # Output layer: sigmoid + cross-entropy collapses to A3 - Y.
    dZ3 = A3 - Y
    dW3 = inv_m * np.dot(dZ3, A2.T)
    db3 = inv_m * np.sum(dZ3, axis=1, keepdims=True)

    # Hidden layer 2 (ReLU: gradient flows only where the activation was positive).
    dA2 = np.dot(W3.T, dZ3)
    dZ2 = dA2 * np.int64(A2 > 0)
    dW2 = inv_m * np.dot(dZ2, A1.T)
    db2 = inv_m * np.sum(dZ2, axis=1, keepdims=True)

    # Hidden layer 1.
    dA1 = np.dot(W2.T, dZ2)
    dZ1 = dA1 * np.int64(A1 > 0)
    dW1 = inv_m * np.dot(dZ1, X.T)
    db1 = inv_m * np.sum(dZ1, axis=1, keepdims=True)

    return {"dZ3": dZ3, "dW3": dW3, "db3": db3,
            "dA2": dA2, "dZ2": dZ2, "dW2": dW2, "db2": db2,
            "dA1": dA1, "dZ1": dZ1, "dW1": dW1, "db1": db1}
对于多维度的情况,求导(数值近似)的公式仍然是中心差分:
$$\frac{\partial J}{\partial \theta} \approx \frac{J(\theta + \varepsilon) - J(\theta - \varepsilon)}{2\varepsilon}$$
然而,此时 $\theta$ 不再是一个标量,而是一个矢量了。
类似的是,对于多维度而言,实现的过程如下:
1. 对每一个参数分量 $\theta_i$,分别计算 $J(\theta_i + \varepsilon)$ 与 $J(\theta_i - \varepsilon)$,按中心差分得到 gradapprox
2. 利用反向传播计算解析梯度 grad
3. 计算二者的相对误差:
$$difference = \frac{\lVert grad - gradapprox \rVert_2}{\lVert grad \rVert_2 + \lVert gradapprox \rVert_2}$$
实现过程如下:
def gradient_check_n(parameters, gradients, X, Y, epsilon = 1e-7):
    """
    Check that backward_propagation_n computes the gradient of the cost
    produced by forward_propagation_n, using centered finite differences.

    Arguments:
    parameters -- python dictionary containing "W1", "b1", "W2", "b2", "W3", "b3"
    gradients -- output of backward_propagation_n, gradients of the cost
                 with respect to the parameters
    X -- input datapoint(s), of shape (input size, number of examples)
    Y -- true "label"
    epsilon -- tiny shift used for the finite-difference approximation

    Returns:
    difference -- relative difference between the numerical approximation
                  and the backward-propagation gradient
    """
    # Flatten parameters and gradients into column vectors so that each
    # scalar entry can be perturbed independently.
    parameters_values, _ = dictionary_to_vector(parameters)
    grad = gradients_to_vector(gradients)
    num_parameters = parameters_values.shape[0]
    J_plus = np.zeros((num_parameters, 1))
    J_minus = np.zeros((num_parameters, 1))
    gradapprox = np.zeros((num_parameters, 1))

    # Centered difference per coordinate:
    # dJ/dtheta_i ~= (J(theta_i + eps) - J(theta_i - eps)) / (2 * eps)
    for i in range(num_parameters):
        # Cost with the i-th parameter nudged up by epsilon.
        thetaplus = np.copy(parameters_values)
        thetaplus[i][0] += epsilon
        J_plus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(thetaplus))

        # Cost with the i-th parameter nudged down by epsilon.
        thetaminus = np.copy(parameters_values)
        thetaminus[i][0] -= epsilon
        J_minus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(thetaminus))

        gradapprox[i] = (J_plus[i] - J_minus[i]) / 2 / epsilon

    # Relative error: ||grad - gradapprox|| / (||grad|| + ||gradapprox||).
    numerator = np.linalg.norm(grad - gradapprox)
    denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox)
    difference = numerator / denominator

    if difference > 1e-7:
        print ("\033[93m" + "There is a mistake in the backward propagation! difference = " + str(difference) + "\033[0m")
    else:
        print ("\033[92m" + "Your backward propagation works perfectly fine! difference = " + str(difference) + "\033[0m")

    return difference
到此为止,本文的内容就完成啦!接下来小伙伴们可以自己多多勤加练习!
更多更详细的内容,请访问原创网站:
http://www.missshi.cn/api/view/blog/59bbcacae519f50d04000202
Ps:初次访问由于js文件较大,请耐心等候(8s左右)