1. Save the following code as optim_utils.py (the functions used by this assignment are mainly: mini_init, random_mini_batches, initialize_velocity, initialize_adam, update_parameters_with_momentum, update_parameters_with_adam). It implements batch gradient descent, mini-batch gradient descent, stochastic gradient descent, momentum, and Adam. The code is as follows:
import numpy as np
import matplotlib.pyplot as plt
import h5py
import scipy.io
import sklearn.datasets
import sklearn.linear_model
import math
#%matplotlib inline
def load_data_cat():
x_train_set = h5py.File('datasets/train_catvnoncat.h5','r')
x_train = np.array(x_train_set['train_set_x'][:])
y_train = np.array(x_train_set['train_set_y'][:])
x_test_set = h5py.File('datasets/test_catvnoncat.h5','r')
x_test = np.array(x_test_set['test_set_x'][:])
y_test = np.array(x_test_set['test_set_y'][:])
num_px = x_train.shape[1]
x_train = x_train.reshape(-1,num_px*num_px*3).T
x_test = x_test.reshape(-1,num_px*num_px*3).T
y_train = y_train.reshape(1,-1)
y_test = y_test.reshape(1,-1)
label_names = np.array(x_test_set['list_classes'][:])
x_train = x_train / 255.0
x_test = x_test / 255.0
return x_train,x_test,y_train,y_test,label_names
def load_2D_dataset():
data = scipy.io.loadmat('datasets/data.mat')
x_train = data['X'].T
y_train = data['y'].T
x_test = data['Xval'].T
y_test = data['yval'].T
plt.figure(1)
plt.scatter(x_train[0, :], x_train[1, :], c=y_train.ravel(), s=40, cmap=plt.cm.Spectral)  # ravel() so c is 1-D, as newer matplotlib requires
plt.show()
return x_train,x_test,y_train,y_test
def load_data():
from sklearn.datasets import make_circles
np.random.seed(1)
x_train,y_train = make_circles(n_samples=300, shuffle=True,noise = 0.05,factor=0.8)
np.random.seed(2)
x_test,y_test = make_circles(n_samples=100,shuffle=True,noise=0.05,factor=0.8)
#visualize data
plt.scatter(x_train[:,0],x_train[:,1],c=y_train,s=40,cmap = plt.cm.Spectral)
x_train = x_train.T
y_train = y_train.reshape(1,-1)
x_test = x_test.T
y_test = y_test.reshape(1,-1)
return x_train,x_test,y_train,y_test
def load_data_moon():
np.random.seed(3)
x_train,y_train = sklearn.datasets.make_moons(n_samples=300,noise=0.2)
np.random.seed(4)
x_test, y_test = sklearn.datasets.make_moons(n_samples=100,noise=0.2)
# Visualize the data
plt.scatter(x_train[:,0], x_train[:,1],c=y_train,s=40,cmap=plt.cm.Spectral)
x_train = x_train.T
x_test = x_test.T
y_train = y_train.reshape(1,-1)
y_test = y_test.reshape(1,-1)
return x_train,x_test,y_train,y_test
def plot_decision_boundary(model,X,Y):
'''
Plot the decision boundary
'''
x_min,x_max = X[0,:].min()-1.0,X[0,:].max()+1.0
y_min,y_max = X[1,:].min()-1.0,X[1,:].max()+1.0
h = 0.01
xx,yy = np.meshgrid(np.arange(x_min,x_max,h),np.arange(y_min,y_max,h))
z = model(np.c_[xx.ravel(),yy.ravel()].T)
z = z.reshape(xx.shape)
plt.contourf(xx,yy,z,cmap=plt.cm.Spectral)
plt.xlabel('feature 1')
plt.ylabel('feature 2')
plt.xlim([-2.0,2.0])
plt.ylim([-2.0,2.0])
plt.scatter(X[0,:],X[1,:],c=Y.ravel(),cmap=plt.cm.Spectral)  # ravel() so c is 1-D
plt.show()
def model_pred(X,parameters,activation_list):
'''
Run a forward pass with the learned parameters and return the predictions for X.
'''
n_layers = len(activation_list)
m_samples = X.shape[1]
caches = []
for i in range(n_layers):
cache_temp = {}
if (i == 0):
cache_temp['Z'] = X
cache_temp['A'] = X
caches.append(cache_temp)
else:
cache_temp['Z'] = np.random.randn(parameters[i]['b'].shape[0],m_samples)
cache_temp['A'] = np.random.randn(parameters[i]['b'].shape[0],m_samples)
caches.append(cache_temp)
caches = n_layers_forward(parameters,caches, activation_list)
predictions = caches[n_layers-1]['A'].reshape(-1,1)
return predictions
#image datas
#x_train, x_test,y_train,y_test,label_names = load_data_cat()
# data used in make_circles
# x_train, x_test,y_train,y_test = load_data()
# #plt.show()
# units_list = [x_train.shape[0],3,2,1]
# activation_list = ['None','relu','relu','sigmoid']
# learning_rate = 0.01
def sigmoid_forward(Z):
'''
Sigmoid activation: A = 1.0/(1.0 + exp(-Z))
returns A
'''
A = 1.0/(1.0 + np.exp(-Z))
assert(Z.shape == A.shape)
return A
def sigmoid_backward(dA,Z):
'''
Inputs:
dA: the backprop derivations of A
Z: in forwardprop A = g(Z)
return:
dZ: the gradient of Z
'''
temp_A = sigmoid_forward(Z)
# dZ = dA*A(1-A)
dZ = np.multiply(dA,np.multiply(temp_A,(1.0-temp_A)))
assert(dA.shape == dZ.shape)
return dZ
def relu_forward(Z):
'''
relu calc
'''
A = np.maximum(0,Z)
assert(A.shape == Z.shape)
return A
def relu_backward(dA,Z):
'''
relu backprop calc
'''
dZ = np.copy(dA)
dZ[Z<0] = 0.0
assert(dA.shape == dZ.shape)
return dZ
def init(X,units_list,initialization='random'):
'''
Initialize the parameters, caches and gradients used by the model.
Inputs:
X: inputs values used to train model
units_list: list whose length is the number of layers; each entry is the number of units in that layer (the input counts as layer 0)
Outputs:
parameters : W,b in every layers
caches: Z,A in every layers
gradients : dZ,dA,dW,db in every layers
'''
np.random.seed(1)
n_layers = len(units_list)
m_samples = X.shape[1]
parameters = []
caches = []
gradients = []
init_scaler = []
for i in range(n_layers):
if i == 0:
init_scaler.append(0.01)
else:
if initialization == 'zeros':
init_scaler.append(0.0)
elif initialization == 'he':
init_scaler.append(np.sqrt(2.0/float(units_list[i-1])))
else :
init_scaler.append(1.0)
# init of matrix
for i in range(n_layers):
param_temp = {}
cache_temp = {}
grad_temp = {}
if (i==0):
print('test of init_scaler',init_scaler[i],init_scaler)
param_temp['W'] = np.random.randn(units_list[i],units_list[i])*init_scaler[i] # will not used
param_temp['b'] = np.random.randn(units_list[i],1)*init_scaler[i] # will not used
cache_temp['Z'] = X # will not be used
cache_temp['A'] = X #!!!!!! trainning values important
grad_temp['dW'] = np.random.randn(units_list[i],units_list[i]) # will not used
grad_temp['db'] = np.random.randn(units_list[i]) # will not used
grad_temp['dA'] = np.random.randn(X.shape[0],X.shape[1]) # will not used
grad_temp['dZ'] = np.random.randn(X.shape[0],X.shape[1]) # will not used
parameters.append(param_temp)
caches.append(cache_temp)
gradients.append(grad_temp)
else:
param_temp['W'] = np.random.randn(units_list[i],units_list[i-1])*init_scaler[i]
param_temp['b'] = np.random.randn(units_list[i],1)*init_scaler[i]
cache_temp['Z'] = np.random.randn(units_list[i],m_samples)
cache_temp['A'] = np.random.randn(units_list[i],m_samples)
grad_temp['dW'] = np.random.randn(units_list[i],units_list[i-1])
grad_temp['db'] = np.random.randn(units_list[i],1)
grad_temp['dA'] = np.random.randn(units_list[i],m_samples)
grad_temp['dZ'] = np.random.randn(units_list[i],m_samples)
parameters.append(param_temp)
caches.append(cache_temp)
gradients.append(grad_temp)
return parameters, caches, gradients
def mini_init(X,units_list):
'''
Allocate the caches and gradients for a new input X (called once per mini-batch / sample).
return :
caches
gradients
'''
n_layers = len(units_list)
m_samples = X.shape[1]
caches = []
gradients = []
for i in range(n_layers):
cache_temp = {}
grad_temp = {}
if(i==0):
cache_temp['Z'] = X
cache_temp['A'] = X
grad_temp['dW'] = np.zeros([units_list[i],units_list[i]])
grad_temp['db'] = np.zeros([units_list[i],1])
grad_temp['dZ'] = np.zeros([units_list[i],m_samples])
grad_temp['dA'] = np.zeros([units_list[i],m_samples])
caches.append(cache_temp)
gradients.append(grad_temp)
else:
cache_temp['Z'] = np.zeros([units_list[i],m_samples])
cache_temp['A'] = np.zeros([units_list[i],m_samples])
grad_temp['dW'] = np.zeros([units_list[i],units_list[i-1]])
grad_temp['db'] = np.zeros([units_list[i],1])
grad_temp['dZ'] = np.zeros([units_list[i],m_samples])
grad_temp['dA'] = np.zeros([units_list[i],m_samples])
caches.append(cache_temp)
gradients.append(grad_temp)
# allocated memory checking
# for i in range(n_layers):
# print('this is {0}th layers out'.format(i))
# print('shape Z:{0},shape A:{1},shape:dW{2},shape:db{3}'.format(caches[i]['Z'].shape,caches[i]['A'].shape,\
# gradients[i]['dW'].shape,gradients[i]['db'].shape))
# print('shape dZ:{0},shape dA:{1}'.format(gradients[i]['dZ'].shape,gradients[i]['dA'].shape))
return caches, gradients
def init_drop(X,units_list,drop_list,initialization='random'):
'''
Initialize the parameters, caches and gradients used by the model, including the dropout masks D.
Inputs:
X: inputs values used to train model
units_list: list whose length is the number of layers; each entry is the number of units in that layer (the input counts as layer 0)
Outputs:
parameters : W,b in every layers
caches: Z,A in every layers
gradients : dZ,dA,dW,db in every layers
'''
drop_index = [drop_list[i][0] for i in range(len(drop_list))]
drop_prop = [drop_list[i][1] for i in range(len(drop_list))]
np.random.seed(1)
n_layers = len(units_list)
m_samples = X.shape[1]
parameters = []
caches = []
gradients = []
init_scaler = []
for i in range(n_layers):
if i == 0:
init_scaler.append(0.01)
else:
if initialization == 'zeros':
init_scaler.append(0.0)
elif initialization == 'he':
init_scaler.append(np.sqrt(2.0/float(units_list[i-1])))
else :
init_scaler.append(1.0)
init_scaler = np.array(init_scaler)
# init of matrix
for i in range(n_layers):
param_temp = {}
cache_temp = {}
grad_temp = {}
if (i==0):
print('test of init_scaler',init_scaler[i],init_scaler)
param_temp['W'] = np.random.randn(units_list[i],units_list[i])*init_scaler[i] # will not used
param_temp['b'] = np.random.randn(units_list[i],1)*init_scaler[i] # will not used
cache_temp['Z'] = X # will not be used
cache_temp['A'] = X #!!!!!! trainning values important
cache_temp['D'] = np.random.rand(X.shape[0],X.shape[1])
grad_temp['dW'] = np.random.randn(units_list[i],units_list[i]) # will not used
grad_temp['db'] = np.random.randn(units_list[i]) # will not used
grad_temp['dA'] = np.random.randn(X.shape[0],X.shape[1]) # will not used
grad_temp['dZ'] = np.random.randn(X.shape[0],X.shape[1]) # will not used
parameters.append(param_temp)
caches.append(cache_temp)
gradients.append(grad_temp)
else:
param_temp['W'] = np.random.randn(units_list[i],units_list[i-1])*init_scaler[i]
param_temp['b'] = np.random.randn(units_list[i],1)*init_scaler[i]
cache_temp['Z'] = np.random.randn(units_list[i],m_samples)
cache_temp['A'] = np.random.randn(units_list[i],m_samples)
cache_temp['D'] = np.random.rand(units_list[i],m_samples)
if i in drop_index:
index = drop_index.index(i)
prop = drop_prop[index]
cache_temp['D'][cache_temp['D']<=prop] = 1.0
cache_temp['D'][cache_temp['D']!=1.0] = 0.0
grad_temp['dW'] = np.random.randn(units_list[i],units_list[i-1])
grad_temp['db'] = np.random.randn(units_list[i],1)
grad_temp['dA'] = np.random.randn(units_list[i],m_samples)
grad_temp['dZ'] = np.random.randn(units_list[i],m_samples)
parameters.append(param_temp)
caches.append(cache_temp)
gradients.append(grad_temp)
return parameters, caches, gradients
def random_mini_batches(X, Y, mini_batch_size = 64, seed = 0):
"""
Creates a list of random minibatches from (X, Y)
Arguments:
X -- input data, of shape (input size, number of examples)
Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)
mini_batch_size -- size of the mini-batches, integer
Returns:
mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)
"""
np.random.seed(seed) # To make your "random" minibatches the same as ours
m = X.shape[1] # number of training examples
mini_batches = []
# Step 1: Shuffle (X, Y)
permutation = list(np.random.permutation(m))
shuffled_X = X[:, permutation]
shuffled_Y = Y[:, permutation].reshape((1,m))
# Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.
# math.floor(num) return an integer of number: like 27.5 as 27
num_complete_minibatches = math.floor(m/mini_batch_size) # number of mini batches of size mini_batch_size in your partitionning
for k in range(0, num_complete_minibatches):
mini_batch_X = shuffled_X[:,k*mini_batch_size:(k+1)*mini_batch_size]
mini_batch_Y = shuffled_Y[:,k*mini_batch_size:(k+1)*mini_batch_size]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
# Handling the end case (last mini-batch < mini_batch_size)
if m % mini_batch_size != 0:
mini_batch_X=shuffled_X[:,num_complete_minibatches*mini_batch_size:]
mini_batch_Y=shuffled_Y[:,num_complete_minibatches*mini_batch_size:]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
# checking batch size:
# for i in range(len(mini_batches)):
# print('this is {0}th batch!!'.format(i))
# print(mini_batches[i][0].shape,mini_batches[i][1].shape)
return mini_batches
def initialize_velocity(parameters):
"""
Initializes the velocity as a python list of dictionaries, one entry per layer:
- keys: 'dW', 'db'
- values: numpy arrays of zeros with the same shape as the corresponding parameters.
Arguments:
parameters -- python list of dictionaries containing your parameters:
parameters[l]['W'] = Wl
parameters[l]['b'] = bl
Returns:
velocity -- python list of dictionaries containing the current velocity:
velocity[l]['dW'] = velocity of dWl
velocity[l]['db'] = velocity of dbl
"""
n_layers = len(parameters)
velocity = []
for i in range(n_layers):
v_temp = {}
v_temp['dW'] = np.zeros([parameters[i]['W'].shape[0],parameters[i]['W'].shape[1]])
v_temp['db'] = np.zeros([parameters[i]['b'].shape[0],parameters[i]['b'].shape[1]])
velocity.append(v_temp)
return velocity
def initialize_adam(parameters):
"""
Initializes the Adam state (first- and second-moment averages) as two python lists of dictionaries, one entry per layer:
- keys: 'dW', 'db'
- values: numpy arrays of zeros with the same shape as the corresponding parameters.
Arguments:
parameters -- python list of dictionaries:
parameters[l]['W'] = Wl
parameters[l]['b'] = bl
Returns:
velocity -- python list of dictionaries with the first-moment (momentum) terms
squared -- python list of dictionaries with the second-moment (RMS) terms
"""
n_layers = len(parameters)
velocity = []
squared = []
for i in range(n_layers):
v_temp = {}
s_temp = {}
v_temp['dW'] = np.zeros([parameters[i]['W'].shape[0],parameters[i]['W'].shape[1]])
v_temp['db'] = np.zeros([parameters[i]['b'].shape[0],parameters[i]['b'].shape[1]])
s_temp['dW'] = np.zeros([parameters[i]['W'].shape[0],parameters[i]['W'].shape[1]])
s_temp['db'] = np.zeros([parameters[i]['b'].shape[0],parameters[i]['b'].shape[1]])
velocity.append(v_temp)
squared.append(s_temp)
return velocity,squared
def linear_forward(X,W,b):
'''
Compute the linear part Z = np.dot(W, X) + b
'''
Z = np.dot(W,X) + b
assert(Z.shape[0] == W.shape[0])
assert(Z.shape[1] == X.shape[1])
return Z
def linear_activation_forward(A_prev,W,b,activation='None'):
'''
Single-layer forward step: linear transform followed by the given activation.
Returns the cache values Z and A for this layer.
'''
Z = linear_forward(A_prev,W,b)
if(activation == 'relu'):
A = relu_forward(Z)
elif(activation == 'sigmoid'):
A = sigmoid_forward(Z)
else:
A = Z
print('Warning: unrecognized activation {0}, using the identity'.format(activation))
assert(Z.shape == A.shape)
return Z,A
def n_layers_forward(parameters,caches,activation_list):
'''
Forward propagation through all layers: fill caches[i]['Z'] and caches[i]['A'] from W, b and the previous layer's A.
'''
n_layers = len(activation_list)
for i in range(1,n_layers):
A_prev = caches[i-1]['A']
W = parameters[i]['W']
b = parameters[i]['b']
activation = activation_list[i]
caches[i]['Z'], caches[i]['A'] = linear_activation_forward(A_prev,W,b,activation)
return caches
def n_layers_forward_drop(parameters,caches,activation_list,drop_list):
'''
Forward propagation through all layers, applying inverted dropout in the layers listed in drop_list.
'''
drop_index = [drop_list[i][0] for i in range(len(drop_list))]
drop_prop = [drop_list[i][1] for i in range(len(drop_list))]
n_layers = len(activation_list)
for i in range(1,n_layers):
A_prev = caches[i-1]['A']
W = parameters[i]['W']
b = parameters[i]['b']
activation = activation_list[i]
caches[i]['Z'], caches[i]['A'] = linear_activation_forward(A_prev,W,b,activation)
if i in drop_index:
index = drop_index.index(i)
prop = drop_prop[index]
# D = np.random.rand(caches[i]['A'].shape[0],caches[i]['A'].shape[1])
# D[D<=prop] = 1.0
# D[D !=1.0] = 0.0
D = caches[i]['D']
assert(D.shape == caches[i]['Z'].shape)
caches[i]['A'] = np.multiply(caches[i]['A'],D)/prop
#caches[i]['D'] = D
return caches
def linear_backward(dZ,Aprev):
'''
Backprop through the linear step of a single layer.
Inputs:
dZ: gradients of loss to ith layers' Z
Aprev: cache values in (i-1) layers' matrix A
Outputs:
dW: gradients of loss to ith layers' W
db: gradients of loss to ith layers' b
'''
m_samples = dZ.shape[1]
dW = np.dot(dZ, Aprev.T)/float(m_samples)
db = np.sum(dZ,axis=1,keepdims=True)/float(m_samples)
return dW, db
def linear_activation_backward(Z,Aprev,Wplus,dZplus,activation):
'''
used to calc single layer's dZ,dA,dW,db
Inputs:
Z : matrix of i th layers
Aprev: matrix of previous layers
Wplus: parameters of W of i+1 th layers
dZplus: dz gradients of (i+1)th layers
activation: activation function
Outputs:
dA: dA gradients of i th layers
dZ: dZ gradients of i th layers
dW: dW gradients of i th layers
db: db gradients of i th layers
'''
dA = np.dot(Wplus.T,dZplus)
if (activation == 'sigmoid'):
dZ = sigmoid_backward(dA,Z)
elif(activation == 'relu'):
dZ = relu_backward(dA,Z)
else:
dZ = dA
print('Warning: unrecognized activation {0}, using dZ = dA'.format(activation))
dW,db = linear_backward(dZ,Aprev)
return dZ,dA,dW,db
def n_layers_backward(Y,parameters,caches,gradients, activation_list):
'''
used to calc the n_layers gradients
Inputs:
parameters: w,b every layer model to learn
caches: Z,A every layers
gradients: used as inputs
activation_list: every layers activation_function
Outputs:
gradients: gradients of the cost with respect to dA, dZ, dW, db in every layer
'''
n_layers = len(activation_list)
for i in range(n_layers-1,0,-1):
activation = activation_list[i]
Z = caches[i]['Z']
A = caches[i]['A']
Aprev = caches[i-1]['A']
if (i == n_layers -1):
gradients[i]['dA'] = -np.divide(Y,A) + np.divide((1.0-Y),(1.0-A))
dA = gradients[i]['dA']
gradients[i]['dZ'] = sigmoid_backward(dA,Z)
dZ = gradients[i]['dZ']
gradients[i]['dW'],gradients[i]['db'] = linear_backward(dZ,Aprev)
else:
Wplus = parameters[i+1]['W']
dZplus = gradients[i+1]['dZ']
gradients[i]['dZ'],gradients[i]['dA'],gradients[i]['dW'],gradients[i]['db'] = \
linear_activation_backward(Z,Aprev,Wplus,dZplus,activation)
return gradients
def update_parameters(parameters,gradients,learning_rate):
'''
function used to update parameters w,b
Inputs:
parameters,gradients,learning_rate
Outputs:
parameters: updated parameters
'''
n_layers = len(parameters)
#print('shape of learning_rate',learning_rate)
for i in range(1,n_layers):
assert(parameters[i]['W'].shape == gradients[i]['dW'].shape)
assert(parameters[i]['b'].shape == gradients[i]['db'].shape)
parameters[i]['W'] += -learning_rate*gradients[i]['dW']
parameters[i]['b'] += -learning_rate*gradients[i]['db']
return parameters
def update_parameters_with_momentum(parameters, gradients, v, beta=0.0, learning_rate=0.075):
"""
Update parameters using Momentum
Arguments:
parameters -- python list of dictionaries containing your parameters:
parameters[l]['W'] = Wl
parameters[l]['b'] = bl
gradients -- python list of dictionaries containing your gradients for each parameter:
gradients[l]['dW'] = dWl
gradients[l]['db'] = dbl
v -- python list of dictionaries containing the current velocity:
v[l]['dW'] = ...
v[l]['db'] = ...
beta -- the momentum hyperparameter, scalar
learning_rate -- the learning rate, scalar
Returns:
parameters -- python list of dictionaries containing your updated parameters
v -- python list of dictionaries containing your updated velocities
"""
n_layers = len(parameters) # number of layers in the neural networks
# Momentum update for each parameter
for i in range(n_layers):
v[i]['dW'] = beta*v[i]['dW'] + (1.0-beta)*gradients[i]['dW']
#v[i]['dW'] = v[i]['dW']/(1.0-beta)
v[i]['db'] = beta*v[i]['db'] + (1.0-beta)*gradients[i]['db']
#v[i]['db'] = v[i]['db']/(1.0-beta)
parameters[i]['W'] += -learning_rate*v[i]['dW']
parameters[i]['b'] += -learning_rate*v[i]['db']
return parameters, v
def update_parameters_with_adam(parameters, gradients, v, s, t, learning_rate=0.0075,beta1=0.8,beta2=0.95,epsol=1e-8):
"""
Update parameters using Adam
Arguments:
parameters -- python list of dictionaries containing your parameters:
parameters[l]['W'] = Wl
parameters[l]['b'] = bl
gradients -- python list of dictionaries containing your gradients for each parameter:
gradients[l]['dW'] = dWl
gradients[l]['db'] = dbl
v -- python list of dictionaries containing the first-moment (momentum) estimates
s -- python list of dictionaries containing the second-moment (RMS) estimates
t -- update counter used for bias correction, integer
beta1 -- exponential decay rate for the first-moment estimates, scalar
beta2 -- exponential decay rate for the second-moment estimates, scalar
learning_rate -- the learning rate, scalar
epsol -- small constant that avoids division by zero, scalar
Returns:
parameters -- python list of dictionaries containing your updated parameters
v -- python list of dictionaries containing your updated first-moment estimates
s -- python list of dictionaries containing your updated second-moment estimates
"""
n_layers = len(parameters) # number of layers in the neural networks
#print('this is test of t in all updated values',t)
# Adam update for each parameter
for i in range(n_layers):
v[i]['dW'] = beta1*v[i]['dW'] + (1.0-beta1)*gradients[i]['dW']
v[i]['dW'] = v[i]['dW']/(1.0-beta1**t)
v[i]['db'] = beta1*v[i]['db'] + (1.0-beta1)*gradients[i]['db']
v[i]['db'] = v[i]['db']/(1.0-beta1**t)
s[i]['dW'] = beta2*s[i]['dW'] + (1.0-beta2)*np.multiply(gradients[i]['dW'],gradients[i]['dW'])
s[i]['dW'] = s[i]['dW']/(1.0 - beta2**t)
s[i]['db'] = beta2*s[i]['db'] + (1.0-beta2)*np.multiply(gradients[i]['db'],gradients[i]['db'])
s[i]['db'] = s[i]['db']/(1.0 - beta2**t)
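# NOTE: the four divisions above modify v and s in place, so the running averages themselves
# are rescaled by 1/(1 - beta**t) on every call; standard Adam applies the bias correction to
# temporary copies instead (likely the cause of the blow-up seen with beta1=0.9 / beta2=0.999, see section 2).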
#rmsW_temp = np.sqrt(s[i]['dW']+epsol)
#rmsb_temp = np.sqrt(s[i]['db']+epsol)
parameters[i]['W'] += -learning_rate*np.divide(v[i]['dW'],np.sqrt(s[i]['dW']+epsol))
parameters[i]['b'] += -learning_rate*np.divide(v[i]['db'],np.sqrt(s[i]['db']+epsol))
return parameters, v,s
def cost_function(AL,Y):
'''
function to calc cost values
Inputs: AL last layers' cache matrix A
Y: labeled samples targets
Outputs:
loss: total cost function values
'''
m_samples = Y.shape[1]
AL = AL.reshape(-1,1)
Y = Y.reshape(-1,1)
#print(AL.shape,Y.shape)
assert(AL.shape == Y.shape)
loss = np.dot(Y.T,np.log(AL)) + np.dot((1.0-Y).T,np.log(1.0-AL))
loss = -loss / float(m_samples)
loss = loss.reshape(-1,1)
loss = loss[0]
return loss
def predict(AL,Y):
'''
function use learned parameters to predict
Inputs:
AL: last layer cache matrix
Y: labeled datas
Outputs:
accuracy: the predict accuracy real number
'''
AL = AL.reshape(-1,1)
Y = Y.reshape(-1,1)
assert(AL.shape == Y.shape)
m_samples = Y.shape[0]
counts = 0.0
for i in range(m_samples):
if AL[i] >=0.5:
AL[i] = 1.0
else:
AL[i] = 0.0
accuracy = np.sum(AL == Y)/float(m_samples)
return accuracy
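Before moving on to the models, a quick way to sanity-check the helpers above is to run them on a tiny synthetic problem and look at the shapes they return. The following is only a minimal sketch (the toy sizes and variable names are made up for illustration; it is not part of the assignment files):
import numpy as np
from optim_utils import random_mini_batches, init, initialize_velocity, initialize_adam
# hypothetical toy problem: 2 features, 147 examples (not a multiple of 64, so the last mini-batch is smaller)
X = np.random.randn(2, 147)
Y = (np.random.rand(1, 147) > 0.5).astype(float)
mini_batches = random_mini_batches(X, Y, mini_batch_size=64, seed=0)
for k, (bx, by) in enumerate(mini_batches):
    print(k, bx.shape, by.shape)  # (2, 64) (1, 64), (2, 64) (1, 64), (2, 19) (1, 19)
# the list-of-dicts layout used throughout: parameters[l]['W'], parameters[l]['b']
parameters, caches, gradients = init(X, [2, 3, 1])
velocity = initialize_velocity(parameters)
v_adam, s_adam = initialize_adam(parameters)
print(velocity[1]['dW'].shape, s_adam[1]['dW'].shape)  # (3, 2) (3, 2)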
2. Models implemented in this assignment: batch gradient descent, mini-batch gradient descent, stochastic gradient descent, momentum, and Adam. They are saved in the following code segment:
class2_week2.py contains the five corresponding models. Note that when the momentum and Adam updates use the exponential bias-correction form with beta, the computation breaks down; it does run once beta is lowered to 0.8. Likewise, in Adam beta1 and beta2 cannot be set to 0.9 and 0.999, but the computation goes through when these values are reduced. The error has not been tracked down yet (a likely culprit is that update_parameters_with_adam applies the 1/(1-beta^t) correction to v and s in place, rescaling the running averages themselves; a corrected sketch is given at the end of this section). This is not a serious problem in practice: skipping the beta / beta1 / beta2 correction only makes dW and db smaller, which is equivalent to a slower effective learning rate and does not affect stability. Among the five models, mini-batch, momentum, and Adam converge at similar speeds, all faster than batch gradient descent, which in turn is faster than SGD. The code is as follows:
import numpy as np
import matplotlib.pyplot as plt
import sklearn.datasets
import h5py
import pdb
#matplotlib inline
from optim_utils import sigmoid_forward,sigmoid_backward,relu_forward,relu_backward
from optim_utils import linear_forward,linear_backward,linear_activation_forward,linear_activation_backward
from optim_utils import n_layers_forward,n_layers_backward,cost_function,predict,model_pred
from optim_utils import update_parameters,update_parameters_with_momentum,update_parameters_with_adam
#from optim_utils import n_layers_backward_l2,cost_function_l2  # not defined in the optim_utils.py above and not used below
from optim_utils import load_2D_dataset,load_data_moon,load_data_cat
from optim_utils import plot_decision_boundary,random_mini_batches
from optim_utils import n_layers_forward_drop  # n_layers_backward_drop_l2 is not defined in the optim_utils.py above, so it is not imported
from optim_utils import init,init_drop,mini_init,initialize_velocity,initialize_adam
import time
learning_rate = 0.01
#x_train,x_test,y_train, y_test = load_2D_dataset()
x_train,x_test,y_train, y_test = load_data_moon()
plt.show()
t0 = time.time()
units_list = [x_train.shape[0],15,5,1]
activation_list = ['None','relu','relu','sigmoid']
drop_list = [(1,0.86),(2,0.86)]
l2 = True
lambd = 0.0
initialization = 'random'
def model_with_gd(X,Y,units_list,activation_list,learning_rate=0.0075,initialization='random'):
'''
(full-)batch gradient descent
'''
n_layers = len(units_list)
epoch_num = 5000
m_samples = X.shape[1]
accuracy_list = []
accuracy_test = []
steps = []
loss_list = []
#plt.ion()
loss_temp = 0.0
parameters,caches,gradients = init(X,units_list,initialization=initialization)
for i in range(epoch_num):
caches = n_layers_forward(parameters,caches,activation_list)
loss = cost_function(caches[n_layers-1]['A'],Y)
dloss = np.abs(loss - loss_temp)/(np.abs(loss)+np.abs(loss_temp))
gradients = n_layers_backward(Y,parameters,caches,gradients,activation_list)
parameters = update_parameters(parameters,gradients,learning_rate)
loss_temp = loss
if(i%200 == 0):
print('Steps is {0}, total loss value is: {1}, residual is: {2}'.format(i,loss,dloss))
steps.append(i)
loss_list.append(loss)
accuracy_list.append(predict(caches[n_layers-1]['A'],Y))
predictions = model_pred(x_test,parameters,activation_list)
accuracy_test.append(predict(predictions,y_test))
print('Steps is {0}, training accuracy is: {1}, test accuracy is: {2}'.format(i,accuracy_list[-1],accuracy_test[-1]))
plt.figure(2)
line1, = plt.plot(steps,loss_list, 'g',linewidth=1.5)
plt.xlabel('Training steps')
plt.ylabel('Total Loss')
plt.title('Training loss vs steps, learning_rate: {0}, l2: {1}'.format(learning_rate,l2))
plt.legend([line1],['Training loss'],loc='best')
plt.figure(3)
line2, = plt.plot(steps,accuracy_list,'r',linewidth=1.5)
line3, = plt.plot(steps,accuracy_test,'b',linewidth=1.5)
plt.xlabel('Training steps')
plt.ylabel('Accuracy')
plt.title('Accuracy vs Steps, learning_rate: {0}, l2: {1}'.format(learning_rate, l2))
plt.legend([line2,line3],['Training accuracy','Test accuracy'],loc='best')
#plt.pause(0.01)
return parameters, loss
def model_with_sgd(X,Y,units_list,activation_list,learning_rate=0.0075,initialization='random'):
'''
stochastic gradient descent (one example per update)
'''
n_layers = len(units_list)
epoch_num = 5000
m_samples = X.shape[1]
accuracy_list = []
accuracy_test = []
steps = []
loss_list = []
#plt.ion()
loss_temp = 0.0
# batch_size = 64
# mini_batches = random_mini_batches(X,Y,mini_batch_size=batch_size)
# num_batch = len(mini_batches)
parameters,caches,gradients = init(X,units_list,initialization=initialization)
for i in range(epoch_num):
for j in range(m_samples):
x_sample = X[:,j].reshape(-1,1)
y_sample = Y[:,j].reshape(-1,1)
caches, gradients = mini_init(x_sample,units_list)
# for i in range(len(units_list)):
# print(i,"'th' output in model")
# print(caches[i]['Z'].shape,caches[i]['A'].shape)
caches = n_layers_forward(parameters,caches,activation_list)
# for i in range(len(units_list)):
# print(i,"'th' output in model")
# print(caches[i]['Z'].shape,caches[i]['A'].shape)
loss = cost_function(caches[n_layers-1]['A'],y_sample)
dloss = np.abs(loss - loss_temp)/(np.abs(loss)+np.abs(loss_temp))
gradients = n_layers_backward(y_sample,parameters,caches,gradients,activation_list)
parameters = update_parameters(parameters,gradients,learning_rate)
loss_temp = loss
if(i%20 == 0):
print('Steps is {0}, total loss value is: {1}, residual is: {2}'.format(i,loss,dloss))
steps.append(i)
loss_list.append(loss)
predictions_train = model_pred(X,parameters,activation_list)
accuracy_list.append(predict(predictions_train,Y))
predictions = model_pred(x_test,parameters,activation_list)
accuracy_test.append(predict(predictions,y_test))
print('Steps is {0}, training accuracy is: {1}, test accuracy is: {2}'.format(i,accuracy_list[-1],accuracy_test[-1]))
plt.figure(2)
line1, = plt.plot(steps,loss_list, 'g',linewidth=1.5)
plt.xlabel('Training steps')
plt.ylabel('Total Loss')
plt.title('Training loss vs steps, learning_rate: {0}, l2: {1}'.format(learning_rate,l2))
plt.legend([line1],['Training loss'],loc='best')
plt.figure(3)
line2, = plt.plot(steps,accuracy_list,'r',linewidth=1.5)
line3, = plt.plot(steps,accuracy_test,'b',linewidth=1.5)
plt.xlabel('Training steps')
plt.ylabel('Accuracy')
plt.title('Accuracy vs Steps, learning_rate: {0}, l2: {1}'.format(learning_rate, l2))
plt.legend([line2,line3],['Training accuracy','Test accuracy'],loc='best')
#plt.pause(0.01)
return parameters, loss
def model_with_bgd(X,Y,units_list,activation_list,learning_rate=0.0075,initialization='random'):
'''
mini-batch gradient descent
'''
n_layers = len(units_list)
epoch_num = 5000
m_samples = X.shape[1]
accuracy_list = []
accuracy_test = []
steps = []
loss_list = []
#plt.ion()
loss_temp = 0.0
batch_size = 64
mini_batches = random_mini_batches(X,Y,mini_batch_size=batch_size)
num_batch = len(mini_batches)
parameters,caches,gradients = init(X,units_list,initialization=initialization)
for i in range(epoch_num):
for j in range(num_batch):
x_sample = mini_batches[j][0]
y_sample = mini_batches[j][1]
caches, gradients = mini_init(x_sample,units_list)
# for i in range(len(units_list)):
# print(i,"'th' output in model")
# print(caches[i]['Z'].shape,caches[i]['A'].shape)
caches = n_layers_forward(parameters,caches,activation_list)
# for i in range(len(units_list)):
# print(i,"'th' output in model")
# print(caches[i]['Z'].shape,caches[i]['A'].shape)
loss = cost_function(caches[n_layers-1]['A'],y_sample)
dloss = np.abs(loss - loss_temp)/(np.abs(loss)+np.abs(loss_temp))
gradients = n_layers_backward(y_sample,parameters,caches,gradients,activation_list)
parameters = update_parameters(parameters,gradients,learning_rate)
loss_temp = loss
if(i%20 == 0):
print('Steps is {0}, total loss value is: {1}, residual is: {2}'.format(i,loss,dloss))
steps.append(i)
loss_list.append(loss)
predictions_train = model_pred(X,parameters,activation_list)
accuracy_list.append(predict(predictions_train,Y))
predictions = model_pred(x_test,parameters,activation_list)
accuracy_test.append(predict(predictions,y_test))
print('Steps is {0}, training accuracy is: {1}, test accuracy is: {2}'.format(i,accuracy_list[-1],accuracy_test[-1]))
plt.figure(2)
line1, = plt.plot(steps,loss_list, 'g',linewidth=1.5)
plt.xlabel('Training steps')
plt.ylabel('Total Loss')
plt.title('Training loss vs steps, learning_rate: {0}, l2: {1}'.format(learning_rate,l2))
plt.legend([line1],['Training loss'],loc='best')
plt.figure(3)
line2, = plt.plot(steps,accuracy_list,'r',linewidth=1.5)
line3, = plt.plot(steps,accuracy_test,'b',linewidth=1.5)
plt.xlabel('Training steps')
plt.ylabel('Accuracy')
plt.title('Accuracy vs Steps, learning_rate: {0}, l2: {1}'.format(learning_rate, l2))
plt.legend([line2,line3],['Training accuracy','Test accuracy'],loc='best')
#plt.pause(0.01)
return parameters, loss
def model_with_bgd_momentum(X,Y,units_list,activation_list,learning_rate=0.0075,initialization='random'):
'''
mini-batch gradient descent with momentum
'''
n_layers = len(units_list)
epoch_num = 5000
m_samples = X.shape[1]
accuracy_list = []
accuracy_test = []
steps = []
loss_list = []
#plt.ion()
loss_temp = 0.0
batch_size = 64
beta = 0.9
mini_batches = random_mini_batches(X,Y,mini_batch_size=batch_size)
num_batch = len(mini_batches)
parameters,caches,gradients = init(X,units_list,initialization=initialization)
velocity = initialize_velocity(parameters)
for i in range(epoch_num):
for j in range(num_batch):
x_sample = mini_batches[j][0]
y_sample = mini_batches[j][1]
caches, gradients = mini_init(x_sample,units_list)
# for i in range(len(units_list)):
# print(i,"'th' output in model",len(units_list),len(parameters))
# print(parameters[i]['W'].shape,parameters[i]['b'].shape)
caches = n_layers_forward(parameters,caches,activation_list)
# for i in range(len(units_list)):
# print(i,"'th' output in model")
# print(caches[i]['Z'].shape,caches[i]['A'].shape)
loss = cost_function(caches[n_layers-1]['A'],y_sample)
dloss = np.abs(loss - loss_temp)/(np.abs(loss)+np.abs(loss_temp))
gradients = n_layers_backward(y_sample,parameters,caches,gradients,activation_list)
parameters,velocity = update_parameters_with_momentum(parameters,gradients,velocity,beta,learning_rate)
loss_temp = loss
if(i%20 == 0):
print('Steps is {0}, total loss value is: {1}, residual is: {2}'.format(i,loss,dloss))
steps.append(i)
loss_list.append(loss)
predictions_train = model_pred(X,parameters,activation_list)
accuracy_list.append(predict(predictions_train,Y))
predictions = model_pred(x_test,parameters,activation_list)
accuracy_test.append(predict(predictions,y_test))
print('Steps is {0}, training accuracy is: {1}, test accuracy is: {2}'.format(i,accuracy_list[-1],accuracy_test[-1]))
plt.figure(2)
line1, = plt.plot(steps,loss_list, 'g',linewidth=1.5)
plt.xlabel('Training steps')
plt.ylabel('Total Loss')
plt.title('Training loss vs steps, learning_rate: {0}, l2: {1}'.format(learning_rate,l2))
plt.legend([line1],['Training loss'],loc='best')
plt.figure(3)
line2, = plt.plot(steps,accuracy_list,'r',linewidth=1.5)
line3, = plt.plot(steps,accuracy_test,'b',linewidth=1.5)
plt.xlabel('Training steps')
plt.ylabel('Accuracy')
plt.title('Accuracy vs Steps, learning_rate: {0}, l2: {1}'.format(learning_rate, l2))
plt.legend([line2,line3],['Training accuracy','Test accuracy'],loc='best')
#plt.pause(0.01)
return parameters, loss
def model_with_bgd_adam(X,Y,units_list,activation_list,learning_rate=0.0075,initialization='random'):
'''
mini-batch gradient descent with Adam
'''
n_layers = len(units_list)
epoch_num = 5000
m_samples = X.shape[1]
accuracy_list = []
accuracy_test = []
steps = []
loss_list = []
#plt.ion()
loss_temp = 0.0
batch_size = 64
beta = 0.9
mini_batches = random_mini_batches(X,Y,mini_batch_size=batch_size)
num_batch = len(mini_batches)
parameters,caches,gradients = init(X,units_list,initialization=initialization)
velocity,squared = initialize_adam(parameters)
update_counts = 1
for i in range(epoch_num):
for j in range(num_batch):
x_sample = mini_batches[j][0]
y_sample = mini_batches[j][1]
caches, gradients = mini_init(x_sample,units_list)
# for i in range(len(units_list)):
# print(i,"'th' output in model",len(units_list),len(parameters))
# print(parameters[i]['W'].shape,parameters[i]['b'].shape)
caches = n_layers_forward(parameters,caches,activation_list)
# for i in range(len(units_list)):
# print(i,"'th' output in model")
# print(caches[i]['Z'].shape,caches[i]['A'].shape)
loss = cost_function(caches[n_layers-1]['A'],y_sample)
dloss = np.abs(loss - loss_temp)/(np.abs(loss)+np.abs(loss_temp))
gradients = n_layers_backward(y_sample,parameters,caches,gradients,activation_list)
parameters,velocity,squared = update_parameters_with_adam(parameters,gradients,velocity,squared,update_counts,learning_rate)
update_counts += 1
loss_temp = loss
if(i%20 == 0):
print('Steps is {0}, total loss value is: {1}, residual is: {2}'.format(i,loss,dloss))
steps.append(i)
loss_list.append(loss)
predictions_train = model_pred(X,parameters,activation_list)
accuracy_list.append(predict(predictions_train,Y))
predictions = model_pred(x_test,parameters,activation_list)
accuracy_test.append(predict(predictions,y_test))
print('Steps is {0}, training accuracy is: {1}, test accuracy is: {2}'.format(i,accuracy_list[-1],accuracy_test[-1]))
plt.figure(2)
line1, = plt.plot(steps,loss_list, 'g',linewidth=1.5)
plt.xlabel('Training steps')
plt.ylabel('Total Loss')
plt.title('Training loss vs steps, learning_rate: {0}, l2: {1}'.format(learning_rate,l2))
plt.legend([line1],['Training loss'],loc='best')
plt.figure(3)
line2, = plt.plot(steps,accuracy_list,'r',linewidth=1.5)
line3, = plt.plot(steps,accuracy_test,'b',linewidth=1.5)
plt.xlabel('Training steps')
plt.ylabel('Accuracy')
plt.title('Accuracy vs Steps, learning_rate: {0}, l2: {1}'.format(learning_rate, l2))
plt.legend([line2,line3],['Training accuracy','Test accuracy'],loc='best')
#plt.pause(0.01)
return parameters, loss
parameters, loss = model_with_gd(x_train,y_train,units_list,activation_list,learning_rate,initialization)
#parameters, loss = model_with_bgd(x_train,y_train,units_list,activation_list,learning_rate,initialization)
#parameters, loss = model_with_sgd(x_train,y_train,units_list,activation_list,learning_rate,initialization)
#parameters, loss = model_with_bgd_momentum(x_train,y_train,units_list,activation_list,learning_rate,initialization)
#parameters, loss = model_with_bgd_adam(x_train,y_train,units_list,activation_list,learning_rate,initialization)
t1 = time.time()
print("this is calc total time:{} seconds".format(t1-t0))
plt.show()
plt.figure(4)
plt.xlabel('X Feature 1')
plt.ylabel('X Feature 2')
plt.title('Training data and model decision boundary')
plot_decision_boundary(lambda x:model_pred(x,parameters,activation_list),x_train,y_train)
print('final loss is:',loss)
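Finally, about the bias-correction problem noted in section 2: update_parameters_with_adam divides v and s in place by (1.0 - beta**t), so the running averages themselves get rescaled on every call, which is the most likely reason the update blows up with the usual beta1 = 0.9 / beta2 = 0.999. Below is a minimal sketch of the standard formulation, written against the same list-of-dicts layout; the function name is my own and it has not been dropped into the training runs above, so treat it as a reference rather than a tested replacement:
def update_parameters_with_adam_corrected(parameters, gradients, v, s, t, learning_rate=0.0075, beta1=0.9, beta2=0.999, epsilon=1e-8):
    '''
    Standard Adam: v and s keep the raw exponential moving averages, and the
    1/(1 - beta**t) bias correction is applied to temporary copies only.
    '''
    n_layers = len(parameters)
    for i in range(1, n_layers):  # layer 0 only holds the (unused) input placeholders
        for key in ('W', 'b'):
            g = gradients[i]['d' + key]
            v[i]['d' + key] = beta1 * v[i]['d' + key] + (1.0 - beta1) * g
            s[i]['d' + key] = beta2 * s[i]['d' + key] + (1.0 - beta2) * g * g
            v_corr = v[i]['d' + key] / (1.0 - beta1 ** t)  # bias-corrected copies; v and s stay untouched
            s_corr = s[i]['d' + key] / (1.0 - beta2 ** t)
            parameters[i][key] -= learning_rate * v_corr / (np.sqrt(s_corr) + epsilon)
    return parameters, v, s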