实现一个 n 层的深度神经网络(纯 numpy 实现,ReLU 隐藏层 + sigmoid 输出,用于二分类)。
代码:
import numpy as np
import h5py
import matplotlib.pyplot as plt
from testCases_v2 import *
from dnn_utils_v2 import sigmoid, sigmoid_backward, relu, relu_backward
from lr_utils import load_dataset
class dnn:
    """An L-layer fully-connected neural network for binary classification.

    Hidden layers use ReLU, the output layer uses sigmoid, and training is
    plain batch gradient descent on the cross-entropy loss.  All per-layer
    tensors are kept in dicts keyed by layer index ('W1', 'b1', 'Z1', ...).
    """

    def __init__(self, layer_dims) -> None:
        """Initialize weights and biases.

        layer_dims: list of layer sizes [n_x, n_h1, ..., n_y]; the network
        has ``len(layer_dims) - 1`` weight layers.
        """
        self.WL = {}
        self.bL = {}
        self.L = len(layer_dims) - 1  # number of weight layers
        np.random.seed(3)  # fixed seed so runs are reproducible
        for i in range(1, self.L + 1):
            # Scale by 1/sqrt(n_prev): a plain randn * 0.01 init makes the
            # gradients vanish in deeper networks.
            self.WL['W' + str(i)] = np.random.randn(layer_dims[i], layer_dims[i - 1]) / np.sqrt(layer_dims[i - 1])
            self.bL['b' + str(i)] = np.zeros((layer_dims[i], 1))
        # Caches filled during the forward / backward passes.
        self.XL = {}  # NOTE(review): never written or read — kept for compatibility
        self.AL = {}  # activations; AL['A0'] is the input X
        self.ZL = {}  # pre-activations
        self.dZ = {}
        self.dW = {}
        self.db = {}
        self.dA = {}

    def input_data(self, X, Y, learning_rate):
        """Attach training data X (n_x, m), labels Y (1, m) and the learning rate."""
        self.m = X.shape[1]
        self.AL["A0"] = X
        self.Y = Y
        self.learning_rate = learning_rate

    def set_data(self, X):
        """Attach input X only (used at prediction time; no labels needed)."""
        self.m = X.shape[1]
        self.AL["A0"] = X

    # ---- forward propagation (Z and A are cached for the backward pass) ----

    def linear_activation_forward(self, i, activation):
        """Run layer i forward: Z_i = W_i @ A_{i-1} + b_i, then activate.

        activation: "sigmoid" or "relu".
        Raises ValueError on an unknown activation name (previously this
        failed silently and surfaced later as an unrelated KeyError).
        """
        self.ZL['Z' + str(i)] = np.dot(self.WL['W' + str(i)], self.AL['A' + str(i - 1)]) + self.bL['b' + str(i)]
        if activation == "sigmoid":
            self.AL['A' + str(i)] = 1 / (1 + np.exp(-self.ZL['Z' + str(i)]))
        elif activation == "relu":
            self.AL['A' + str(i)] = np.maximum(0, self.ZL['Z' + str(i)])
        else:
            raise ValueError("unknown activation: " + str(activation))

    def L_model_forward(self):
        """Full forward pass: ReLU for layers 1..L-1, sigmoid for layer L."""
        for i in range(1, self.L):
            self.linear_activation_forward(i, "relu")
        self.linear_activation_forward(self.L, "sigmoid")
        # The final output must be a (1, m) row of probabilities.
        assert self.AL['A' + str(self.L)].shape == (1, self.m)

    # ---- cost ----

    def computer_cost(self):
        """Return the scalar cross-entropy cost of the cached forward pass.

        Activations are clipped away from exactly 0/1 so np.log never
        produces -inf / NaN when the sigmoid saturates.
        """
        A_out = np.clip(self.AL['A' + str(self.L)], 1e-12, 1 - 1e-12)
        cost = -np.sum(self.Y * np.log(A_out) + (1 - self.Y) * np.log(1 - A_out)) / self.m
        return np.squeeze(cost)

    # ---- backward propagation ----

    def linear_backward(self, i):
        """Given dZ_i, compute dW_i, db_i and dA_{i-1}."""
        self.dW['dW' + str(i)] = np.dot(self.dZ['dZ' + str(i)], self.AL["A" + str(i - 1)].T) / self.m
        self.db['db' + str(i)] = np.sum(self.dZ['dZ' + str(i)], axis=1, keepdims=True) / self.m
        self.dA['dA' + str(i - 1)] = np.dot(self.WL['W' + str(i)].T, self.dZ['dZ' + str(i)])

    def L_model_backforward(self):
        """Full backward pass, filling dW/db for every layer."""
        # Output layer: dA_L = -(Y/A - (1-Y)/(1-A)).
        # NOTE(review): this can divide by zero if A saturates exactly;
        # in practice the sigmoid keeps A strictly inside (0, 1).
        A_out = self.AL['A' + str(self.L)]
        self.dA['dA' + str(self.L)] = -(np.divide(self.Y, A_out) - np.divide(1 - self.Y, 1 - A_out))
        # Sigmoid derivative, inlined (consistent with the ReLU case below).
        s = 1 / (1 + np.exp(-self.ZL['Z' + str(self.L)]))
        self.dZ['dZ' + str(self.L)] = self.dA['dA' + str(self.L)] * s * (1 - s)
        self.linear_backward(self.L)
        # Hidden layers L-1 .. 1: ReLU derivative is 1 where Z > 0, else 0.
        for i in reversed(range(1, self.L)):
            self.dZ['dZ' + str(i)] = self.dA['dA' + str(i)] * (self.ZL['Z' + str(i)] > 0)
            self.linear_backward(i)

    # ---- parameter update ----

    def update_wb(self):
        """One gradient-descent step on every W_i and b_i."""
        for i in range(1, self.L + 1):
            self.WL['W' + str(i)] -= self.learning_rate * self.dW['dW' + str(i)]
            self.bL['b' + str(i)] -= self.learning_rate * self.db['db' + str(i)]

    def train(self, iterations):
        """Run `iterations` steps of forward/backward/update.

        Returns the list of costs recorded every 100 iterations.
        """
        costs = []
        for i in range(iterations):
            self.L_model_forward()
            cost = self.computer_cost()
            self.L_model_backforward()
            self.update_wb()
            if i % 100 == 0:
                costs.append(cost)
                print("第" + str(i) + "次迭代cost:" + str(cost))
        return costs

    def predict(self, X):
        """Return a (1, m) boolean array of class predictions for X."""
        self.set_data(X)
        self.L_model_forward()
        return self.AL['A' + str(self.L)] >= 0.5
np.random.seed(1)

# Load the cat / non-cat image dataset.
# train_x_orig: (209, 64, 64, 3); train_y: (1, 209)
train_x_orig, train_y, test_x_orig, test_y, classes = load_dataset()

# Flatten each image into a column vector and rescale pixels into [0, 1].
train_x = train_x_orig.reshape(train_x_orig.shape[0], -1).T / 255
test_x = test_x_orig.reshape(test_x_orig.shape[0], -1).T / 255

# A 4-layer network: 12288 -> 20 -> 7 -> 5 -> 1.
my_dnn = dnn([12288, 20, 7, 5, 1])
my_dnn.input_data(train_x, train_y, 0.0075)

print("开始训练")
costs = my_dnn.train(2400)
print("训练结束")

# Plot the cost recorded every 100 iterations.
plt.plot(costs)
plt.show()

# Accuracy on the training and test sets.
y_predict_train = my_dnn.predict(train_x)
y_predict_test = my_dnn.predict(test_x)
train_acc = (1 - np.sum(np.abs(y_predict_train - train_y)) / train_y.shape[1]) * 100
test_acc = (1 - np.sum(np.abs(y_predict_test - test_y)) / test_y.shape[1]) * 100
print("训练集准确率:")
print(str(train_acc) + "%")
print("测试集准确率:")
print(str(test_acc) + "%")
结果:
开始训练
第0次迭代cost:0.7157315134137129
第100次迭代cost:0.6747377593469114
第200次迭代cost:0.6603365433622128
第300次迭代cost:0.6462887802148751
第400次迭代cost:0.6298131216927771
第500次迭代cost:0.606005622926534
第600次迭代cost:0.5690041263975135
第700次迭代cost:0.519796535043806
第800次迭代cost:0.46415716786282285
第900次迭代cost:0.4084203004829892
第1000次迭代cost:0.3731549921606903
第1100次迭代cost:0.3057237457304712
第1200次迭代cost:0.26810152847740837
第1300次迭代cost:0.23872474827672635
第1400次迭代cost:0.20632263257914715
第1500次迭代cost:0.17943886927493577
第1600次迭代cost:0.15798735818801457
第1700次迭代cost:0.1424041301227425
第1800次迭代cost:0.1286516599788751
第1900次迭代cost:0.11244314998160745
第2000次迭代cost:0.08505631034976016
第2100次迭代cost:0.05758391198612255
第2200次迭代cost:0.04456753454696991
第2300次迭代cost:0.038082751665992044
训练结束
训练集准确率:
99.52153110047847%
测试集准确率:
78.0%