Code from Scratch Series: A Standard Neural Network with L2 Regularization

A two-layer neural network in Python 3 for classifying cat images, using the dataset from Week 2 of Course 1 of Andrew Ng's deep learning specialization (available from my blog, or by searching Baidu for the Course 1 programming-assignment downloads; put the data files in the same directory as this script). With 10 hidden nodes, a learning rate of 0.1, and regularization parameter lambd = 0.65, test-set accuracy peaks at 80% around iteration 1900, well above the 70% of logistic regression and close to the 82% of a deeper network.

# -*- coding: utf-8 -*-
"""
Created on Sat Jun  1 22:40:07 2019

@author: Administrator
"""

import numpy as np
from dnn_utils_v1_NN import sigmoid, sigmoid_backward  # only the sigmoid helpers are used below
import lr_utils
import matplotlib.pyplot as plt
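# In case dnn_utils_v1_NN is not at hand: a minimal stand-in for the two
# helpers this script actually calls (a sketch, assuming they take and return
# plain ndarrays, which is how they are used below):
# def sigmoid(Z):
#     return 1 / (1 + np.exp(-Z))          # elementwise logistic function
# def sigmoid_backward(dA, Z):
#     s = 1 / (1 + np.exp(-Z))
#     return dA * s * (1 - s)              # dZ = dA * sigmoid'(Z)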
'''
Problem: decide whether a 64x64-pixel image is a cat picture.
Two-layer network: a hidden layer with 10 nodes and an output layer with a
single node; both layers use the sigmoid activation in this implementation.
'''
# Load the data
train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = lr_utils.load_dataset()
#print(type(train_set_x_orig))#<class 'numpy.ndarray'>
#print(train_set_x_orig.shape,train_set_y.shape)#(209, 64, 64, 3) (1, 209)
'''
# Inspect an image from the dataset
index = 3
plt.imshow(train_set_x_orig[index])
'''
train_x_size = train_set_x_orig.shape[0]
test_x_size = test_set_x_orig.shape[0]
image_height = image_width = train_set_x_orig.shape[1]

# Preprocessing: flatten each image into a column vector, then normalize
train_set_x = train_set_x_orig.reshape(train_x_size,image_height*image_width*3).T
test_set_x = test_set_x_orig.reshape(test_x_size,image_height*image_width*3).T
#print(train_set_x.shape)#(12288,209)
train_set_x = train_set_x/255.0# rescale pixel values into [0,1]
test_set_x = test_set_x/255.0
#print(train_set_x[1:10,1:10])
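# Each 64x64x3 image flattens into a column of 64*64*3 = 12288 features,
# hence the (12288, 209) training matrix noted above.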

# Initialize the parameter matrices
def Parameter(X, dim):
    '''
    Input:  training set X, number of hidden nodes dim
    Output: initialized parameters W1, b1, W2, b2, stored in the dict parameters
    '''
    np.random.seed(3)
    features = X.shape[0]
#    train_size = X.shape[1]
    W1 = np.random.random((dim, features))/features
    b1 = np.zeros((dim, 1))
    W2 = np.random.random((1, dim))*(2/dim)
    b2 = np.zeros((1, 1))
    parameters = {'W1':W1,
                  'b1':b1,
                  'W2':W2,
                  'b2':b2}
    return parameters
'''
P = Parameter(train_set_x, dim=10)
print(P['W1'])
'''
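# Design note: np.random.random draws from U[0,1), so all initial weights are
# positive; the /features and *(2/dim) factors only shrink their scale. A more
# common alternative is small zero-centered Gaussians (a sketch, not what
# produced the results quoted above):
# W1 = np.random.randn(dim, features) * 0.01
# W2 = np.random.randn(1, dim) * 0.01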

# Forward propagation
def Proptation(X, parameters):
    '''
    Input:  training set X, parameters
    Output: Z1, A1, Z2, A2, stored in the dict proptation
    '''
    W1=parameters["W1"]
    b1=parameters['b1']
    W2=parameters['W2']
    b2=parameters['b2']
    Z1 = np.dot(W1, X) + b1
#    print(Z1.shape)
    A1 = sigmoid(Z1)
    A1 = np.array(A1)
#    print("A1",A1)
#    print(A1.shape)
    Z2 = np.dot(W2, A1) + b2
    A2 = sigmoid(Z2)
    A2 = np.array(A2)
#    print("A2",A2)
#    print(A2.shape)
    proptation={'Z1':Z1,
                'A1':A1,
                'Z2':Z2,
                'A2':A2}
    return proptation
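# The forward pass above, written out (m = number of examples):
#   Z1 = W1 @ X + b1     -> shape (dim, m)
#   A1 = sigmoid(Z1)     -> shape (dim, m)
#   Z2 = W2 @ A1 + b2    -> shape (1, m)
#   A2 = sigmoid(Z2)     -> shape (1, m)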

# Compute the total cost
def Cost(proptation, Y, lambd, parameters):
    '''
    Input:  A2 and Y; cross-entropy plus the L2 penalty gives the total cost
    Output: cost value
    '''
    W1 = parameters['W1']
    W2 = parameters['W2']
    A2 = proptation['A2']
    m = Y.shape[1]
#    ones = np.ones((1,m))
    L2_regularization_cost =(np.sum(np.square(W1)) + np.sum(np.square(W2)))*lambd/(2*m)
    cost = (1./m) * (-np.dot(Y,np.log(A2).T) - np.dot(1-Y, np.log(1-A2).T)) 
    cost = np.squeeze(cost) + L2_regularization_cost
    assert (cost.shape == ())
    return cost
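# The regularized cost computed above, written out:
#   J = -(1/m) * sum_i [ y_i*log(a2_i) + (1-y_i)*log(1-a2_i) ]
#       + (lambd/(2m)) * ( ||W1||_F^2 + ||W2||_F^2 )
# The lambd/(2m) scaling is what makes the L2 term contribute exactly
# (lambd/m)*W to each weight gradient in the backward pass below.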

# Compute dA2
def Cal_dAL(proptation, Y):
    '''
    Input:  AL (here A2)
    Output: dAL
    '''
    AL= proptation['A2']
    dAL = -(np.divide(Y, AL) -np.divide(1 - Y, 1 - AL))
    return dAL
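# Derivation note: dAL is the derivative of the cross-entropy term w.r.t. A2,
#   dA2 = -( Y/A2 - (1-Y)/(1-A2) ),
# and chaining it through sigmoid_backward (dZ = dA * s * (1-s) with s = A2)
# collapses to the familiar dZ2 = A2 - Y.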

# Backward propagation
def BN(X, Y, proptation, parameters, lambd):
    '''
    Input:  training set, labels, forward-pass values Z and A, parameters W and b
    Output: gradients dW and db, stored in the dict grad
    '''
    m = X.shape[1]
    A0 = np.copy(X)
    A1 = proptation['A1']
#    A2 = proptation['A2']
    Z1 = proptation['Z1']
    Z2 = proptation['Z2']
    W1 = parameters['W1']
    W2 = parameters['W2']
    
    dA2 = Cal_dAL(proptation, Y)
    dZ2 = sigmoid_backward(dA2, Z2)
    dW2 = 1/m * np.dot(dZ2, A1.T) + lambd/m*W2
    db2 = 1/m * np.sum(dZ2, axis=1, keepdims=True)
    dA1 = np.dot(W2.T, dZ2)
    dZ1 = sigmoid_backward(dA1, Z1)
    dW1 = 1/m * np.dot(dZ1, A0.T) + lambd/m*W1
    db1 = 1/m * np.sum(dZ1, axis=1, keepdims=True)
    grad={'dW1':dW1,
          'dW2':dW2,
          'db1':db1,
          'db2':db2}
    return grad
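# The backward pass above, written out (the L2 penalty adds (lambd/m)*W to
# each weight gradient, matching the lambd/(2m) term in the cost):
#   dZ2 = dA2 * sigmoid'(Z2)
#   dW2 = (1/m) * dZ2 @ A1.T + (lambd/m) * W2
#   db2 = (1/m) * sum(dZ2, axis=1)
#   dA1 = W2.T @ dZ2;  dZ1 = dA1 * sigmoid'(Z1)
#   dW1 = (1/m) * dZ1 @ A0.T + (lambd/m) * W1
#   db1 = (1/m) * sum(dZ1, axis=1)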

# Update the parameters
def Renew_Parameters(parameters, grad, lr):
    '''
    Input:  parameters, gradients, learning rate
    Output: updated parameters
    '''
    W1 = parameters['W1']
    W2 = parameters['W2']
    b1 = parameters['b1']
    b2 = parameters['b2']  
    dW1 = grad['dW1']
    dW2 = grad['dW2']
    db1 = grad['db1']
    db2 = grad['db2']
    W1 = W1 - lr*dW1
    W2 = W2 - lr*dW2
    b1 = b1 - lr*db1
    b2 = b2 - lr*db2
    parameters = {'W1':W1,
                  'b1':b1,
                  'W2':W2,
                  'b2':b2}
    return parameters

# Make predictions
def Prediction(proptation, Y):
    '''
    Input:  predicted A2, true labels Y
    Output: predictions, accuracy percentage
    '''
    m = Y.shape[1]
    A2 = proptation['A2']
    prediction = []
    for i in range(m):
        if A2[0,i] <= 0.5:
            prediction.append(0)
        else:
            prediction.append(1)
    assert(len(prediction) == m)
    accuracy_rate = (np.sum(prediction==Y)/m)*100# accuracy as a percentage
    return prediction, accuracy_rate
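# The loop above can be vectorized (a sketch, equivalent to the loop):
# prediction = (A2 > 0.5).astype(int)              # shape (1, m)
# accuracy_rate = np.mean(prediction == Y) * 100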
    
# Build the model
def Model(X, Y, test_set_x, test_set_y, dim=10, lr=0.01, num_iteration=10, print_cost=False, lambd=0.1):
    parameters = Parameter(X, dim)                         # initialize parameters
    for i in range(num_iteration):                         # start iterating
        proptation = Proptation(X, parameters)             # forward propagation
        cost = Cost(proptation, Y, lambd, parameters)      # compute the cost
        grad = BN(X, Y, proptation, parameters, lambd)     # backward propagation
        parameters = Renew_Parameters(parameters, grad, lr)# update parameters
        if print_cost and i % 100 == 0:
            prediction, accuracy_rate = Prediction(proptation, Y)
#            print('Training accuracy: {}%'.format(accuracy_rate))
            pro = Proptation(test_set_x, parameters)       # predictions on the test set
            pred, rate = Prediction(pro, test_set_y)       # test-set accuracy
            print('Iteration {}: training accuracy {}%, test accuracy {}%'.format(i, accuracy_rate, rate))
    p = {'dim':dim,                                        # record hyperparameters and learned weights
         'num_iteration':num_iteration,
         'lr':lr,
         'W1':parameters['W1'],
         'W2':parameters['W2'],
         'b1':parameters['b1'],
         'b2':parameters['b2'],
         }
    return p
p = Model(train_set_x, train_set_y, test_set_x, test_set_y, lambd=0.65, dim=10, lr=0.1, num_iteration=10000, print_cost=True)

# Evaluate on the test set
#proptation = Proptation(test_set_x, p)
#pred, accuracy_rate = Prediction(proptation, test_set_y)
#print('Test accuracy: {}%'.format(accuracy_rate))