The DNN Backpropagation Algorithm

  • Backpropagation is the process of iteratively optimizing the DNN's loss function with gradient descent to find a minimum.
  • The goal of DNN backpropagation
    Given a network consisting of an input layer, some number of hidden layers, and an output layer, find suitable linear weight matrices W and bias vectors b for every hidden and output layer, so that the network's output for each training sample equals, or comes as close as possible to, that sample's true label.
  • How to obtain the weights W and biases b
    Choose a suitable loss function to measure the output error on the training samples, then minimize that loss; the minimizer yields the corresponding series of weight matrices W and bias vectors b (see the update rule sketched below).
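
Concretely, for the binary cross-entropy loss that the single-neuron code below also uses (the symbols J, a^{(i)}, and α are standard notation introduced here for illustration, not from the original), gradient descent repeats the update:

J(w, b) = -\frac{1}{m}\sum_{i=1}^{m}\left[y^{(i)}\log a^{(i)} + (1 - y^{(i)})\log\left(1 - a^{(i)}\right)\right],\qquad a^{(i)} = \sigma\!\left(w^{T}x^{(i)} + b\right)

w \leftarrow w - \alpha\,\frac{\partial J}{\partial w},\qquad b \leftarrow b - \alpha\,\frac{\partial J}{\partial b}

where α is the learning rate. The functions propagate and optimize below implement exactly these two steps.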

Training and prediction with forward propagation and backpropagation

import numpy as np
import h5py
    
    
def load_dataset():
    train_dataset = h5py.File('datasets/train_catvnoncat.h5', "r")
    train_set_x_orig = np.array(train_dataset["train_set_x"][:]) # your train set features
    train_set_y_orig = np.array(train_dataset["train_set_y"][:]) # your train set labels

    test_dataset = h5py.File('datasets/test_catvnoncat.h5', "r")
    test_set_x_orig = np.array(test_dataset["test_set_x"][:]) # your test set features
    test_set_y_orig = np.array(test_dataset["test_set_y"][:]) # your test set labels

    classes = np.array(test_dataset["list_classes"][:]) # the list of classes
    
    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
    
    return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes

"""神经网络的模型读取训练过程
"""
import numpy as np
from data import load_dataset

def basic_sigmoid(x):
    s = 1 / (1 + np.exp(-x))
    return s
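
Note that basic_sigmoid overflows in np.exp for large negative inputs (NumPy emits a RuntimeWarning, though the result still converges to 0, so training usually proceeds). If the warnings are a concern, a numerically stable variant is possible; stable_sigmoid below is a sketch, not part of the original code:

def stable_sigmoid(x):
    # Compute sigmoid without overflowing np.exp:
    # for x >= 0 use 1 / (1 + exp(-x)); for x < 0 use exp(x) / (1 + exp(x))
    out = np.empty_like(x, dtype=float)
    pos = x >= 0
    out[pos] = 1.0 / (1.0 + np.exp(-x[pos]))
    exp_x = np.exp(x[~pos])
    out[~pos] = exp_x / (1.0 + exp_x)
    return out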

def initialize_with_zeros(shape):
    w = np.zeros((shape, 1))
    b = 0
    return w, b
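
Zero initialization works here because a single neuron has no symmetry to break; in a true multi-layer DNN, all-zero weights make every hidden unit compute the same value and receive the same gradient, so the units never differentiate. A minimal sketch of the usual fix (small random weights; the helper and layer sizes are illustrative, not from the original):

def initialize_random(n_in, n_out, seed=0):
    # Small random weights break the symmetry between hidden units;
    # biases can still start at zero
    rng = np.random.default_rng(seed)
    W = rng.standard_normal((n_out, n_in)) * 0.01
    b = np.zeros((n_out, 1))
    return W, b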

def propagate(w, b, X, Y):
    """Forward and backward propagation for a single-neuron network.
    :param w: weights, shape (n, 1)
    :param b: bias (scalar)
    :param X: input features, shape (n, m)
    :param Y: target labels, shape (1, m)
    :return: grads, cost
    """
    m = X.shape[1]
    # Forward propagation
    # w: (64 * 64 * 3, 1), X: (64 * 64 * 3, 209)
    A = basic_sigmoid(np.dot(w.T, X) + b)

    cost = -1 / m * np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A))

    # Backpropagation
    dz = A - Y
    dw = 1 / m * np.dot(X, dz.T)
    db = 1 / m * np.sum(dz)

    grads = {
        "dw": dw,
        "db": db
    }

    return grads, cost
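
A quick way to verify that the analytic gradients from propagate are correct is a numerical gradient check, comparing dw against a centered finite difference of the cost. The sketch below is illustrative (gradient_check and eps are not part of the original code):

def gradient_check(w, b, X, Y, eps=1e-7):
    # Compare the analytic dw from propagate with a centered
    # finite-difference estimate of d(cost)/d(w_i)
    grads, _ = propagate(w, b, X, Y)
    num_dw = np.zeros_like(w)
    for i in range(w.shape[0]):
        w_plus, w_minus = w.copy(), w.copy()
        w_plus[i, 0] += eps
        w_minus[i, 0] -= eps
        _, cost_plus = propagate(w_plus, b, X, Y)
        _, cost_minus = propagate(w_minus, b, X, Y)
        num_dw[i, 0] = (cost_plus - cost_minus) / (2 * eps)
    # The relative difference should be very small (e.g. < 1e-6)
    diff = np.linalg.norm(grads["dw"] - num_dw) / (np.linalg.norm(grads["dw"]) + np.linalg.norm(num_dw))
    return diff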


def optimize(w, b, train_x, train_y, num_iterations, learning_rate):
    """
    :param w: initial weights
    :param b: initial bias
    :param train_x: training set features
    :param train_y: training set labels
    :param num_iterations: number of gradient-descent iterations
    :param learning_rate: learning rate
    :return: params, grads, costs
    """
    costs = []

    for i in range(num_iterations):
        grads, cost = propagate(w, b, train_x, train_y)
        # Gradient-descent update: w := w - learning_rate * dw
        w = w - learning_rate * grads['dw']
        b = b - learning_rate * grads['db']

        # Record and report the cost every 100 iterations
        if i % 100 == 0:
            costs.append(cost)
            print("cost after iteration %i: %f" % (i, cost))

    # Return the learned parameters
    params = {
        "w": w,
        "b": b}

    grads = {
        "dw": grads['dw'],
        "db": grads['db']}
    return params, grads, costs
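
As a quick sanity check, optimize can be run on tiny synthetic data; the shapes and values here are illustrative, not from the original dataset:

w0, b0 = initialize_with_zeros(2)
X_toy = np.array([[1.0, 2.0, -1.0], [3.0, 0.5, -2.0]])  # 2 features, 3 samples
Y_toy = np.array([[1, 0, 1]])
params_toy, grads_toy, costs_toy = optimize(w0, b0, X_toy, Y_toy, num_iterations=200, learning_rate=0.1)
print(params_toy["w"], params_toy["b"])  # cost in costs_toy should decrease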


def predict(w, b, X):
    m = X.shape[1]
    Y_prediction = np.zeros((1, m))
    w = w.reshape(X.shape[0], 1)
    A = basic_sigmoid(np.dot(w.T, X) + b)
    for i in range(A.shape[1]):
        if A[0, i] <= 0.5:
            Y_prediction[0, i] = 0
        else:
            Y_prediction[0, i] = 1
    assert (Y_prediction.shape == (1, m))
    return Y_prediction
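
The thresholding loop in predict can also be written as a single vectorized expression, which is equivalent and faster for large m. A sketch (predict_vectorized is not part of the original code):

def predict_vectorized(w, b, X):
    A = basic_sigmoid(np.dot(w.reshape(X.shape[0], 1).T, X) + b)
    # Threshold all activations at 0.5 in one vectorized step
    return (A > 0.5).astype(float)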


def model(train_x, train_y, test_x, test_y, num_iterations=2000, learning_rate=0.005):
    """
    :param train_x: training set features
    :param train_y: training set labels
    :param test_x: test set features
    :param test_y: test set labels
    :param num_iterations: number of gradient-descent iterations
    :param learning_rate: learning rate
    :return: dict of results
    """
    w, b = initialize_with_zeros(train_x.shape[0])
    params, grads, costs = optimize(w, b, train_x, train_y, num_iterations, learning_rate)

    Y_prediction_train = predict(params["w"], params["b"], train_x)
    Y_prediction_test = predict(params["w"], params["b"], test_x)

    print("训练集准确率: {} ".format(100 - np.mean(np.abs(Y_prediction_train - train_y)) * 100))
    print("测试集准确率: {} ".format(100 - np.mean(np.abs(Y_prediction_test - test_y)) * 100))

    d = {"costs": costs,
         "Y_prediction_test": Y_prediction_test,
         "Y_prediction_train": Y_prediction_train,
         "w": w,
         "b": b,
         "learning_rate": learning_rate,
         "num_iterations": num_iterations}

    return d


def main():
    # 1. Load the sample data
    train_x, train_y, test_x, test_y, classes = load_dataset()
    print("number of training samples: ", train_x.shape[0])
    print("number of test samples: ", test_x.shape[0])
    print("train_x shape: ", train_x.shape)
    print("train_y shape: ", train_y.shape)
    print("test_x shape: ", test_x.shape)
    print("test_y shape: ", test_y.shape)

    # Flatten each (64, 64, 3) image into a column vector and scale pixels to [0, 1]
    train_x = train_x.reshape(train_x.shape[0], -1).T
    test_x = test_x.reshape(test_x.shape[0], -1).T
    train_x = train_x / 255.
    test_x = test_x / 255.

    # 2. Train the model and evaluate predictions
    d = model(train_x, train_y, test_x, test_y, num_iterations=2000, learning_rate=0.005)


if __name__ == '__main__':
    main()
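
To inspect convergence, the recorded costs can be plotted against the iteration number. A sketch assuming matplotlib is available (main would need to return or expose d for this to run as-is):

import matplotlib.pyplot as plt

d = model(train_x, train_y, test_x, test_y, num_iterations=2000, learning_rate=0.005)
plt.plot(np.arange(len(d["costs"])) * 100, d["costs"])  # costs were recorded every 100 iterations
plt.xlabel("iteration")
plt.ylabel("cost")
plt.title("learning_rate = {}".format(d["learning_rate"]))
plt.show()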