Deep Learning from Scratch (Python implementation) -- Chapter 4: Neural Network Learning, Part 03

# _*_ coding:UTF-8 _*_
# Author: zyh
# Created: 2020/8/29 8:26
# File: Learning_of_Neural_Networks_03.py
# IDE: PyCharm
"""
Gradients of a neural network
"""
# import sys, os
import matplotlib.pylab as plt
# sys.path.append(os.pardir)
import numpy as np


# from chapter3_neural_network.neural_network_03 import softmax
# from chapter4_Learning_of_Neural_Networks.Learning_of_Neural_Networks_01 import cross_entropy_error
# from chapter4_Learning_of_Neural_Networks.Learning_of_Neural_Networks_02 import numerical_gradient


def softmax(a):
    # Subtract the max before exponentiating to prevent overflow (e.g. e**100 is already
    # a huge number); see the book for the mathematical justification.
    if a.ndim == 2:  # batch input: normalize each row independently
        exp_a = np.exp(a - np.max(a, axis=1, keepdims=True))
        return exp_a / np.sum(exp_a, axis=1, keepdims=True)
    exp_a = np.exp(a - np.max(a))
    return exp_a / np.sum(exp_a)
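

# Quick sanity check (illustrative, not in the original file): each row of a 2D
# softmax output should sum to 1.
print(np.sum(softmax(np.array([[0.3, 2.9, 4.0], [0.1, 0.2, 0.3]])), axis=1))  # -> [1. 1.]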


def cross_entropy_error(y, t):
    """Cross-entropy error; for batched (2D) input the result is averaged over the batch."""
    delta = 1e-7  # guard value so that np.log(0) does not become -inf
    if y.ndim == 1:
        return -np.sum(t * np.log(y + delta))
    return -np.sum(t * np.log(y + delta)) / y.shape[0]
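

# Worked example (illustrative): for a one-hot target with the correct class at index 2,
# a confident prediction gives a small loss and an unsure one a larger loss.
print(cross_entropy_error(np.array([0.1, 0.1, 0.8]), np.array([0, 0, 1])))  # ~0.22
print(cross_entropy_error(np.array([0.3, 0.4, 0.3]), np.array([0, 0, 1])))  # ~1.20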


def numerical_gradient(f, x):
    h = 1e-4  # 0.0001
    grad = np.zeros_like(x)

    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        idx = it.multi_index
        tmp_val = x[idx]
        x[idx] = tmp_val + h
        fxh1 = f(x)  # f(x+h)

        x[idx] = tmp_val - h
        fxh2 = f(x)  # f(x-h)
        grad[idx] = (fxh1 - fxh2) / (2 * h)

        x[idx] = tmp_val  # restore the original value
        it.iternext()

    return grad
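

# Quick check (illustrative): the gradient of f(x0, x1) = x0**2 + x1**2 at (3.0, 4.0)
# should be approximately [6.0, 8.0].
print(numerical_gradient(lambda v: np.sum(v ** 2), np.array([3.0, 4.0])))  # -> [6. 8.]

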
class SimpleNet(object):
    """A simple neural network class."""

    def __init__(self):
        self.W = np.random.randn(2, 3)  # initialize a 2x3 weight matrix with Gaussian-distributed values

    def predict(self, x):
        return np.dot(x, self.W)

    def loss(self, x, t):
        z = self.predict(x)
        y = softmax(z)
        loss = cross_entropy_error(y, t)

        return loss


net = SimpleNet()
print(net.W)  # the randomly initialized 2x3 weight matrix
x = np.array([0.6, 0.9])  # input
p = net.predict(x)
print(p)  # raw scores before softmax
t = np.array([0, 0, 1])  # one-hot label: the correct class is index 2
print(net.loss(x, t))
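
# The gradient of the network's weights (the topic of this section): wrap the loss in a
# function of W and differentiate it numerically. The lambda ignores its argument because
# numerical_gradient perturbs net.W in place before each call to net.loss.
f = lambda w: net.loss(x, t)
dW = numerical_gradient(f, net.W)
print(dW)  # 2x3 matrix of partial derivatives dL/dW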


def sigmoid(x):
    """
    Sigmoid activation function.
    :param x: numpy array (any shape)
    :return: numpy array of the same shape with values in (0, 1)
    """
    return 1 / (1 + np.exp(-x))


"""
下面来完整的实现下神经网络的学习过程,以两层学习网络为例
神经网络的学习分成以下四个步骤
① mini-batch
从训练数据中选出一部分数据来进行拟合训练,并根据结果来调整参数以减小损失函数值
② 计算梯度
求出各个权重参数的梯度,为参数更新提供参考
③ 更新参数
将权重参数沿着梯度方向更新
④ 重复
重复 一、二和三步骤

"""


class TwoLayerNet(object):
    def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):
        """
        神经网络初始化
        :param input_size: 输入层的神经元数
        :param hidden_size: 隐藏层神经元数
        :param output_size: 输出层神经元数
        :param weight_init_std:
        """
        # 初始化权重
        self.params = {}
        self.params['W1'] = weight_init_std * np.random.randn(input_size, hidden_size)
        self.params['b1'] = np.zeros(hidden_size)
        self.params['W2'] = weight_init_std * np.random.randn(hidden_size, output_size)
        self.params['b2'] = np.zeros(output_size)

    def predict(self, x):
        """
        进行识别
        :param x:图像数据
        :return: 输出预测0,1
        """
        W1, W2 = self.params['W1'], self.params['W2']
        b1, b2 = self.params['b1'], self.params['b2']

        a1 = np.dot(x, W1) + b1
        z1 = sigmoid(a1)
        a2 = np.dot(z1, W2) + b2
        y = softmax(a2)

        return y

    # x: input data, t: supervision data (one-hot labels)
    def loss(self, x, t):
        y = self.predict(x)

        return cross_entropy_error(y, t)

    def accuracy(self, x, t):
        y = self.predict(x)
        y = np.argmax(y, axis=1)
        t = np.argmax(t, axis=1)

        accuracy = np.sum(y == t) / float(x.shape[0])
        return accuracy

    # x: input data, t: supervision data (one-hot labels)
    def numerical_gradient(self, x, t):
        # loss_W ignores its argument: the module-level numerical_gradient perturbs each
        # parameter array in place, so self.loss(x, t) already sees the perturbed values
        loss_W = lambda W: self.loss(x, t)

        grads = {}
        grads['W1'] = numerical_gradient(loss_W, self.params['W1'])
        grads['b1'] = numerical_gradient(loss_W, self.params['b1'])
        grads['W2'] = numerical_gradient(loss_W, self.params['W2'])
        grads['b2'] = numerical_gradient(loss_W, self.params['b2'])

        return grads
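

# Quick shape check (illustrative only; the training network itself is created below).
_net = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)
print(_net.params['W1'].shape, _net.params['b1'].shape)  # (784, 50) (50,)
print(_net.params['W2'].shape, _net.params['b2'].shape)  # (50, 10) (10,)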


"""mini-batch的实现"""
from dataset.mnist import load_mnist
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)

train_loss_list = []

# Hyperparameters
iters_num = 10000
train_size = x_train.shape[0]
batch_size = 100
learning_rate = 0.1
network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)
train_acc_list = []
test_acc_list = []

iter_per_epoch = max(train_size / batch_size, 1)
for i in range(iters_num):
    # Sample a mini-batch
    batch_mask = np.random.choice(train_size, batch_size)
    x_batch = x_train[batch_mask]
    t_batch = t_train[batch_mask]

    # Compute the gradient by numerical differentiation
    # (very slow; error backpropagation computes the same gradients far faster)
    grad = network.numerical_gradient(x_batch, t_batch)

    # Update the parameters (one gradient-descent step)
    for key in ('W1', 'b1', 'W2', 'b2'):
        network.params[key] -= learning_rate * grad[key]

    loss = network.loss(x_batch, t_batch)
    train_loss_list.append(loss)

    # Record accuracy once per epoch
    if i % iter_per_epoch == 0:
        train_acc = network.accuracy(x_train, t_train)
        test_acc = network.accuracy(x_test, t_test)
        train_acc_list.append(train_acc)
        test_acc_list.append(test_acc)
        print("train acc, test acc | " + str(train_acc) + ", " + str(test_acc))