# coding:utf-8
import sys, os
sys.path.append(os.pardir)
import numpy as np
from dataset.mnist import load_mnist
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)
print(x_train.shape)  # (60000, 784): 60000 samples, each a 784-dimensional vector
print(t_train.shape)  # (60000, 10): 60000 samples, each a 10-dimensional one-hot label
# -------------------------------- Sample a mini-batch of data ---------------------------------------------------
train_size = x_train.shape[0]
batch_size = 10  # sample 10 examples
batch_mask = np.random.choice(train_size, batch_size)  # randomly draw 10 indices from the 60000 training samples
x_batch = x_train[batch_mask]  # select the inputs at those indices
t_batch = t_train[batch_mask]  # select the corresponding supervision labels
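# A quick sanity check (a minimal sketch, not part of the original listing):
# the mini-batch keeps the feature/label dimensions of the full arrays,
# only the first axis shrinks to batch_size.
print(x_batch.shape)  # (10, 784)
print(t_batch.shape)  # (10, 10)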
# ------------------------- Mini-batch cross-entropy error, t in one-hot form ----------------------------------------------
def cross_entropy_error(y, t):
    if y.ndim == 1:  # if the network output y is 1-dimensional (a single sample)
        t = t.reshape(1, t.size)  # reshape to a (1, N) row so the batch axis exists
        y = y.reshape(1, y.size)  # and y.shape[0] below gives the batch size
    batch_size = y.shape[0]
    return -np.sum(t * np.log(y + 1e-7)) / batch_size  # 1e-7 avoids log(0)
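# A minimal usage sketch of the one-hot version; y_demo and t_demo are made-up
# values for illustration (a softmax-like output against a one-hot label).
y_demo = np.array([0.1, 0.05, 0.6, 0.0, 0.05, 0.1, 0.0, 0.1, 0.0, 0.0])
t_demo = np.array([0, 0, 1, 0, 0, 0, 0, 0, 0, 0])
print(cross_entropy_error(y_demo, t_demo))  # ~0.51, i.e. -log(0.6 + 1e-7)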
# -------------------------- Mini-batch cross-entropy error, t as class labels (non-one-hot) ------------------------------------------------
def cross_entropy_error_2(y, t):
    if y.ndim == 1:
        t = t.reshape(1, t.size)  # add a batch axis to a single sample
        y = y.reshape(1, y.size)
    batch_size = y.shape[0]
    return -np.sum(np.log(y[np.arange(batch_size), t] + 1e-7)) / batch_size
# np.arange(batch_size) generates the array 0 ~ batch_size-1; because t holds class labels
# rather than one-hot vectors, it can index the output of the correct class directly, e.g. y[0, 1].
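# A minimal sketch of that fancy indexing; y_demo2 and t_demo2 are made-up values
# for illustration: two samples, with the correct class index stored per sample.
y_demo2 = np.array([[0.1, 0.6, 0.3],
                    [0.8, 0.1, 0.1]])
t_demo2 = np.array([1, 0])
print(y_demo2[np.arange(2), t_demo2])           # [0.6 0.8]
print(cross_entropy_error_2(y_demo2, t_demo2))  # ~0.37, mean of -log(0.6) and -log(0.8)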