Reference: 智能计算系统实验2 — 名字不如叫摸鱼的博客 (CSDN blog)
File structure
|-- main_exp_2_1.py      main file
|-- readme.txt
`-- stu_upload           folder with the actual implementation; the blanks to fill in are here
    |-- __init__.py
    |-- layers_1.py      implements the fully connected, ReLU, and softmax layers
    `-- mnist_mlp_cpu.py assembles the network
Code
layers_1.py
Function notes
np.random.normal
: draws random samples from a normal (Gaussian) distribution; loc is the mean, scale is the standard deviation, and size is the shape of the output
np.zeros
: returns a new array of the given shape and type, filled with zeros
np.dot
: matrix multiplication
- in reductions such as np.sum and np.max, axis=0 collapses the rows and axis=1 collapses the columns
- keepdims controls whether the output keeps the original number of dimensions
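A minimal sketch illustrating the calls above (all values here are arbitrary):

import numpy as np

w = np.random.normal(loc=0.0, scale=0.01, size=(3, 2))  # mean 0.0, standard deviation 0.01, output shape (3, 2)
b = np.zeros([1, 2])                                     # zero-filled array of shape (1, 2)
x = np.ones((4, 3))
y = np.dot(x, w) + b                                     # (4, 3) x (3, 2) -> (4, 2); b broadcasts over the rows
s0 = np.sum(y, axis=0)                                   # axis=0 collapses the rows -> shape (2,)
s1 = np.sum(y, axis=1, keepdims=True)                    # axis=1 collapses the columns; keepdims keeps it 2-D -> shape (4, 1)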
Code walkthrough
Note that the dimensions must line up; the ReLU and softmax layers have no parameters to update. A shape sketch follows.
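For reference, how shapes flow through the three-layer network assembled later in mnist_mlp_cpu.py (hidden1, hidden2, and out_classes are the MNIST_MLP constructor arguments; batch_size is set by the caller):

# input:   (batch_size, 784)
# fc1:     (batch_size, 784) x (784, hidden1) + (1, hidden1) -> (batch_size, hidden1)
# relu1:   elementwise, shape unchanged
# fc2:     (batch_size, hidden1) x (hidden1, hidden2)        -> (batch_size, hidden2)
# relu2:   elementwise, shape unchanged
# fc3:     (batch_size, hidden2) x (hidden2, out_classes)    -> (batch_size, out_classes)
# softmax: row-wise normalization, shape unchanged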
# coding=utf-8
import numpy as np
import struct
import os
import time
class FullyConnectedLayer(object):
    def __init__(self, num_input, num_output):  # fully connected layer initialization
self.num_input = num_input
self.num_output = num_output
print('\tFully connected layer with input %d, output %d.' % (self.num_input, self.num_output))
    def init_param(self, std=0.01):  # parameter initialization
self.weight = np.random.normal(loc=0.0, scale=std, size=(self.num_input, self.num_output))
self.bias = np.zeros([1, self.num_output])
        # note: the bias is initialized to zero
    def forward(self, input):  # forward pass
start_time = time.time()
self.input = input
        # TODO: forward pass of the fully connected layer; compute the output
        # Y = XW + b (Eq. 2.3)
        # note the order of the matrix product: X first, then W
self.output = np.dot(self.input, self.weight) + self.bias
return self.output
    def backward(self, top_diff):  # backward pass
        # TODO: backward pass of the fully connected layer; compute the parameter gradients and this layer's output gradient
        # (Eq. 2.4): d_weight = X^T * top_diff, d_bias = 1^T * top_diff, bottom_diff = top_diff * W^T
        # d_weight must have shape (num_input, num_output), hence X transposed times top_diff
        self.d_weight = np.dot(self.input.T, top_diff)
        # in the book top_diff has shape (1, num_output); in practice gradients are computed batch-wise, so its shape is (batch_size, num_output)
        self.batch_size = top_diff.shape[0]
        # multiplying (1, batch_size) by (batch_size, num_output) sums top_diff over the batch while keeping d_bias 2-D
        self.d_bias = np.dot(np.ones(shape=(1, self.batch_size)), top_diff)
bottom_diff = np.dot(top_diff, self.weight.T)
return bottom_diff
    def update_param(self, lr):  # parameter update
        # TODO: update the fully connected layer's parameters using their gradients
self.weight = self.weight - lr * self.d_weight
self.bias = self.bias - lr * self.d_bias
    def load_param(self, weight, bias):  # load parameters
assert self.weight.shape == weight.shape
assert self.bias.shape == bias.shape
self.weight = weight
self.bias = bias
    def save_param(self):  # save parameters
return self.weight, self.bias
class ReLULayer(object):
def __init__(self):
print('\tReLU layer.')
    def forward(self, input):  # forward pass
start_time = time.time()
self.input = input
        # TODO: forward pass of the ReLU layer; compute the output
        # (Eq. 2.5): output = max(0, input)
output = np.maximum(0, self.input)
return output
    def backward(self, top_diff):  # backward pass
        # TODO: backward pass of the ReLU layer; compute this layer's output gradient
        # (Eq. 2.6): the gradient passes through where the forward input was non-negative and is zeroed elsewhere
        # copy first so the caller's top_diff array is not modified in place
        bottom_diff = top_diff.copy()
        bottom_diff[self.input < 0] = 0
return bottom_diff
class SoftmaxLossLayer(object):
def __init__(self):
print('\tSoftmax loss layer.')
    def forward(self, input):  # forward pass
        # TODO: forward pass of the softmax loss layer; compute the output
        # (Eq. 2.11)
        # input has shape (batch_size, num_input)
        input_max = np.max(input, axis=1, keepdims=True)  # input_max has shape (batch_size, 1)
        input_exp = np.exp(input - input_max)  # subtracting the row max avoids overflow; input_exp has shape (batch_size, num_input)
        # note: without keepdims, np.sum would return a 1-D array, so keepdims is needed for the division to broadcast correctly
        self.prob = input_exp / np.sum(input_exp, axis=1, keepdims=True)
return self.prob
    def get_loss(self, label):  # compute the loss
        # self.prob has shape (batch_size, num_input)
        # (Eq. 2.12): cross-entropy loss averaged over the batch
        self.batch_size = self.prob.shape[0]
        self.label_onehot = np.zeros_like(self.prob)
        self.label_onehot[np.arange(self.batch_size), label] = 1.0  # in each row, set the column given by the label to 1
loss = -np.sum(np.log(self.prob) * self.label_onehot) / self.batch_size
return loss
    def backward(self):  # backward pass
        # TODO: backward pass of the softmax loss layer; compute this layer's output gradient
        # (Eq. 2.13): bottom_diff = (prob - onehot_label) / batch_size
bottom_diff = (self.prob - self.label_onehot) / self.batch_size
return bottom_diff
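As an optional sanity check (not part of the lab code), the analytical gradients above can be compared against a numerical gradient computed by central differences; a minimal sketch, assuming layers_1.py is on the import path:

import numpy as np
from layers_1 import FullyConnectedLayer, SoftmaxLossLayer

np.random.seed(0)
fc = FullyConnectedLayer(4, 3)
fc.init_param()
softmax = SoftmaxLossLayer()
x = np.random.randn(2, 4)
label = np.array([0, 2])

# analytical gradient: forward, loss, then backward
softmax.forward(fc.forward(x))
softmax.get_loss(label)
fc.backward(softmax.backward())
analytic = fc.d_weight[1, 2]

# numerical gradient of the loss w.r.t. weight[1, 2], by central differences
def loss_with(w):
    fc.weight[1, 2] = w
    softmax.forward(fc.forward(x))
    return softmax.get_loss(label)

eps, w0 = 1e-6, fc.weight[1, 2]
numeric = (loss_with(w0 + eps) - loss_with(w0 - eps)) / (2 * eps)
fc.weight[1, 2] = w0
print(analytic, numeric)  # the two values should agree to several decimal places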
mnist_mlp_cpu.py
Function notes
struct.unpack_from
: unpacks data from a buffer according to a format string, starting at the given offset; its arguments are the format string, the buffer to unpack, and the offset
- format string reference: https://www.cnblogs.com/flydean/p/14665510.html#%E5%AD%97%E8%8A%82%E9%A1%BA%E5%BA%8F%E5%A4%A7%E5%B0%8F%E5%92%8C%E5%AF%B9%E9%BD%90%E6%96%B9%E5%BC%8F
np.random.shuffle
: shuffles an array in place; it has no return value
np.argmax
: returns the index of the maximum element
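A minimal sketch of these three calls (the header values here are made up):

import struct
import numpy as np

buf = struct.pack('>ii', 2051, 60000)          # build a fake big-endian header: magic number, item count
magic, n = struct.unpack_from('>ii', buf, 0)   # unpack two 4-byte big-endian ints starting at offset 0

a = np.array([3, 1, 4, 1, 5])
np.random.shuffle(a)                           # shuffles a in place; the return value is None
idx = np.argmax(a)                             # index of the largest element in the (shuffled) array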
Code walkthrough
# coding=utf-8
import numpy as np
import struct
import os
import time
from layers_1 import FullyConnectedLayer, ReLULayer, SoftmaxLossLayer
MNIST_DIR = "../mnist_data"
TRAIN_DATA = "train-images-idx3-ubyte"
TRAIN_LABEL = "train-labels-idx1-ubyte"
TEST_DATA = "t10k-images-idx3-ubyte"
TEST_LABEL = "t10k-labels-idx1-ubyte"
class MNIST_MLP(object):
def __init__(self, batch_size=1000, input_size=784, hidden1=32, hidden2=16, out_classes=10, lr=0.01, max_epoch=1, print_iter=100):
self.batch_size = batch_size
self.input_size = input_size
self.hidden1 = hidden1
self.hidden2 = hidden2
self.out_classes = out_classes
self.lr = lr
self.max_epoch = max_epoch
self.print_iter = print_iter
    def load_mnist(self, file_dir, is_images=True):
# Read binary data
bin_file = open(file_dir, 'rb')
bin_data = bin_file.read()
bin_file.close()
# Analysis file header
if is_images:
# Read images
            fmt_header = '>iiii'  # '>' means big-endian; each 'i' is a 4-byte int, matching the dataset's header format
magic, num_images, num_rows, num_cols = struct.unpack_from(fmt_header, bin_data, 0)
else:
# Read labels
fmt_header = '>ii'
magic, num_images = struct.unpack_from(fmt_header, bin_data, 0)
num_rows, num_cols = 1, 1
data_size = num_images * num_rows * num_cols
        # 'B' is one unsigned byte; struct.calcsize returns the size of the header format string, i.e. the offset that skips the file header
        mat_data = struct.unpack_from('>' + str(data_size) + 'B', bin_data, struct.calcsize(fmt_header))
        mat_data = np.reshape(mat_data, [num_images, num_rows * num_cols])
        # this reshapes mat_data so that each row holds one image
print('Load images from %s, number: %d, data shape: %s' % (file_dir, num_images, str(mat_data.shape)))
return mat_data
def load_data(self):
        # TODO: call load_mnist to read and preprocess the MNIST training and test images and labels
print('Loading MNIST data from files...')
train_images = self.load_mnist(os.path.join(MNIST_DIR, TRAIN_DATA), True)
train_labels = self.load_mnist(os.path.join(MNIST_DIR, TRAIN_LABEL), False)
test_images = self.load_mnist(os.path.join(MNIST_DIR, TEST_DATA), True)
test_labels = self.load_mnist(os.path.join(MNIST_DIR, TEST_LABEL), False)
self.train_data = np.append(train_images, train_labels, axis=1)
self.test_data = np.append(test_images, test_labels, axis=1)
def shuffle_data(self):
print('Randomly shuffle MNIST data...')
np.random.shuffle(self.train_data)
    def build_model(self):  # build the network structure
        # TODO: build the three-layer neural network
print('Building multi-layer perception model...')
self.fc1 = FullyConnectedLayer(self.input_size, self.hidden1)
self.relu1 = ReLULayer()
self.fc2 = FullyConnectedLayer(self.hidden1, self.hidden2)
self.relu2 = ReLULayer()
self.fc3 = FullyConnectedLayer(self.hidden2, self.out_classes)
self.softmax = SoftmaxLossLayer()
self.update_layer_list = [self.fc1, self.fc2, self.fc3]
def init_model(self):
print('Initializing parameters of each layer in MLP...')
for layer in self.update_layer_list:
layer.init_param()
def load_model(self, param_dir):
print('Loading parameters from file ' + param_dir)
        params = np.load(param_dir, allow_pickle=True).item()  # allow_pickle=True is required on NumPy >= 1.16.3 to load a saved dict
self.fc1.load_param(params['w1'], params['b1'])
self.fc2.load_param(params['w2'], params['b2'])
self.fc3.load_param(params['w3'], params['b3'])
def save_model(self, param_dir):
print('Saving parameters to file ' + param_dir)
params = {}
params['w1'], params['b1'] = self.fc1.save_param()
params['w2'], params['b2'] = self.fc2.save_param()
params['w3'], params['b3'] = self.fc3.save_param()
np.save(param_dir, params)
    def forward(self, input):  # forward pass of the network
        # TODO: forward pass of the network
h1 = self.fc1.forward(input)
h1 = self.relu1.forward(h1)
h2 = self.fc2.forward(h1)
h2 = self.relu2.forward(h2)
h3 = self.fc3.forward(h2)
prob = self.softmax.forward(h3)
return prob
    def backward(self):  # backward pass of the network
        # TODO: backward pass of the network
dloss = self.softmax.backward()
dh3 = self.fc3.backward(dloss)
dh2 = self.relu2.backward(dh3)
dh2 = self.fc2.backward(dh2)
dh1 = self.relu1.backward(dh2)
dh1 = self.fc1.backward(dh1)
def update(self, lr):
for layer in self.update_layer_list:
layer.update_param(lr)
def train(self):
        max_batch = self.train_data.shape[0] // self.batch_size  # integer division, so that range() receives an int
print('Start training...')
for idx_epoch in range(self.max_epoch):
            self.shuffle_data()  # reshuffle at the start of every epoch
for idx_batch in range(max_batch):
                batch_images = self.train_data[idx_batch*self.batch_size:(idx_batch+1)*self.batch_size, :-1]  # the end index of a slice is exclusive, so :-1 drops the label column
                batch_labels = self.train_data[idx_batch*self.batch_size:(idx_batch+1)*self.batch_size, -1]  # -1 selects the last column (the label)
prob = self.forward(batch_images)
loss = self.softmax.get_loss(batch_labels)
self.backward()
self.update(self.lr)
if idx_batch % self.print_iter == 0:
print('Epoch %d, iter %d, loss: %.6f' % (idx_epoch, idx_batch, loss))
def evaluate(self):
pred_results = np.zeros([self.test_data.shape[0]])
        for idx in range(self.test_data.shape[0] // self.batch_size):  # integer division, so that range() receives an int
batch_images = self.test_data[idx*self.batch_size:(idx+1)*self.batch_size, :-1]
start = time.time()
            prob = self.forward(batch_images)  # prob has shape (batch_size, 10)
end = time.time()
print("inferencing time: %f"%(end-start))
            pred_labels = np.argmax(prob, axis=1)  # pred_labels has shape (batch_size,)
pred_results[idx*self.batch_size:(idx+1)*self.batch_size] = pred_labels
accuracy = np.mean(pred_results == self.test_data[:,-1])
print('Accuracy in test set: %f' % accuracy)
def build_mnist_mlp(param_dir='weight.npy'):
h1, h2, e = 320, 160, 200
mlp = MNIST_MLP(hidden1=h1, hidden2=h2, max_epoch=e)
mlp.load_data()
mlp.build_model()
mlp.init_model()
mlp.train()
mlp.save_model('mlp-%d-%d-%depoch.npy' % (h1, h2, e))
# mlp.load_model('mlp-%d-%d-%depoch.npy' % (h1, h2, e))
return mlp
if __name__ == '__main__':
mlp = build_mnist_mlp()
mlp.evaluate()
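Once a parameter file exists, retraining can be skipped along the lines of the commented-out load_model call above; a minimal sketch (the helper name build_mnist_mlp_pretrained is hypothetical):

def build_mnist_mlp_pretrained(param_dir='mlp-320-160-200epoch.npy'):
    # same setup as build_mnist_mlp, but load saved parameters instead of training
    mlp = MNIST_MLP(hidden1=320, hidden2=160, max_epoch=200)
    mlp.load_data()
    mlp.build_model()
    mlp.init_model()
    mlp.load_model(param_dir)  # replaces mlp.train() and mlp.save_model(...)
    return mlp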
main_exp_2_1.py
Its functionality is essentially the same as the evaluate function in mnist_mlp_cpu.py, so it is not repeated here.