网上学习资料一大堆,但如果学到的知识不成体系,遇到问题时只是浅尝辄止,不再深入研究,那么很难做到真正的技术提升。
一个人可以走的很快,但一群人才能走的更远!不论你是正从事IT行业的老鸟或是对IT行业感兴趣的新人,都欢迎加入我们的圈子(技术交流、学习资源、职场吐槽、大厂内推、面试辅导),让我们一起学习成长!
# display iteration training info
if iteration % self.cfg['display_freq'] == 0:
print("Epoch [{}][{}]\t Batch [{}][{}]\t Training Loss {:.4f}\t Accuracy {:.4f}".format(
epoch, max_epoch, iteration, len(self.train_loader), loss, acc))
avg_train_loss, avg_train_acc = np.mean(iteration_train_loss), np.mean(iteration_train_acc)
epoch_train_loss.append(avg_train_loss)
epoch_train_acc.append(avg_train_acc)
# validate
avg_val_loss, avg_val_acc = self.validate()
# display epoch training info
print('\nEpoch [{}]\t Average training loss {:.4f}\t Average training accuracy {:.4f}'.format(
epoch, avg_train_loss, avg_train_acc))
# display epoch valiation info
print('Epoch [{}]\t Average validation loss {:.4f}\t Average validation accuracy {:.4f}\n'.format(
epoch, avg_val_loss, avg_val_acc))
return epoch_train_loss, epoch_train_acc
def validate(self):
    """Evaluate the model on the full validation set.

    Runs a forward pass over every batch yielded by ``self.val_loader``,
    collects all logits and labels, and scores them in one call to the
    criterion.

    Returns:
        ``(loss, acc)`` as produced by ``self.criterion.forward``.
    """
    collected_logits, collected_labels = [], []
    for images, labels in self.val_loader:
        collected_logits.append(self.model.forward(images))
        collected_labels.append(labels)
    all_logits = np.concatenate(collected_logits)
    all_labels = np.concatenate(collected_labels)
    return self.criterion.forward(all_logits, all_labels)
def test(self):
logits_set, labels_set = [], []
for images, labels in self.test_loader:
logits = self.model.forward(images)
logits_set.append(logits)
labels_set.append(labels)
logits = np.concatenate(logits_set)
labels = np.concatenate(labels_set)
loss, acc = self.criterion.forward(logits, labels)
return loss, acc
if __name__ == '__main__':
    # NOTE(review): original read `if name == ‘main’:` with curly quotes —
    # markdown stripped the dunder underscores and smart-quoted the strings.
    # Train the same network twice (relu vs sigmoid) and compare the curves.

    # You can modify the hyperparameters by yourself.
    relu_cfg = {
        'data_root': 'data',
        'max_epoch': 10,
        'batch_size': 100,
        'learning_rate': 0.1,
        'momentum': 0.9,
        'display_freq': 50,
        'activation_function': 'relu',
    }
    runner = Solver(relu_cfg)
    relu_loss, relu_acc = runner.train()
    test_loss, test_acc = runner.test()
    print('Final test accuracy {:.4f}\n'.format(test_acc))

    # You can modify the hyperparameters by yourself.
    sigmoid_cfg = {
        'data_root': 'data',
        'max_epoch': 10,
        'batch_size': 100,
        'learning_rate': 0.1,
        'momentum': 0.9,
        'display_freq': 50,
        'activation_function': 'sigmoid',
    }
    runner = Solver(sigmoid_cfg)
    sigmoid_loss, sigmoid_acc = runner.train()
    test_loss, test_acc = runner.test()
    print('Final test accuracy {:.4f}\n'.format(test_acc))

    # Plot per-epoch training loss/accuracy for both activation functions.
    plot_loss_and_acc({
        "relu": [relu_loss, relu_acc],
        "sigmoid": [sigmoid_loss, sigmoid_acc],
    })
# dataloader.py
import os
import struct
import numpy as np
class Dataset(object):
    """MNIST-style dataset reader for the raw IDX binary files.

    The official training files are split into train (first 55000 samples)
    and val (last 5000); the ``t10k`` files form the test set.
    """

    def __init__(self, data_root, mode='train', num_classes=10):
        assert mode in ['train', 'val', 'test']
        # train and val both come from the training files; test from t10k
        kind = {'train': 'train', 'val': 'train', 'test': 't10k'}[mode]
        labels_path = os.path.join(data_root, '{}-labels-idx1-ubyte'.format(kind))
        images_path = os.path.join(data_root, '{}-images-idx3-ubyte'.format(kind))
        # IDX headers are big-endian: magic, count (+ rows, cols for images)
        with open(labels_path, 'rb') as lbpath:
            magic, n = struct.unpack('>II', lbpath.read(8))
            labels = np.fromfile(lbpath, dtype=np.uint8)
        with open(images_path, 'rb') as imgpath:
            magic, num, rows, cols = struct.unpack('>IIII', imgpath.read(16))
            images = np.fromfile(imgpath, dtype=np.uint8).reshape(len(labels), 784)
        if mode == 'train':
            # training images and labels
            self.images = images[:55000]  # shape: (55000, 784)
            self.labels = labels[:55000]  # shape: (55000,)
        elif mode == 'val':
            # validation images and labels
            self.images = images[55000:]  # shape: (5000, 784)
            self.labels = labels[55000:]  # shape: (5000,)
        else:
            # test data
            self.images = images  # shape: (10000, 784)
            self.labels = labels  # shape: (10000,)
        # BUG FIX: honor the num_classes argument (was hard-coded to 10,
        # silently ignoring the parameter)
        self.num_classes = num_classes

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        image = self.images[idx]
        label = self.labels[idx]
        # Scale pixels from [0, 255] to [0, 1], then zero-center by the
        # per-image mean.
        image = image / 255.0
        image = image - np.mean(image)
        return image, label
class IterationBatchSampler(object):
    """Produces per-epoch lists of index batches for a dataset.

    Call ``prepare_epoch_indices`` once per epoch (optionally shuffling),
    then iterate to get arrays of dataset indices, one per minibatch.
    """

    def __init__(self, dataset, max_epoch, batch_size=2, shuffle=True):
        # max_epoch is kept for interface compatibility; it is unused here.
        self.dataset = dataset
        self.batch_size = batch_size
        self.shuffle = shuffle

    def prepare_epoch_indices(self):
        indices = np.arange(len(self.dataset))
        if self.shuffle:
            np.random.shuffle(indices)
        # BUG FIX: the old code computed
        #   num_iteration = len // bs + int(len % bs)
        # which over-counts whenever the remainder > 1, and np.split with an
        # int requires an exact equal division, so any non-divisible dataset
        # size crashed. Slicing fixed-size chunks gives full batches plus one
        # smaller trailing batch, and is identical when bs divides len.
        self.batch_indices = [
            indices[start:start + self.batch_size]
            for start in range(0, len(indices), self.batch_size)
        ]

    def __iter__(self):
        return iter(self.batch_indices)

    def __len__(self):
        return len(self.batch_indices)
class Dataloader(object):
    """Pairs a dataset with a sampler and yields stacked minibatches."""

    def __init__(self, dataset, sampler):
        self.dataset = dataset
        self.sampler = sampler

    def __iter__(self):
        # Re-draw (and possibly reshuffle) the index batches for this epoch.
        self.sampler.prepare_epoch_indices()
        for index_group in self.sampler:
            samples = [self.dataset[i] for i in index_group]
            imgs, lbls = zip(*samples)
            yield np.stack(imgs), np.stack(lbls)

    def __len__(self):
        return len(self.sampler)
def build_dataloader(data_root, max_epoch, batch_size, shuffle=False, mode='train'):
    """Construct a Dataloader for the given split.

    Args:
        data_root: directory containing the MNIST IDX files.
        max_epoch: forwarded to the sampler (kept for interface parity).
        batch_size: number of samples per minibatch.
        shuffle: reshuffle indices at the start of each epoch.
        mode: one of 'train', 'val', 'test'.

    Returns:
        A Dataloader yielding (images, labels) minibatches.
    """
    # BUG FIX: the default was written with curly quotes (mode=‘train’),
    # which is a syntax error; also fixed the `data_lodaer` typo.
    dataset = Dataset(data_root, mode)
    sampler = IterationBatchSampler(dataset, max_epoch, batch_size, shuffle)
    return Dataloader(dataset, sampler)
# loss.py
import numpy as np
# a small number to prevent dividing by zero, maybe useful for you
# Small constant to avoid log(0) and division by zero.
EPS = 1e-11


class SoftmaxCrossEntropyLoss(object):
    """Fused softmax + cross-entropy loss with accuracy reporting."""

    def forward(self, logits, labels):
        """Compute mean cross-entropy loss and accuracy over a minibatch.

        Inputs: (minibatch)
        - logits: forward results from the last FCLayer, shape (batch_size, 10)
        - labels: the ground truth label, shape (batch_size, )

        Returns:
            (loss, acc): scalar mean loss and mean accuracy.
        """
        batch = len(logits)
        # One-hot targets, saved for backward.
        self.one_hot_labels = np.zeros_like(logits)
        self.one_hot_labels[np.arange(batch), labels] = 1
        # BUG FIX: subtract the per-row max before exponentiating.
        # np.exp(logits) overflows to inf for large logits, producing
        # nan probabilities; shifting by the max is mathematically
        # equivalent and numerically stable.
        shifted = logits - logits.max(axis=1, keepdims=True)
        exp = np.exp(shifted)
        self.prob = exp / (EPS + exp.sum(axis=1, keepdims=True))
        # Accuracy: argmax of probabilities (same ranking as logits).
        preds = np.argmax(self.prob, axis=1)
        acc = np.mean(preds == labels)
        # Mean cross-entropy; EPS guards log(0).
        loss = np.mean(np.sum(-self.one_hot_labels * np.log(self.prob + EPS), axis=1))
        return loss, acc

    def backward(self):
        """Gradient of the cross-entropy w.r.t. the logits.

        Returns the per-sample gradient (prob - one_hot), shape
        (batch_size, 10). NOTE(review): this is not divided by the batch
        size; the optimizer/learning rate appears tuned to that convention,
        so it is preserved.
        """
        return self.prob - self.one_hot_labels
# network.py
class Network(object):
    """Sequential container: layers are run in order for forward, and in
    reverse order for backward."""

    def __init__(self):
        # BUG FIX: was `def init` — the dunder underscores were lost in
        # transcription, so the constructor never ran and instances had no
        # layerList/numLayer attributes.
        self.layerList = []
        self.numLayer = 0

    def add(self, layer):
        """Append a layer to the end of the pipeline."""
        self.numLayer += 1
        self.layerList.append(layer)

    def forward(self, x):
        # Forward layer by layer.
        for i in range(self.numLayer):
            x = self.layerList[i].forward(x)
        return x

    def backward(self, delta):
        # Backward layer by layer, in reverse order.
        for i in reversed(range(self.numLayer)):
            delta = self.layerList[i].backward(delta)
# optimizer.py
import numpy as np
class SGD(object):
    """Stochastic gradient descent optimizer (with optional momentum)."""

    def __init__(self, model, learning_rate, momentum=0.0):
        # BUG FIX: was `def init` — the dunder underscores were lost in
        # transcription, so the constructor never ran.
        self.model = model
        self.learning_rate = learning_rate
        self.momentum = momentum
def step(self):
"""One backpropagation step, update weights layer by layer"""
layers = self.model.layerList
for layer in layers:
if layer.trainable:
############################################################################
# TODO: Put your code here
# Calculate diff_W and diff_b using layer.grad_W and layer.grad_b.
网上学习资料一大堆,但如果学到的知识不成体系,遇到问题时只是浅尝辄止,不再深入研究,那么很难做到真正的技术提升。
一个人可以走的很快,但一群人才能走的更远!不论你是正从事IT行业的老鸟或是对IT行业感兴趣的新人,都欢迎加入我们的圈子(技术交流、学习资源、职场吐槽、大厂内推、面试辅导),让我们一起学习成长!
IL1aQhoX-1715514645034)]
[外链图片转存中…(img-Hp8Nuohd-1715514645034)]
网上学习资料一大堆,但如果学到的知识不成体系,遇到问题时只是浅尝辄止,不再深入研究,那么很难做到真正的技术提升。
一个人可以走的很快,但一群人才能走的更远!不论你是正从事IT行业的老鸟或是对IT行业感兴趣的新人,都欢迎加入我们的圈子(技术交流、学习资源、职场吐槽、大厂内推、面试辅导),让我们一起学习成长!