# Build the SummaryWriter; the log_dir path decides where event files are written.
from torch.utils.tensorboard import SummaryWriter

# default `log_dir` is "runs" - we'll be more specific here
writer = SummaryWriter('runs/fashion_mnist_experiment_1')

# Write a sample grid of training images into the event file.
# get some random training images
dataiter = iter(trainloader)
# FIX: `dataiter.next()` does not exist — DataLoader iterators follow the
# Python 3 iterator protocol, so use the builtin next() instead.
images, labels = next(dataiter)

# create grid of images
img_grid = torchvision.utils.make_grid(images)

# show images
matplotlib_imshow(img_grid, one_channel=True)

# write to tensorboard
writer.add_image('four_fashion_mnist_images', img_grid)

# Finally, inspect the results with: tensorboard --logdir /path/to/log
2. 查看模型结构
# Trace `net` with a sample batch and write the model graph to TensorBoard.
writer.add_graph(net, images)
writer.close()
3. 查看参数、变量的投影
使用低维表示(lower dimensional representation)来呈现高维数据,便于在 TensorBoard 的投影(projector)面板中进行可视化。
# helper function
def select_n_random(data, labels, n=100):
    """Select n random datapoints and their corresponding labels from a dataset."""
    assert len(data) == len(labels)
    # A single shared permutation keeps every datapoint paired with its label.
    shuffle = torch.randperm(len(data))
    shuffled_data = data[shuffle]
    shuffled_labels = labels[shuffle]
    return shuffled_data[:n], shuffled_labels[:n]
# Select random images and their target indices, then log them as embeddings.
images, labels = select_n_random(trainset.data, trainset.targets)

# Map each numeric target to its human-readable class name.
class_labels = [classes[lab] for lab in labels]

# Flatten each 28x28 image into a 784-dim feature vector for the projector.
features = images.view(-1, 28 * 28)
writer.add_embedding(features,
                     metadata=class_labels,
                     label_img=images.unsqueeze(1))
writer.close()
4. 记录训练过程
记录损失函数的变化。
每隔 1000 个 mini-batch 放几张图片展示预测结果。
# Build the figures that will be shown in TensorBoard.
def images_to_probs(net, images):
    """
    Generates predictions and corresponding probabilities from a trained
    network and a list of images.
    """
    output = net(images)
    # convert output probabilities to predicted class
    _, preds_tensor = torch.max(output, 1)
    preds = np.squeeze(preds_tensor.numpy())
    # Pair each predicted class index with its softmax confidence.
    confidences = []
    for pred_idx, logits in zip(preds, output):
        confidences.append(F.softmax(logits, dim=0)[pred_idx].item())
    return preds, confidences


def plot_classes_preds(net, images, labels):
    """
    Generates matplotlib Figure using a trained network, along with images
    and labels from a batch, that shows the network's top prediction along
    with its probability, alongside the actual label, coloring this
    information based on whether the prediction was correct or not.
    Uses the "images_to_probs" function.
    """
    preds, probs = images_to_probs(net, images)
    # plot the images in the batch, along with predicted and true labels
    fig = plt.figure(figsize=(12, 48))
    for idx in np.arange(4):
        ax = fig.add_subplot(1, 4, idx + 1, xticks=[], yticks=[])
        matplotlib_imshow(images[idx], one_channel=True)
        is_correct = preds[idx] == labels[idx].item()
        ax.set_title("{0}, {1:.1f}%\n(label: {2})".format(
            classes[preds[idx]],
            probs[idx] * 100.0,
            classes[labels[idx]]),
            color=("green" if is_correct else "red"))
    return fig
running_loss = 0.0
for epoch in range(1):  # loop over the dataset multiple times

    for i, data in enumerate(trainloader, 0):
        # get the inputs; data is a list of [inputs, labels]
        inputs, labels = data

        # zero the parameter gradients
        optimizer.zero_grad()

        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        if i % 1000 == 999:  # every 1000 mini-batches...
            # Global step shared by both TensorBoard entries below.
            step = epoch * len(trainloader) + i

            # ...log the running loss as a scalar
            writer.add_scalar('training loss',
                              running_loss / 1000,
                              step)

            # ...log a Matplotlib Figure showing the model's predictions
            # on a random mini-batch
            writer.add_figure('predictions vs. actuals',
                              plot_classes_preds(net, inputs, labels),
                              global_step=step)
            running_loss = 0.0
print('Finished Training')