Visualizing a neural network with TensorBoard
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=10, kernel_size=5)
        self.conv2 = nn.Conv2d(in_channels=10, out_channels=20, kernel_size=5)
        # Dropout2d keeps the tensor shape unchanged; it randomly zeroes out
        # entire channels C (see the sanity check after this section)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(in_features=320, out_features=50)
        self.fc2 = nn.Linear(in_features=50, out_features=10)
        self.bn = nn.BatchNorm2d(num_features=20)  # C = 20
        # Batch normalization pulls activations back toward a normal distribution with
        # zero mean and unit variance, which keeps the data distribution consistent
        # across layers and helps avoid vanishing gradients.

    def forward(self, x):
        x = F.max_pool2d(self.conv1(x), 2)
        x = F.relu(x) + F.relu(-x)  # equivalent to torch.abs(x)
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), kernel_size=2))
        x = self.bn(x)
        # 28x28 -> conv1 (k=5) -> 24x24 -> pool -> 12x12 -> conv2 (k=5) -> 8x8 -> pool -> 4x4,
        # so the flattened size is 20 * 4 * 4 = 320
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)  # randomly zeroes units to prevent overfitting
        x = self.fc2(x)
        x = F.softmax(x, dim=1)
        return x
input = torch.rand(32, 1, 28, 28)  # (N batch size, C channels, H height, W width)
model = Net()
writer = SummaryWriter('tensorb')
writer.add_graph(model,(input,))
writer.close()
Open a terminal and run: tensorboard --logdir=tensorb, then view the graph at http://localhost:6006.
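A quick sanity check of the two layer comments above (a sketch added to these notes, not part of the original example): in training mode, Dropout2d preserves the tensor shape but zeroes whole channels, and BatchNorm2d normalizes each channel to roughly zero mean and unit variance.
import torch
import torch.nn as nn
x = torch.rand(4, 20, 4, 4)  # (N, C, H, W)
drop = nn.Dropout2d(p=0.5)
drop.train()
y = drop(x)
print(y.shape)  # torch.Size([4, 20, 4, 4]) -- shape unchanged
print((y.sum(dim=(2, 3)) == 0).float().mean())  # fraction of all-zero channels, around 0.5
bn = nn.BatchNorm2d(num_features=20)
bn.train()
z = bn(x)
print(z.mean(dim=(0, 2, 3)))  # per-channel means, all close to 0
print(z.var(dim=(0, 2, 3), unbiased=False))  # per-channel variances, all close to 1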
Visualizing loss values with TensorBoard
import numpy as np
import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter
dtype = torch.FloatTensor
writer = SummaryWriter('loss')
np.random.seed(100)
x_train = np.linspace(-1,1,100).reshape(100,1)
y_train = 3 * np.power(x_train,2) + 2 + 0.2*np.random.rand(x_train.size).reshape(100,1)
model = nn.Linear(1, 1)  # one input feature, one output, matching the (100, 1) targets
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)  # lr = 1 is too large here and makes the loss oscillate instead of converging
for epoch in range(100):
    inputs = torch.from_numpy(x_train).type(dtype)
    targets = torch.from_numpy(y_train).type(dtype)
    output = model(inputs)
    loss = criterion(output, targets)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    writer.add_scalar('training loss', loss.item(), epoch)  # one point on the curve per epoch
writer.close()
Open a terminal and run: tensorboard --logdir=loss to view the loss curve.
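Note that y_train is quadratic in x, so the straight line nn.Linear(1, 1) can represent will underfit, and the logged loss flattens out above the noise floor. A minimal variation (a sketch added to these notes, not part of the original example) that fits the same data by feeding x**2 in as the feature:
# Hypothetical variation: with x^2 as the input feature, the same one-layer model
# can represent y = w * x^2 + b and should recover w close to 3 and b close to 2.1
# (the uniform noise 0.2 * rand has mean 0.1).
inputs_sq = torch.from_numpy(np.power(x_train, 2)).type(dtype)
model_sq = nn.Linear(1, 1)
optimizer_sq = torch.optim.SGD(model_sq.parameters(), lr=0.5)
for epoch in range(500):
    loss_sq = criterion(model_sq(inputs_sq), targets)
    optimizer_sq.zero_grad()
    loss_sq.backward()
    optimizer_sq.step()
print(model_sq.weight.item(), model_sq.bias.item())  # roughly 3 and 2.1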