#神经网络训练过程可视化--以手写体数字识别卷积神经网络为例
import torch
import torch.nn as nn
import torchvision
import torchvision.utils as vutils
from torch.optim import SGD
import torch.utils.data as Data
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
# ------------------------------------------------------------------
# Dataset loading and preprocessing (MNIST handwritten digits).
# ------------------------------------------------------------------
train_data = torchvision.datasets.MNIST(
    root="Data",
    train=True,
    # ToTensor() converts PIL images to torch tensors scaled into [0, 1].
    transform=torchvision.transforms.ToTensor(),
    # download=True,
    download=False,
)
# Loader that yields shuffled 128-sample mini-batches from the training set.
train_loader = Data.DataLoader(
    dataset=train_data,
    batch_size=128,
    shuffle=True,
    num_workers=0,
)
test_data = torchvision.datasets.MNIST(
    root="Data",
    train=False,
    # download=True,
    download=False,
)
# Build one big test tensor by hand (no transform on the test set):
# cast the raw uint8 pixels to float, rescale into [0, 1], then insert
# the channel axis so the shape becomes (N, 1, 28, 28).
test_data_x = test_data.data.float().div(255.0)
test_data_x = test_data_x.unsqueeze(1)
test_data_y = test_data.targets  # ground-truth labels of the test set
print("test_data_x.shape:", test_data_x.shape)
print("test_data_y.shape:", test_data_y.shape)
# Sanity-check: pull a batch from the training loader and print its
# tensor shapes/dtypes (the loop exits on the second iteration, so the
# variables printed below hold that batch).
for batch_idx, (b_x, b_y) in enumerate(train_loader):
    if batch_idx > 0:
        break
print("b_x.shape:", b_x.shape)
print("b_y.shape:", b_y.shape)
print("b_x.dtype:", b_x.dtype)
print("b_y.dtype:", b_y.dtype)
# ------------------------------------------------------------------
# Convolutional neural network for MNIST classification.
# ------------------------------------------------------------------
class ConvNet(nn.Module):
    """CNN for 28x28 single-channel digit images.

    Architecture: two conv stages (conv -> ReLU -> pool), a two-layer
    fully connected head, and a final 10-way linear classifier.
    """

    def __init__(self):
        super(ConvNet, self).__init__()
        # Stage 1: 1 -> 16 channels; 3x3 kernel with padding=1 keeps
        # the 28x28 spatial size, average pooling halves it -> 16x14x14.
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=16, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.AvgPool2d(kernel_size=2, stride=2),
        )
        # Stage 2: 16 -> 32 channels; max pooling halves again -> 32x7x7.
        self.conv2 = nn.Sequential(
            nn.Conv2d(16, 32, 3, 1, 1),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
        )
        # Fully connected head over the flattened 32*7*7 = 1568 features
        # (flattening happens in forward()). fc[2] is the 128->64 Linear
        # layer whose weights are inspected for visualization.
        self.fc = nn.Sequential(
            nn.Linear(in_features=32 * 7 * 7, out_features=128),
            nn.ReLU(),
            nn.Linear(128, 64),
            nn.ReLU(),
        )
        # Final classification layer: 64 features -> 10 digit classes.
        self.out = nn.Linear(64, 10)

    def forward(self, x):
        """Map a batch of images (N, 1, 28, 28) to class scores (N, 10)."""
        features = self.conv2(self.conv1(x))
        flat = features.view(features.size(0), -1)  # flatten per sample
        return self.out(self.fc(flat))
import hiddenlayer as hl
import time
MyConvnet=ConvNet()  # instantiate the network (randomly initialized weights)
optimizer=torch.optim.Adam(MyConvnet.parameters(),lr=0.0003)  # Adam optimizer over all model parameters
loss_func=nn.CrossEntropyLoss()  # classification loss; expects raw logits + integer class labels
history1=hl.History()  # hiddenlayer log of the training-process metrics
canvas1=hl.Canvas()  # hiddenlayer canvas used to draw the metrics live
print_step=100  # evaluate/log once every 100 training iterations
# NOTE(review): earlier draft of the training loop, intentionally kept
# commented out — it trains for 30 full epochs with no logging/visualization.
# for epoch in range(30):#对整个数据集训练的次数30次
#     for step,(b_x,b_y) in enumerate(train_loader):#循环利用数据加载器train_loader中的每一个batch对模型参数进行优化
#         out_put = MyConvnet(b_x)
#         loss = loss_func(out_put, b_y)
#         optimizer.zero_grad()
#         loss.backward()
#         optimizer.step()
# Training loop: 5 epochs over the training set. Every `print_step`
# iterations the model is evaluated on the full test set and the
# metrics are logged and plotted with hiddenlayer.
for epoch in range(5):
    for step, (b_x, b_y) in enumerate(train_loader):
        out_put = MyConvnet(b_x)        # forward pass on the mini-batch
        loss = loss_func(out_put, b_y)  # cross-entropy on raw logits
        optimizer.zero_grad()           # clear gradients from the last step
        loss.backward()                 # backpropagate
        optimizer.step()                # update the parameters
        if step % print_step == 0:
            # FIX: run the test-set forward pass under no_grad() — the
            # original tracked gradients for the whole 10k-image test
            # tensor, wasting time and a large amount of memory.
            with torch.no_grad():
                test_out = MyConvnet(test_data_x)
                pre_lab = torch.argmax(test_out, dim=1)
            acc = accuracy_score(test_data_y, pre_lab)
            # FIX: log detached scalars/snapshots instead of live tensors.
            # The original logged `loss` (keeping its autograd graph alive)
            # and `fc[2].weight` (the live parameter, so every history entry
            # pointed at the same tensor mutated by later optimizer steps).
            history1.log(
                (epoch, step),
                train_loss=loss.item(),
                test_acc=acc,
                hidden_weight=MyConvnet.fc[2].weight.detach().clone(),
            )
            with canvas1:
                canvas1.draw_plot(history1["train_loss"])
                canvas1.draw_plot(history1["test_acc"])
                canvas1.draw_image(history1["hidden_weight"])
# HiddenLayer visualization of the CNN training process.
# (Scraped article footer — originally published 2023-12-14 07:58:57.)