import numpy as np
import torch
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision import transforms
import torch.nn as nn
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter


class Net(nn.Module):
    """Small CNN for MNIST classification.

    Forward hooks (see ``add_hooks``) capture per-layer feature maps so they
    can be written to TensorBoard via ``add_image_summary``.
    """

    def __init__(self):
        super(Net, self).__init__()
        # idx -> detached CPU feature-map tensor captured by the forward hooks
        # (idx 0 holds the raw input image, idx k the output of features[k-1])
        self.feature_hook_imgs = {}
        self.features = nn.Sequential(
            nn.Conv2d(1, 20, 3, 1),
            nn.ReLU(),
            nn.MaxPool2d((2, 2), 2),  # 2x2 kernel, stride 2
            nn.Conv2d(20, 50, kernel_size=(5, 5), stride=1),
            nn.ReLU(),
            nn.MaxPool2d(2, 2)
        )
        # Pool to a fixed 4x4 spatial size so the classifier input width
        # (50*4*4) is independent of the input image size.
        self.adpt = nn.AdaptiveAvgPool2d((4, 4))
        self.classifi = nn.Sequential(
            nn.Linear(in_features=50 * 4 * 4, out_features=500),
            nn.ReLU(),
            nn.Linear(500, 10)
        )

    def forward(self, x):
        """Return class logits of shape (N, 10) for input images (N, 1, H, W)."""
        x = self.features(x)
        x = self.adpt(x)
        x = x.view(x.shape[0], -1)
        x = self.classifi(x)
        return x

    def add_hooks(self):
        """Register a forward hook on every layer of ``self.features``.

        Each hook stores a detached CPU copy of the layer's output in
        ``feature_hook_imgs``; the first layer's hook additionally stores the
        raw input under key 0.
        """
        def create_hook_fn(idx):
            def hook_fn(_model, _input, _output):
                if idx == 1:
                    # Input of the first layer, i.e. the original image.
                    self.feature_hook_imgs[idx - 1] = _input[0].detach().cpu()
                # detach() so the stored copies do not keep the autograd
                # graph (and its activations) alive between summaries.
                self.feature_hook_imgs[idx] = _output.detach().cpu()
            return hook_fn

        for _idx, _layer in enumerate(self.features):
            _layer.register_forward_hook(create_hook_fn(_idx + 1))

    def add_image_summary(self, writer: SummaryWriter, step, prefix=None):
        """Write the captured feature maps of the first sample to TensorBoard.

        :param writer: open SummaryWriter to log into
        :param step: global step for the image summaries
        :param prefix: optional tag prefix ('train'/'test'); defaults to 'layer'
        """
        if len(self.feature_hook_imgs) == 0:
            return
        if prefix is None:
            prefix = 'layer'
        else:
            prefix = f'{prefix}_layer'
        for _k in self.feature_hook_imgs:
            _v = self.feature_hook_imgs[_k]
            _v = _v[0:1, ...]  # keep only the first sample of the batch
            if _k > 0:
                # (1, C, H, W) -> (C, 1, H, W): display each channel as its
                # own single-channel image.
                _v = torch.permute(_v, (1, 0, 2, 3))
            writer.add_images(f'{prefix}_{_k}', _v, step)


class AccuracyScore(nn.Module):
    """Accuracy metric: fraction of predictions matching the targets."""

    def __init__(self):
        super(AccuracyScore, self).__init__()

    # noinspection PyMethodMayBeStatic
    def forward(self, y_pred, y_true):
        """Return mean accuracy as a 0-dim float32 tensor.

        ``y_pred`` may be class indices (same rank as ``y_true``) or logits
        with one extra class dimension, which are argmax-reduced over dim 1.
        """
        y_pred_dim = y_pred.dim()
        y_true_dim = y_true.dim()
        if y_pred_dim == y_true_dim:
            pass
        elif y_pred_dim == y_true_dim + 1:
            y_pred = torch.argmax(y_pred, dim=1)
        else:
            raise ValueError("格式异常!")
        y_pred = y_pred.to(y_true.dtype)
        correct = (y_pred == y_true)
        return torch.mean(correct.to(torch.float32))


if __name__ == '__main__':
    _batch_size = 8
    total_epoch = 10
    transform = transforms.Compose([
        # Random crop keeping at least 90% of the area, resized to 32x32.
        transforms.RandomResizedCrop(size=(32, 32), scale=(0.9, 1)),
        transforms.ToTensor()
    ])
    trainset = datasets.MNIST(root=r'D:\pythonProject\0728', download=False, train=True, transform=transform)
    trainloader = DataLoader(trainset, shuffle=True, batch_size=_batch_size)
    # NOTE(review): the same random-crop augmentation is applied to the test
    # set; evaluation is usually deterministic — confirm this is intended.
    testset = datasets.MNIST(root=r'D:\pythonProject\0728', download=False, train=False, transform=transform)
    testloader = DataLoader(shuffle=False, batch_size=_batch_size, dataset=testset)

    net = Net()
    net.add_hooks()
    loss_fn = nn.CrossEntropyLoss()
    opt = optim.SGD(net.parameters(), lr=0.01)
    acc_fn = AccuracyScore()

    writer = SummaryWriter(log_dir='./output1/mnist_net03')
    writer.add_graph(net, torch.empty(_batch_size, 1, 32, 32))

    train_step = 0
    test_step = 0
    summary_interval = 200
    for epoch in range(total_epoch):
        # ---- training ----
        net.train(True)
        train_loss = []
        for data in trainloader:
            inputs, labels = data
            outputs = net(inputs)
            _loss = loss_fn(outputs, labels)
            _acc = acc_fn(outputs, labels)
            # Backward pass.
            opt.zero_grad()
            _loss.backward()
            opt.step()
            train_loss.append(_loss.item())
            if train_step % summary_interval == 0:
                writer.add_scalar('train_loss', _loss.item(), train_step)
                writer.add_scalar('train_acc', _acc.item(), train_step)
                net.add_image_summary(writer, train_step, 'train')
                print(f"Train {epoch + 1}/{total_epoch} "
                      f'train_step:{train_step}'
                      f"loss:{_loss.item():.3f} "
                      f"accuracy:{_acc.item():.3f}")
            train_step += 1

        # ---- evaluation ----
        net.eval()
        test_loss = []
        # no_grad: evaluation needs no gradients; skips graph construction.
        with torch.no_grad():
            for data in testloader:
                inputs, labels = data
                outputs = net(inputs)
                _test_loss = loss_fn(outputs, labels)
                _test_acc = acc_fn(outputs, labels)
                test_loss.append(_test_loss.item())
                if test_step % summary_interval == 0:
                    writer.add_scalar('test_loss', _test_loss.item(), test_step)
                    writer.add_scalar('test_acc', _test_acc.item(), test_step)
                    net.add_image_summary(writer, test_step, 'test')
                    print(f"Test {epoch + 1}/{total_epoch} "
                          f'test_step:{test_step}'
                          f"loss:{_test_loss.item():.3f} "
                          f"accuracy:{_test_acc.item():.3f}")
                test_step += 1

        writer.add_scalars('epoch_loss', {
            'train': np.mean(train_loss),
            'test': np.mean(test_loss)
        }, epoch)

    # Flush and release the event file.
    writer.close()

'''
tensorboard --logdir D:\pythonProject\0728\output1\mnist_net03
localhost:6006
'''
# mnist数据集手写数字识别可视化 (MNIST handwritten-digit recognition with visualization)
# 于 2022-08-06 15:28:34 首次发布 (first published 2022-08-06 15:28:34)