PyTorch Project: MNIST

PyTorch day 1: MNIST

1. Data loading

import torch
import torchvision
import torchvision.datasets as dset
import torchvision.transforms as transforms

BATCH_SIZE = 128    # assumed batch size; the original post does not define it

transform = transforms.ToTensor()
train_data = dset.MNIST(root='./data/MNIST_PT_data/', train=True, transform=transform, download=False)   # set download=True on the first run
test_data = dset.MNIST(root='./data/MNIST_PT_data/', train=False, transform=transform, download=False)
train_loader = torch.utils.data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_data, batch_size=BATCH_SIZE, shuffle=False)

1. The transforms module handles data preprocessing and data augmentation (see the sketch after this list).
2. The DataLoader module automatically builds a batched, shuffled loader from a Dataset.
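
A minimal sketch of an augmented transform pipeline; the specific transforms and the normalization constants below are illustrative additions, not part of the original post:

import torchvision.transforms as transforms

# Illustrative augmentation pipeline for the training set (assumed values).
train_transform = transforms.Compose([
    transforms.RandomRotation(10),                 # rotate by up to +/-10 degrees
    transforms.ToTensor(),                         # PIL image -> float tensor in [0, 1]
    transforms.Normalize((0.1307,), (0.3081,)),    # commonly quoted MNIST mean/std
])
# Passing train_transform instead of transforms.ToTensor() to dset.MNIST enables the augmentation.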

2. Model construction and summary

from torchsummaryX import summary
from torch import nn
from torch.nn import Sequential
from torch.nn import Conv2d,MaxPool2d,Linear,LSTM
from torch.nn import Dropout,ReLU,BatchNorm2d
class CNN(nn.Module):
    def __init__(self):
        super(CNN,self).__init__()
        self.layer1 = Sequential(
            Conv2d(1,32,5,padding=2), #1*28*28-> 32*28*28
            ReLU(True),
            Dropout(0.2),
            MaxPool2d(2,2) # 32*14*14
        )
        self.layer2 = Sequential(
            Conv2d(32,64,5,padding=1), #32*14*14 -> 64*12*12
            ReLU(True),
            Dropout(0.2),
            MaxPool2d(2,2) #64*6*6
        )
        self.layer3 = Sequential(
            Linear(64*6*6,512),
            ReLU(True),
            Linear(512,64),
            ReLU(True),
            Linear(64,10)
        )
    
    def forward(self,x):
        conv1 = self.layer1(x)
        conv2 = self.layer2(conv1)
        conv2 = conv2.view(conv2.size(0),-1)
        out = self.layer3(conv2)
        return out
cnn = CNN().cuda()
print(cnn)    # print the model architecture

Several libraries for printing a model summary:
torchsummary does not need the batch dimension in its input shape
tensorwatch does not support models on the GPU

import torchsummary
import tensorwatch as tw
from torchsummaryX import summary

summary(cnn, torch.zeros((128, 1, 28, 28)).cuda())   # torchsummaryX: pass a dummy input tensor
torchsummary.summary(cnn, (1, 28, 28))               # torchsummary: input shape without the batch dimension
tw.model_stats(cnn, [128, 1, 28, 28])                # tensorwatch: the model must be on the CPU

3. Training and evaluation

from tqdm import tqdm

LR = 0.01      # assumed learning rate; not specified in the original post
EPOCH = 10     # assumed number of epochs; not specified in the original post

optimizer = torch.optim.SGD(cnn.parameters(), lr=LR)
criterion = torch.nn.CrossEntropyLoss()
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=50)

for epoch in tqdm(range(EPOCH)):
    loss_sigma = 0.0    # running sum of the loss within the epoch
    correct = 0.0
    total = 0.0
    for step, (b_x, b_y) in enumerate(train_loader):
        b_x = b_x.cuda()
        b_y = b_y.cuda()

        out = cnn(b_x)
        loss = criterion(out, b_y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        y_pred = torch.max(out, 1)[1]
        total += b_y.size(0)
        correct += (y_pred == b_y).sum().item()
        loss_sigma += loss.item()

        if (step + 1) % 100 == 0:
            loss_avg = loss_sigma / 100       # average loss over the last 100 iterations
            loss_sigma = 0.0
            accuracy = correct / total
            print('EPOCH:{:0>3}/{:0>3} Iteration:{:0>3}/{:0>3} Loss:{:.4f} Acc:{:.2%}'.format(
                epoch + 1, EPOCH, step + 1, len(train_loader), loss_avg, accuracy))
    scheduler.step()    # step the LR scheduler once per epoch, after the optimizer updates

1. optimizer
2. loss function (criterion)
3. torch.optim.lr_scheduler
4. metrics
5. with torch.no_grad() & model.eval() (see the sketch after this list)
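
A minimal sketch (reusing the cnn model above) of how model.eval() and torch.no_grad() work together: eval() switches layers such as Dropout and BatchNorm to inference behavior, while no_grad() turns off gradient tracking to save memory.

cnn.eval()                                     # Dropout disabled, BatchNorm uses running stats
with torch.no_grad():                          # no gradients are recorded
    dummy = torch.zeros(1, 1, 28, 28).cuda()   # dummy input, for illustration only
    logits = cnn(dummy)
cnn.train()                                    # switch back before the next training epoch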

cnn.eval()    # switch Dropout layers to inference mode before evaluating
correct = 0
total = 0
with torch.no_grad():
    for step,(x_test,y_test) in enumerate(test_loader):
        x_test = x_test.cuda()
        y_test = y_test.cuda()
        
        outputs = cnn(x_test)
        _, y_pred = torch.max(outputs.data, 1)
        total += y_test.size(0)
        correct += (y_pred == y_test).sum().item()
print('Accuracy of the network on the 10000 test images: %.2f %%' % (
    100 * correct / total))

6. model.save & model.load

torch.save(cnn.state_dict(),'./model/cnn_state_dict.pkl')
cnn_load = CNN()
cnn_load.load_state_dict(torch.load('./model/cnn_state_dict.pkl'))
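
A related sketch: if the checkpoint saved above needs to be loaded on a CPU-only machine, torch.load accepts a map_location argument (the file path is reused from the snippet above).

# Load the saved weights onto the CPU instead of the GPU they were trained on.
state = torch.load('./model/cnn_state_dict.pkl', map_location='cpu')
cnn_cpu = CNN()
cnn_cpu.load_state_dict(state)
cnn_cpu.eval()    # ready for CPU inference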

7. TensorBoard

from torch.utils.tensorboard import SummaryWriter   # or: from tensorboardX import SummaryWriter

writer = SummaryWriter(log_dir='./runs/mnist_6_17')

# Inside the epoch loop:
writer.add_scalars('Loss_group', {'train_loss': loss_avg}, epoch)
# record the learning rate
writer.add_scalar('learning rate', scheduler.get_last_lr()[0], epoch)
# record the accuracy
writer.add_scalars('Accuracy_group', {'train_acc': correct / total}, epoch)

# Histograms of the parameters and their gradients, once per epoch:
for name, layer in cnn.named_parameters():
    writer.add_histogram(name + '_grad', layer.grad.cpu().data.numpy(), epoch)
    writer.add_histogram(name + '_data', layer.cpu().data.numpy(), epoch)
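
After (or during) training, the logged scalars and histograms can be viewed by running tensorboard --logdir ./runs and opening the printed local URL in a browser.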