深度学习-----手写数字图片的识别训练

上课讲过的代码,记录一下~~

# 1. Imports
import os

import matplotlib.pyplot as plt  # needed by the loss plot at the end of the script
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch import nn
from torch.utils.data import DataLoader
# torchvision's built-in MNIST dataset and preprocessing transforms
from torchvision.datasets import mnist
import torchvision.transforms as transforms
from torchvision import transforms, datasets
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

# Preprocessing: PIL image -> float tensor in [0, 1], then normalize each
# channel with mean 0.5 / std 0.5, mapping pixel values into [-1, 1].
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.5], [0.5]),
])


# Load the MNIST splits.
# BUGFIX: the original loaded the *training* split (train=True) for
# test_dataset as well, so all "test" metrics were measured on training data.
# Also use a forward-slash path: '.\data' contains the invalid escape '\d'.
train_dataset = mnist.MNIST('./data', train=True, transform=transform, download=False)
test_dataset = mnist.MNIST('./data', train=False, transform=transform, download=False)

# Mini-batch loaders: shuffle training batches each epoch, keep test order fixed.
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=32, shuffle=False)
# len(train_loader)
# Three-layer fully connected classifier.
class Net(nn.Module):
    """Simple MLP: two hidden layers with in-place ReLU, plus a linear output.

    Args:
        in_dim: flattened input size (28*28 for MNIST images).
        n_hidden_1: width of the first hidden layer.
        n_hidden_2: width of the second hidden layer.
        out_dim: number of output classes (logits, no softmax applied here).
    """

    def __init__(self, in_dim, n_hidden_1, n_hidden_2, out_dim):
        super().__init__()
        # Layers are created in the same order they are applied, so seeded
        # random initialization is reproducible.
        self.layer1 = nn.Sequential(nn.Linear(in_dim, n_hidden_1), nn.ReLU(True))
        self.layer2 = nn.Sequential(nn.Linear(n_hidden_1, n_hidden_2), nn.ReLU(True))
        self.layer3 = nn.Linear(n_hidden_2, out_dim)

    def forward(self, x):
        """Map a (batch, in_dim) tensor to (batch, out_dim) class logits."""
        hidden = self.layer1(x)
        hidden = self.layer2(hidden)
        return self.layer3(hidden)

# Train on the CPU.
device = torch.device('cpu')

# Input images are 1x28x28 (flattened to 784 features); the hidden-layer
# widths (300, 100) are tunable; 10 outputs, one per digit class.
model = Net(28 * 28, 300, 100, 10)
model.to(device)

# Loss: cross-entropy over the 10 class logits (applies log-softmax internally).
criterion = nn.CrossEntropyLoss()
# Optimizer: plain SGD with momentum.
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)

# ---- Training / evaluation loop ----
# Per-epoch mean training loss and accuracy (list names kept from the original).
losses = []
accury = []
# Per-epoch mean test loss and accuracy.
eval_losses = []
eval_accury = []


for epoch in range(10):
    train_losses = 0.0
    train_accury = 0.0
    model.train()

    for img, label in train_loader:
        img = img.to(device)
        label = label.to(device)

        # Flatten (batch, 1, 28, 28) -> (batch, 784) for the fully connected net.
        img = img.view(img.size(0), -1)

        # Forward pass: call the module directly rather than model.forward()
        # so nn.Module hooks run as intended.
        out = model(img)
        loss = criterion(out, label)

        # Standard step: clear old gradients, backprop, update parameters.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        train_losses += loss.item()

        # out has shape (batch, 10); argmax over dim 1 gives the predicted
        # digit for each row.
        _, pred = torch.max(out, 1)
        num_correct = (pred == label).sum().item()
        # img.shape[0] is the batch size (32, except possibly the last batch).
        train_accury += num_correct / img.shape[0]

    # len(train_loader) = number of batches per epoch.
    losses.append(train_losses / len(train_loader))
    accury.append(train_accury / len(train_loader))

    # ---- Evaluation on the test loader ----
    model.eval()
    loss_eval = 0.0
    accury_eval = 0.0
    # BUGFIX: the original called optimizer.zero_grad()/optimizer.step() here;
    # with momentum=0.5 that keeps nudging the weights during evaluation even
    # though no gradients are computed. Also wrap in no_grad() to skip
    # autograd bookkeeping.
    with torch.no_grad():
        for img, label in test_loader:
            # BUGFIX: the original discarded the results of .to(device);
            # Tensor.to() is not in-place.
            img = img.to(device)
            label = label.to(device)

            img = img.view(img.size(0), -1)

            out = model(img)
            loss = criterion(out, label)
            loss_eval += loss.item()

            _, pred = out.max(1)
            num_correct = (pred == label).sum().item()
            accury_eval += num_correct / img.shape[0]

    eval_losses.append(loss_eval / len(test_loader))
    eval_accury.append(accury_eval / len(test_loader))

    print('epoch: {}, Train Loss: {:.4f}, Train Acc: {:.4f}, Test Loss: {:.4f}, Test Acc: {:.4f}'
          .format(epoch, train_losses/len(train_loader), train_accury/ len(train_loader), 
                    loss_eval/len(test_loader), accury_eval/len(test_loader))) 


# Visualize the per-epoch training loss (requires matplotlib imported as plt).
plt.plot(np.arange(len(losses)), losses)
        

评论 2
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值