Implementing a Multi-Class Classification Model in PyTorch

The dataset used is MNIST.

import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
import torchvision.transforms as transforms

# Define the model
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # five fully connected layers
        self.linear1 = nn.Linear(784, 512)
        self.linear2 = nn.Linear(512, 256)
        self.linear3 = nn.Linear(256, 128)
        self.linear4 = nn.Linear(128, 64)
        self.linear5 = nn.Linear(64, 10)

    def forward(self, x):
        # flatten each 28*28 image into a 784-dimensional vector
        x = x.view(-1, 784)
        # ReLU activation after the first four layers
        x = F.relu(self.linear1(x))
        x = F.relu(self.linear2(x))
        x = F.relu(self.linear3(x))
        x = F.relu(self.linear4(x))
        # return raw logits; CrossEntropyLoss applies the softmax itself
        return self.linear5(x)
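
A quick sanity check of the input and output shapes (a sketch, not part of the original script):

net = Net()
dummy = torch.randn(2, 1, 28, 28)   # a fake batch shaped like MNIST input
print(net(dummy).shape)             # torch.Size([2, 10]): one logit per class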
        

# batch size
batch_size = 64
# convert images to tensors and standardize them with MNIST's mean and std
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
    ])
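
Where the constants 0.1307 and 0.3081 come from: they are approximately the mean and standard deviation of the raw MNIST training pixels. A one-off sketch to reproduce them, reusing the same root path:

raw = MNIST(root=r'C:/Users/Administrator/Desktop/data/', train=True,
            download=True, transform=transforms.ToTensor())
pixels = raw.data.float() / 255.0                  # uint8 [0, 255] -> float [0, 1]
print(pixels.mean().item(), pixels.std().item())   # ~0.1307, ~0.3081
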
# training set
train_dataset = MNIST(root=r'C:/Users/Administrator/Desktop/data/', 
                      train=True, 
                      download=True, 
                      transform=transform)

train_loader = DataLoader(train_dataset,
                         shuffle=True,
                         batch_size=batch_size)
# test set
test_dataset = MNIST(root=r'C:/Users/Administrator/Desktop/data/', 
                      train=False, 
                      download=True, 
                      transform=transform)

test_loader = DataLoader(test_dataset,
                         shuffle=True,
                         batch_size=batch_size)
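
A quick look at what one batch contains (a sketch using the loaders above):

X, y = next(iter(train_loader))
print(X.shape, y.shape)   # torch.Size([64, 1, 28, 28]) torch.Size([64])
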
# Instantiate the model
model = Net()

print(model)
# Cross-entropy loss (internally applies log-softmax to the raw logits)
criterion = nn.CrossEntropyLoss()
# Stochastic gradient descent with momentum;
# each step does v <- momentum * v + grad, then p <- p - lr * v
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
# record the loss for plotting
loss_list = []
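
The momentum update can be reproduced by hand on a toy one-parameter problem; a minimal sketch (every name here is illustrative):

lr, mu = 0.01, 0.5
p = torch.tensor([2.0], requires_grad=True)
opt = torch.optim.SGD([p], lr=lr, momentum=mu)

v = torch.zeros(1)                     # velocity buffer
p_manual = p.detach().clone()

for _ in range(3):
    opt.zero_grad()
    loss = (p ** 2).sum()              # toy loss, gradient is 2*p
    loss.backward()
    g = p.grad.detach().clone()
    opt.step()                         # PyTorch's update
    v = mu * v + g                     # manual: v <- mu*v + g
    p_manual = p_manual - lr * v       #         p <- p - lr*v
    print(p.item(), p_manual.item())   # the two trajectories match
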
for epoch in range(10):
    for batch, (X, y) in enumerate(train_loader):
        # forward pass
        y_pred = model(X)
        loss = criterion(y_pred, y)
        # backward pass and parameter update
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # log the loss every 300 batches
        if batch % 300 == 0:
            loss_list.append(loss.item())
            print("loss:", loss.item())

# Plot the loss curve (one point per 300 training batches)
plt.plot(loss_list)
plt.show()
# Evaluate accuracy on the test set
total = 0
correct = 0
# no gradients needed for evaluation
with torch.no_grad():
    for X, y in test_loader:

        y_pred = model(X)
        # torch.max returns two values: the maximum and its index;
        # along dim=1 the index is the predicted class
        _, predicted = torch.max(y_pred.data, dim=1)

        total += y.size(0)
        correct += (predicted == y).sum().item()

print('accuracy on test set: %.2f %%' % (100.0 * correct / total))
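
A tiny illustration of what torch.max returns along dim=1 (hypothetical scores):

scores = torch.tensor([[0.1, 2.0, -1.0],
                       [3.0, 0.5,  0.2]])
vals, idx = torch.max(scores, dim=1)
print(vals)   # tensor([2., 3.])
print(idx)    # tensor([1, 0]), i.e. the predicted class per row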


[Figure: training loss curve]

To get an intuitive feel for the accuracy, we can plot some test images together with the model's predictions:

plt.figure(figsize=(20, 20))
with torch.no_grad():
    for i in range(80):
        plt.subplot(8, 10, i + 1)
        # hide the axis ticks
        plt.xticks([])
        plt.yticks([])
        plt.grid(False)
        plt.imshow(test_dataset.data[i], cmap=plt.cm.binary_r)

        # apply the same normalization as during training
        X = transform(test_dataset.data[i].numpy())
        y_pred = model(X)
        _, predicted = torch.max(y_pred.data, dim=1)

        # correct predictions keep a black title, errors get a red one
        if test_dataset.targets[i].item() == predicted.item():
            plt.title(test_dataset.targets[i].item())
        else:
            plt.title(predicted.item(), color='red')

plt.show()

Misclassified digits are shown with a red title.

[Figure: grid of test digits with predicted labels; misclassified ones titled in red]
Open question:
After adding a softmax to the output of the fifth layer, the loss decreased far more slowly and the test accuracy dropped as well. Why?

The only code difference is the forward method of the Net class (shown below).

Problem solved:
The CrossEntropyLoss() used here already includes the softmax step: it combines LogSoftmax and NLLLoss and expects raw logits. Adding a softmax in forward therefore squashes the outputs twice, which flattens the loss and shrinks the gradients, so training slows down and accuracy suffers.
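
A minimal check of this equivalence (hypothetical logits, not tied to the model above):

logits = torch.randn(4, 10)
targets = torch.randint(0, 10, (4,))
ce = nn.CrossEntropyLoss()(logits, targets)
nll = F.nll_loss(F.log_softmax(logits, dim=1), targets)
print(torch.allclose(ce, nll))   # True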

For reference, the slower softmax variant differed from the script above only in the forward method of the Net class:

    def forward(self, x):
        x = x.view(-1, 784)
        x = F.relu(self.linear1(x))
        x = F.relu(self.linear2(x))
        x = F.relu(self.linear3(x))
        x = F.relu(self.linear4(x))
        # extra softmax: CrossEntropyLoss applies log-softmax on top of this
        x = F.softmax(self.linear5(x), dim=1)
        return x

Everything else (data loading, training loop, and evaluation) was identical to the script above, so it is not repeated here.
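
If class probabilities are ever needed, for example to display prediction confidence, a common pattern is to keep forward returning raw logits and apply the softmax only at inference time; a minimal sketch, assuming the model and a test batch X from above:

with torch.no_grad():
    probs = F.softmax(model(X), dim=1)          # each row now sums to 1
    conf, predicted = torch.max(probs, dim=1)   # confidence and class index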


