task02 Fashion-MNIST 分类实战 (Fashion-MNIST classification hands-on)
# Environment and basic configuration.
import os
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import torch.optim as optim
# Training hyperparameters.
batch_size = 256      # samples per mini-batch
lr = 1e-4             # learning rate for the optimizer
epochs = 20           # number of full passes over the training set
num_workers = 4       # DataLoader worker processes
# Target device: second GPU if CUDA is available, otherwise CPU.
device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
# Data loading: image preprocessing pipeline.
from torchvision import transforms

image_size = 28  # side length the images are resized to
# NOTE(review): this pipeline (ToPILImage -> Resize -> ToTensor) is defined
# but never applied — the dataset constructors below pass
# transforms.ToTensor() directly. Confirm whether data_transform was meant
# to be used instead.
data_transform = transforms.Compose([
    transforms.ToPILImage(),
    transforms.Resize(image_size),
    transforms.ToTensor(),
]
)
# Download Fashion-MNIST via torchvision's built-in dataset class.
from torchvision import datasets

data_root = r"C:\Users\boyif\Desktop\pytorch-learning\task2"
# Fix: the original omitted the `train` flag on both calls; since it defaults
# to True, "test_data" was actually the 60000-image TRAINING split (the pasted
# output "60000 60000" confirms this), so validation metrics were computed on
# training data. Request the proper splits explicitly.
train_data = datasets.FashionMNIST(root=data_root, train=True, download=True, transform=transforms.ToTensor())
test_data = datasets.FashionMNIST(root=data_root, train=False, download=True, transform=transforms.ToTensor())
print(len(train_data), len(test_data))  # expect: 60000 10000
# Inspect one sample: a 1x28x28 float tensor and an integer class label.
feature, label = train_data[0]
print(feature.shape, label)
60000 60000
torch.Size([1, 28, 28]) 9
# Batch the datasets: shuffle training batches each epoch and drop the ragged
# final batch; keep the test set in a fixed order with every sample included.
train_loader = DataLoader(
    train_data,
    batch_size=batch_size,
    shuffle=True,
    num_workers=num_workers,
    drop_last=True,
)
test_loader = DataLoader(
    test_data,
    batch_size=batch_size,
    shuffle=False,
    num_workers=num_workers,
)
# Network definition.
class Net(nn.Module):
    """Small CNN for 1x28x28 Fashion-MNIST images.

    Two conv/ReLU/pool/dropout stages feed a two-layer fully-connected
    head that produces 10 class logits (raw scores, no softmax).
    """

    def __init__(self):
        super(Net, self).__init__()
        # Feature extractor. Spatial sizes: 28 -> 24 (conv5) -> 12 (pool)
        # -> 8 (conv5) -> 4 (pool), ending with 64 channels.
        self.conv = nn.Sequential(
            nn.Conv2d(1, 32, 5),
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2),
            nn.Dropout(0.3),
            nn.Conv2d(32, 64, 5),
            nn.ReLU(),
            nn.MaxPool2d(2, stride=2),
            nn.Dropout(0.3),
        )
        # Classifier head over the flattened 64*4*4 feature map.
        self.fc = nn.Sequential(
            nn.Linear(64 * 4 * 4, 512),
            nn.ReLU(),
            nn.Linear(512, 10),
        )

    def forward(self, x):
        features = self.conv(x)
        flat = features.view(-1, 64 * 4 * 4)
        return self.fc(flat)
    
# Instantiate the network and move it to the configured device.
# Fix: model.cuda() ignored the `device` chosen above ("cuda:1" with a CPU
# fallback) — it always targets cuda:0 and crashes on CPU-only machines.
model = Net()
model = model.to(device)
# Loss function: cross-entropy over the 10 classes (expects raw logits).
criterion = nn.CrossEntropyLoss()
# Optimizer.
# Fix: honor the configured learning rate `lr` (defined with the other
# hyperparameters) instead of a hard-coded 0.001 that silently overrode it.
optimizer = optim.Adam(model.parameters(), lr=lr)
# Training.
def train(epoch):
    """Run one training epoch over train_loader and print the mean loss.

    Args:
        epoch: 1-based epoch index, used only for the log line.
    """
    model.train()  # enable dropout
    train_loss = 0
    for data, label in train_loader:
        # Fix: use the configured `device` instead of .cuda(), which always
        # targets cuda:0 and fails on CPU-only machines.
        data, label = data.to(device), label.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, label)
        loss.backward()
        optimizer.step()
        # Weight the batch loss by batch size so the epoch mean is exact.
        train_loss += loss.item() * data.size(0)
    train_loss = train_loss / len(train_loader.dataset)
    print("Epoch:{}\tTraining Loss:{:.6f}".format(epoch, train_loss))
def val(epoch):
    """Evaluate on test_loader; print the mean loss and accuracy.

    Args:
        epoch: 1-based epoch index, used only for the log line.
    """
    model.eval()  # disable dropout for deterministic evaluation
    val_loss = 0
    gt_labels = []
    pred_labels = []
    with torch.no_grad():  # no gradients needed during evaluation
        for data, label in test_loader:
            # Fix: route tensors to the configured `device` (see train()).
            data, label = data.to(device), label.to(device)
            output = model(data)
            preds = torch.argmax(output, 1)
            # Fix: drop the deprecated `.data` accessor before .numpy().
            gt_labels.append(label.cpu().numpy())
            pred_labels.append(preds.cpu().numpy())
            loss = criterion(output, label)
            val_loss += loss.item() * data.size(0)
    val_loss = val_loss / len(test_loader.dataset)
    gt_labels, pred_labels = np.concatenate(gt_labels), np.concatenate(pred_labels)
    acc = np.sum(gt_labels == pred_labels) / len(pred_labels)
    print("Epoch{}\tValidation Loss:{:.6f},Accuracy:{:.6f}".format(epoch, val_loss, acc))
# Full training schedule: one training pass then one validation pass per epoch.
for ep in range(1, epochs + 1):
    train(ep)
    val(ep)
Epoch:1	Training Loss:0.675452
Epoch1	Validation Loss:0.446463,Accuracy:0.838033
Epoch:2	Training Loss:0.433276
Epoch2	Validation Loss:0.360429,Accuracy:0.868283
Epoch:3	Training Loss:0.371090
Epoch3	Validation Loss:0.303629,Accuracy:0.888517
Epoch:4	Training Loss:0.335961
Epoch4	Validation Loss:0.277939,Accuracy:0.898717
Epoch:5	Training Loss:0.312558
Epoch5	Validation Loss:0.253409,Accuracy:0.907983
Epoch:6	Training Loss:0.292904
Epoch6	Validation Loss:0.250804,Accuracy:0.910850
Epoch:7	Training Loss:0.281885
Epoch7	Validation Loss:0.234189,Accuracy:0.915583
Epoch:8	Training Loss:0.266308
Epoch8	Validation Loss:0.214899,Accuracy:0.922617
Epoch:9	Training Loss:0.256045
Epoch9	Validation Loss:0.213076,Accuracy:0.920283
Epoch:10	Training Loss:0.248831
Epoch10	Validation Loss:0.202128,Accuracy:0.928667
Epoch:11	Training Loss:0.238236
Epoch11	Validation Loss:0.180277,Accuracy:0.934783
Epoch:12	Training Loss:0.231228
Epoch12	Validation Loss:0.179415,Accuracy:0.935617
Epoch:13	Training Loss:0.221745
Epoch13	Validation Loss:0.169739,Accuracy:0.939083
Epoch:14	Training Loss:0.213817
Epoch14	Validation Loss:0.159587,Accuracy:0.943083
Epoch:15	Training Loss:0.210511
Epoch15	Validation Loss:0.164414,Accuracy:0.943500
Epoch:16	Training Loss:0.203085
Epoch16	Validation Loss:0.141764,Accuracy:0.949633
Epoch:17	Training Loss:0.199027
Epoch17	Validation Loss:0.143231,Accuracy:0.948333
Epoch:18	Training Loss:0.188694
Epoch18	Validation Loss:0.133826,Accuracy:0.952450
Epoch:19	Training Loss:0.187342
Epoch19	Validation Loss:0.131635,Accuracy:0.954050
Epoch:20	Training Loss:0.181600
Epoch20	Validation Loss:0.123742,Accuracy:0.958283
  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值