PyTorch in Practice (3): Building a CNN and Visualizing Training/Test Accuracy and Loss

This post builds on the convolutional neural network from the previous article (PyTorch in Practice (2): Building a Convolutional Neural Network (CNN)) and adds visualization of accuracy and loss on the training and test sets.
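Compared with the previous post, the only additions are the bookkeeping around the training loop: per-epoch accuracy and loss are recorded for both the training and test sets and plotted with matplotlib at the end (TensorBoard logging is also sketched in the commented-out tb_writer lines). The script assumes the same utils.LoadData helper from the earlier posts in this series, which returns the 28x28 images and their labels as numpy arrays.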

import numpy as np
import torch
import torch.nn as nn
from utils import *  # provides LoadData, the dataset helper from the earlier posts
import time
from matplotlib import pyplot as plt
from torch.utils.tensorboard import SummaryWriter
import torchvision  # only used by the commented-out TensorBoard image grid below
import torchinfo


# tb_writer = SummaryWriter('runs')
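# To enable TensorBoard logging, uncomment the SummaryWriter above and the
# tb_writer.add_* calls further below, then run: tensorboard --logdir=runs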

num_classes = 50        # number of classes in the dataset
num_samples_train = 15  # training images per class
num_samples_test = 5    # test images per class
seed = 1                # random seed passed to LoadData
batch_size = 500
epochs = 1000

# per-epoch metric buffers for the plots at the end
train_accuracy = np.zeros(epochs, dtype=float)
test_accuracy = np.zeros(epochs, dtype=float)
train_loss = np.zeros(epochs, dtype=float)
test_loss = np.zeros(epochs, dtype=float)

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

class ConvNet(nn.Module):

    def __init__(self):
        super(ConvNet, self).__init__()

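        # Block 1: two 3x3 convolutions (1 -> 32 -> 32 channels), each followed by
        # BatchNorm and ReLU, then 2x2 max-pooling: 28x28 -> 14x14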
        self.layer1 = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=32, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(num_features=32),
            nn.ReLU(),
            nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(num_features=32),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),
        )

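        # Block 2: two 3x3 convolutions (32 -> 64 -> 64 channels) with BatchNorm
        # and ReLU, then 2x2 max-pooling: 14x14 -> 7x7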
        self.layer2 = nn.Sequential(
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(num_features=64),
            nn.ReLU(),
            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(num_features=64),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2)
        )

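        # Classifier head: flatten the 64x7x7 feature map, apply heavy dropout
        # (p=0.85) against overfitting on the small training set, and map
        # 512 hidden units to the 50 output classes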
        self.layer3 = nn.Sequential(
            nn.Flatten(),
            nn.Dropout(0.85),
            nn.Linear(in_features=64 * 7 * 7, out_features=512),
            nn.BatchNorm1d(num_features=512),
            nn.ReLU(),
            nn.Dropout(0.85),
            nn.Linear(in_features=512, out_features=50),
        )

    def forward(self, x):
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        return x

model = ConvNet()
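# print a layer-by-layer summary; input_size includes the batch dimension (here 1)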
torchinfo.summary(model, input_size=(1, 1, 28, 28))

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.002, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
# https://www.jianshu.com/p/39dac1e24709
# optimizer = torch.optim.Adadelta(model.parameters(), lr=1, rho=0.9, eps=1e-06, weight_decay=0)
# optimizer = torch.optim.AdamW(model.parameters(),lr=0.001,betas=(0.9,0.999),eps=1e-08,weight_decay=0.01,amsgrad=False)

# halve the learning rate when the monitored loss has not improved for 10 consecutive epochs
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=10, verbose=False, threshold=0.0001, threshold_mode='rel', cooldown=0, min_lr=0, eps=1e-08)
# load data: LoadData (from utils) returns the train/test images and labels for num_classes classes
train_image, train_label, test_image, test_label = LoadData(num_classes, num_samples_train, num_samples_test, seed)
train_image = torch.Tensor(train_image)
train_image = train_image.reshape(num_classes * num_samples_train, 1, 28, 28)
train_label = torch.LongTensor(train_label)
test_image = torch.Tensor(test_image)
test_image = test_image.reshape(num_classes * num_samples_test, 1, 28, 28)
test_label = torch.LongTensor(test_label)

train_dataset = torch.utils.data.TensorDataset(train_image, train_label)
test_dataset = torch.utils.data.TensorDataset(test_image, test_label)

train_loader = torch.utils.data.DataLoader(
    dataset=train_dataset,
    batch_size=batch_size,
    shuffle=True,  # reshuffle the training set every epoch
)

test_loader = torch.utils.data.DataLoader(
    dataset=test_dataset,
    batch_size=batch_size,
)
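# with 750 training and 250 test images at batch_size=500, each epoch sees 2 training batches and 1 test batch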

# images, labels = next(iter(train_loader))
# grid = torchvision.utils.make_grid(images)
# tb_writer.add_image('images', grid)
# tb_writer.add_graph(model, images)

start_time = time.time()

model = model.to(device)
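# training loop: one optimization pass over the training set per epoch, followed by
# an evaluation pass over the test set with gradients disabled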
for epoch in range(epochs):
    model.train()
    train_correct_count = 0
    train_total_count = 0
    train_loss[epoch] = 0
    for images, labels in train_loader:
        images, labels = images.to(device), labels.to(device)
        outputs = model(images)
        pred = outputs.argmax(dim=1)
        optimizer.zero_grad()
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        train_loss[epoch] += loss.item()
        train_correct_count += torch.sum(pred == labels).item()
        train_total_count += labels.size(0)
    train_loss[epoch] /= len(train_loader)
    # step the plateau scheduler on the epoch's mean training loss
    scheduler.step(train_loss[epoch])

    model.eval()
    test_correct_count = 0
    test_total_count = 0
    test_loss[epoch] = 0
    with torch.no_grad():
        for images, labels in test_loader:
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            pred = outputs.argmax(dim=1)
            loss = criterion(outputs, labels)
            test_loss[epoch] += loss.item()
            test_correct_count += torch.sum(pred == labels).item()
            test_total_count += labels.size(0)
    test_loss[epoch] /= len(test_loader)

    train_accuracy[epoch] = train_correct_count / train_total_count
    test_accuracy[epoch] = test_correct_count / test_total_count
    print(f'epoch {epoch} , train accuracy: {100 * train_accuracy[epoch]:.2f}% , '
          f'test accuracy: {100 * test_accuracy[epoch]:.2f}% , '
          f'train loss: {train_loss[epoch]:.5f} , test loss: {test_loss[epoch]:.5f}')

    # tb_writer.add_scalar('train_loss', train_loss[epoch], epoch)
    # tb_writer.add_scalar('train_accuracy', train_accuracy[epoch], epoch)
    # tb_writer.add_scalar('test_loss', test_loss[epoch], epoch)
    # tb_writer.add_scalar('test_accuracy', test_accuracy[epoch], epoch)

end_time = time.time()
print('elapsed time:', round(end_time - start_time, 2), 'secs')

plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
plt.plot(train_accuracy, label='Training Accuracy')
plt.plot(test_accuracy, label='Test Accuracy')
plt.title('Training and Test Accuracy')
plt.xlabel('epoch')
plt.legend()

plt.subplot(1, 2, 2)
plt.plot(train_loss, label='Training Loss')
plt.plot(test_loss, label='Test Loss')
plt.title('Training and Test Loss')
plt.xlabel('epoch')
plt.legend()
plt.show()

# save the entire model object (architecture + weights); assumes the data/ directory exists
torch.save(model, 'data/cnn_model.pt')
# model = torch.load('data/cnn_model.pt')
# torchinfo.summary(model, input_size=(1, 1, 28, 28))
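
To reuse the trained network later, the commented-out loading lines above can be expanded into a small inference snippet. A minimal sketch, assuming the data/cnn_model.pt file saved by this run and that the ConvNet class definition is importable wherever the model is loaded:

import torch

# loading a whole-model checkpoint requires the ConvNet class to be in scope
model = torch.load('data/cnn_model.pt', map_location='cpu')
model.eval()

# classify the first test image; test_dataset yields (image, label) pairs of shape (1, 28, 28)
image, label = test_dataset[0]
with torch.no_grad():
    logits = model(image.unsqueeze(0))  # add the batch dimension: (1, 1, 28, 28)
    pred = logits.argmax(dim=1).item()
print('predicted:', pred, ', actual:', label.item())

Note that saving only the weights with torch.save(model.state_dict(), ...) and restoring them via model = ConvNet() followed by model.load_state_dict(...) is the more portable convention, since it does not pin the checkpoint to this exact class definition.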