import torch
import torch.nn as nn
from torchvision import transforms, datasets
import os, PIL, pathlib, warnings
warnings.filterwarnings("ignore")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device
device(type='cuda')
data_dir = pathlib.Path('./49-data')
data_paths = list(data_dir.glob('*'))
classNames = [path.name for path in data_paths]  # each subfolder name is a class label
classNames
['Dark', 'Green', 'Light', 'Medium']
train_transforms = transforms.Compose([
    transforms.Resize([224, 224]),
    transforms.ToTensor(),
    transforms.Normalize(
        mean=[0.485, 0.456, 0.406],  # ImageNet channel means
        std=[0.229, 0.224, 0.225]    # ImageNet channel stds
    )
])

test_transforms = transforms.Compose([
    transforms.Resize([224, 224]),
    transforms.ToTensor(),
    transforms.Normalize(
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225]
    )
])
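Note that the train and test transforms above are identical. If overfitting became a concern, light augmentation could be added to the training pipeline only. A minimal sketch; the specific augmentations (RandomHorizontalFlip, RandomRotation) are illustrative choices of mine, not part of the original:

# Hypothetical variant: augment only the training data
augmented_train_transforms = transforms.Compose([
    transforms.Resize([224, 224]),
    transforms.RandomHorizontalFlip(p=0.5),  # flip half the images horizontally
    transforms.RandomRotation(10),           # rotate by up to +/-10 degrees
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])
])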
total_data = datasets.ImageFolder("./49-data", transform=train_transforms)
total_data
Dataset ImageFolder
    Number of datapoints: 1200
    Root location: ./49-data
    StandardTransform
Transform: Compose(
               Resize(size=[224, 224], interpolation=bilinear, max_size=None, antialias=warn)
               ToTensor()
               Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
           )
total_data.class_to_idx
{'Dark': 0, 'Green': 1, 'Light': 2, 'Medium': 3}
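For decoding predictions later it helps to invert this mapping; a small sketch (the `idx_to_class` name is mine, not the author's):

# Invert class_to_idx so a predicted index maps back to a class name
idx_to_class = {v: k for k, v in total_data.class_to_idx.items()}
print(idx_to_class)  # {0: 'Dark', 1: 'Green', 2: 'Light', 3: 'Medium'}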
## 3. Splitting the Dataset
train_size = int(0.8 * len(total_data))
test_size = len(total_data) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(total_data, [train_size, test_size])
print(train_dataset, test_dataset)
batch_size = 32
train_dl = torch.utils.data.DataLoader(train_dataset,
                                       batch_size=batch_size,
                                       shuffle=True)
test_dl = torch.utils.data.DataLoader(test_dataset,
                                      batch_size=batch_size,
                                      shuffle=True)  # shuffling isn't needed for evaluation, but does no harm
<torch.utils.data.dataset.Subset object at 0x7f465037fa30> <torch.utils.data.dataset.Subset object at 0x7f465037fee0>
for X, y in test_dl:
    print("Shape of X [N, C, H, W]: ", X.shape)
    print("Shape of y: ", y.shape, y.dtype)
    break
Shape of X [N, C, H, W]: torch.Size([32, 3, 224, 224])
Shape of y: torch.Size([32]) torch.int64
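One caveat: `random_split` draws a fresh random partition on every run, so train/test membership changes between executions. If reproducibility mattered, the split could be seeded; a sketch, not part of the original code:

# Hypothetical variant: fix the split with a seeded generator for reproducibility
generator = torch.Generator().manual_seed(42)
train_dataset, test_dataset = torch.utils.data.random_split(
    total_data, [train_size, test_size], generator=generator)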
## II. Building VGG-16 by Hand
## 1. Building the Model
import torch.nn.functional as F

class vgg16(nn.Module):
    def __init__(self):
        super(vgg16, self).__init__()
        # Convolutional block 1
        self.block1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))
        )
        # Convolutional block 2
        self.block2 = nn.Sequential(
            nn.Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.ReLU(),
            nn.Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))
        )
        # Convolutional block 3
        self.block3 = nn.Sequential(
            nn.Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.ReLU(),
            nn.Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.ReLU(),
            nn.Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))
        )
        # Convolutional block 4
        self.block4 = nn.Sequential(
            nn.Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.ReLU(),
            nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.ReLU(),
            nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))
        )
        # Convolutional block 5
        self.block5 = nn.Sequential(
            nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.ReLU(),
            nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.ReLU(),
            nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))
        )
        # Fully connected layers for classification
        self.classifier = nn.Sequential(
            nn.Linear(in_features=512 * 7 * 7, out_features=4096),
            nn.ReLU(),
            nn.Linear(in_features=4096, out_features=4096),
            nn.ReLU(),
            nn.Linear(in_features=4096, out_features=4)  # 4 coffee-bean classes
        )

    def forward(self, x):
        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        x = self.block4(x)
        x = self.block5(x)
        x = torch.flatten(x, start_dim=1)  # flatten to [N, 512*7*7]
        x = self.classifier(x)
        return x
device = "cuda" if torch.cuda.is_available() else "cpu"
print("Using {} device".format(device))
model = vgg16().to(device)
print(model)
Using cuda device
vgg16(
  (block1): Sequential(
    (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (1): ReLU()
    (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (3): ReLU()
    (4): MaxPool2d(kernel_size=(2, 2), stride=(2, 2), padding=0, dilation=1, ceil_mode=False)
  )
  (block2): Sequential(
    (0): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (1): ReLU()
    (2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (3): ReLU()
    (4): MaxPool2d(kernel_size=(2, 2), stride=(2, 2), padding=0, dilation=1, ceil_mode=False)
  )
  (block3): Sequential(
    (0): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (1): ReLU()
    (2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (3): ReLU()
    (4): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (5): ReLU()
    (6): MaxPool2d(kernel_size=(2, 2), stride=(2, 2), padding=0, dilation=1, ceil_mode=False)
  )
  (block4): Sequential(
    (0): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (1): ReLU()
    (2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (3): ReLU()
    (4): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (5): ReLU()
    (6): MaxPool2d(kernel_size=(2, 2), stride=(2, 2), padding=0, dilation=1, ceil_mode=False)
  )
  (block5): Sequential(
    (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (1): ReLU()
    (2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (3): ReLU()
    (4): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (5): ReLU()
    (6): MaxPool2d(kernel_size=(2, 2), stride=(2, 2), padding=0, dilation=1, ceil_mode=False)
  )
  (classifier): Sequential(
    (0): Linear(in_features=25088, out_features=4096, bias=True)
    (1): ReLU()
    (2): Linear(in_features=4096, out_features=4096, bias=True)
    (3): ReLU()
    (4): Linear(in_features=4096, out_features=4, bias=True)
  )
)
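Before training, a quick shape check with a dummy batch confirms that a 224x224 input flows through all five blocks to 4 logits. A minimal sanity-check sketch, not part of the original code:

# Sanity check: one fake image through the network should yield 4 class logits
dummy = torch.randn(1, 3, 224, 224).to(device)
with torch.no_grad():
    out = model(dummy)
print(out.shape)  # expected: torch.Size([1, 4])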
## 2. Inspecting the Model
# Summarize model parameters and other statistics
from torchsummary import summary
summary(model, (3, 224, 224))
----------------------------------------------------------------
Layer (type) Output Shape Param #
================================================================
Conv2d-1 [-1, 64, 224, 224] 1,792
ReLU-2 [-1, 64, 224, 224] 0
Conv2d-3 [-1, 64, 224, 224] 36,928
ReLU-4 [-1, 64, 224, 224] 0
MaxPool2d-5 [-1, 64, 112, 112] 0
Conv2d-6 [-1, 128, 112, 112] 73,856
ReLU-7 [-1, 128, 112, 112] 0
Conv2d-8 [-1, 128, 112, 112] 147,584
ReLU-9 [-1, 128, 112, 112] 0
MaxPool2d-10 [-1, 128, 56, 56] 0
Conv2d-11 [-1, 256, 56, 56] 295,168
ReLU-12 [-1, 256, 56, 56] 0
Conv2d-13 [-1, 256, 56, 56] 590,080
ReLU-14 [-1, 256, 56, 56] 0
Conv2d-15 [-1, 256, 56, 56] 590,080
ReLU-16 [-1, 256, 56, 56] 0
MaxPool2d-17 [-1, 256, 28, 28] 0
Conv2d-18 [-1, 512, 28, 28] 1,180,160
ReLU-19 [-1, 512, 28, 28] 0
Conv2d-20 [-1, 512, 28, 28] 2,359,808
ReLU-21 [-1, 512, 28, 28] 0
Conv2d-22 [-1, 512, 28, 28] 2,359,808
ReLU-23 [-1, 512, 28, 28] 0
MaxPool2d-24 [-1, 512, 14, 14] 0
Conv2d-25 [-1, 512, 14, 14] 2,359,808
ReLU-26 [-1, 512, 14, 14] 0
Conv2d-27 [-1, 512, 14, 14] 2,359,808
ReLU-28 [-1, 512, 14, 14] 0
Conv2d-29 [-1, 512, 14, 14] 2,359,808
ReLU-30 [-1, 512, 14, 14] 0
MaxPool2d-31 [-1, 512, 7, 7] 0
Linear-32 [-1, 4096] 102,764,544
ReLU-33 [-1, 4096] 0
Linear-34 [-1, 4096] 16,781,312
ReLU-35 [-1, 4096] 0
Linear-36 [-1, 4] 16,388
================================================================
Total params: 134,276,932
Trainable params: 134,276,932
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 0.57
Forward/backward pass size (MB): 218.52
Params size (MB): 512.23
Estimated Total Size (MB): 731.32
----------------------------------------------------------------
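The 134M total can also be cross-checked directly from the module itself, without torchsummary; a short sketch:

# Cross-check torchsummary's count against the model's own parameters
total_params = sum(p.numel() for p in model.parameters())
trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(total_params, trainable_params)  # both should print 134276932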
## III. Training the Model
## 1. Writing the Training Function
# Training loop
def train(dataloader, model, loss_fn, optimizer):
    size = len(dataloader.dataset)  # size of the training set
    num_batches = len(dataloader)   # number of batches
    train_loss, train_acc = 0, 0    # running totals for loss and accuracy

    for X, y in dataloader:
        X, y = X.to(device), y.to(device)

        # Compute prediction error
        pred = model(X)
        loss = loss_fn(pred, y)

        # Backpropagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Accumulate accuracy and loss
        train_acc += (pred.argmax(1) == y).type(torch.float).sum().item()
        train_loss += loss.item()

    train_acc /= size
    train_loss /= num_batches
    return train_acc, train_loss
## 2. Writing the Test Function
def test(dataloader, model, loss_fn):
    size = len(dataloader.dataset)  # size of the test set
    num_batches = len(dataloader)   # number of batches
    test_loss, test_acc = 0, 0

    # No gradients are needed during evaluation
    with torch.no_grad():
        for imgs, target in dataloader:
            imgs, target = imgs.to(device), target.to(device)

            # Compute loss
            target_pred = model(imgs)
            loss = loss_fn(target_pred, target)

            test_loss += loss.item()
            test_acc += (target_pred.argmax(1) == target).type(torch.float).sum().item()

    test_acc /= size
    test_loss /= num_batches
    return test_acc, test_loss
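The same pieces also support single-image prediction. A sketch using the `idx_to_class` mapping built earlier; the helper name and example path are mine and purely illustrative:

# Hypothetical single-image inference helper (not part of the original code)
from PIL import Image

def predict_image(img_path, model):
    img = Image.open(img_path).convert('RGB')
    x = test_transforms(img).unsqueeze(0).to(device)  # add batch dim: [1, 3, 224, 224]
    model.eval()
    with torch.no_grad():
        logits = model(x)
    return idx_to_class[logits.argmax(1).item()]

# e.g. predict_image('./49-data/Dark/some_bean.png', model)  # path is illustrative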
## 3. Training
import copy

optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
loss_fn = nn.CrossEntropyLoss()  # create the loss function

epochs = 40

train_loss = []
train_acc = []
test_loss = []
test_acc = []
best_acc = 0  # best test accuracy so far, used to select the best model

for epoch in range(epochs):
    model.train()
    epoch_train_acc, epoch_train_loss = train(train_dl, model, loss_fn, optimizer)

    model.eval()
    epoch_test_acc, epoch_test_loss = test(test_dl, model, loss_fn)

    # Keep a copy of the best-performing model in best_model
    if epoch_test_acc > best_acc:
        best_acc = epoch_test_acc
        best_model = copy.deepcopy(model)

    train_acc.append(epoch_train_acc)
    train_loss.append(epoch_train_loss)
    test_acc.append(epoch_test_acc)
    test_loss.append(epoch_test_loss)

    # Get the current learning rate
    lr = optimizer.state_dict()['param_groups'][0]['lr']

    template = ('Epoch:{:2d}, Train_acc:{:.1f}%, Train_loss:{:.3f}, Test_acc:{:.1f}%, Test_loss:{:.3f}, Lr:{:.2E}')
    print(template.format(epoch + 1, epoch_train_acc * 100, epoch_train_loss,
                          epoch_test_acc * 100, epoch_test_loss, lr))

# Save the best model's weights to a file
PATH = './best_model.pth'  # filename for the saved parameters
torch.save(best_model.state_dict(), PATH)

print('Done')
Epoch: 1, Train_acc:24.9%, Train_loss:1.388, Test_acc:22.5%, Test_loss:1.387, Lr:1.00E-04
Epoch: 2, Train_acc:41.7%, Train_loss:1.245, Test_acc:45.8%, Test_loss:1.035, Lr:1.00E-04
Epoch: 3, Train_acc:62.0%, Train_loss:0.795, Test_acc:79.6%, Test_loss:0.709, Lr:1.00E-04
Epoch: 4, Train_acc:75.0%, Train_loss:0.655, Test_acc:79.2%, Test_loss:0.480, Lr:1.00E-04
Epoch: 5, Train_acc:82.1%, Train_loss:0.464, Test_acc:83.3%, Test_loss:0.471, Lr:1.00E-04
Epoch: 6, Train_acc:83.9%, Train_loss:0.406, Test_acc:89.6%, Test_loss:0.295, Lr:1.00E-04
Epoch: 7, Train_acc:87.1%, Train_loss:0.321, Test_acc:91.2%, Test_loss:0.267, Lr:1.00E-04
Epoch: 8, Train_acc:85.0%, Train_loss:0.395, Test_acc:90.0%, Test_loss:0.412, Lr:1.00E-04
Epoch: 9, Train_acc:89.4%, Train_loss:0.322, Test_acc:85.8%, Test_loss:0.416, Lr:1.00E-04
Epoch:10, Train_acc:92.0%, Train_loss:0.227, Test_acc:88.8%, Test_loss:0.319, Lr:1.00E-04
Epoch:11, Train_acc:94.3%, Train_loss:0.168, Test_acc:93.3%, Test_loss:0.162, Lr:1.00E-04
Epoch:12, Train_acc:94.3%, Train_loss:0.133, Test_acc:95.0%, Test_loss:0.110, Lr:1.00E-04
Epoch:13, Train_acc:95.3%, Train_loss:0.123, Test_acc:92.1%, Test_loss:0.235, Lr:1.00E-04
Epoch:14, Train_acc:94.0%, Train_loss:0.149, Test_acc:96.7%, Test_loss:0.079, Lr:1.00E-04
Epoch:15, Train_acc:96.6%, Train_loss:0.071, Test_acc:98.8%, Test_loss:0.051, Lr:1.00E-04
Epoch:16, Train_acc:97.5%, Train_loss:0.064, Test_acc:96.2%, Test_loss:0.140, Lr:1.00E-04
Epoch:17, Train_acc:97.5%, Train_loss:0.073, Test_acc:98.3%, Test_loss:0.067, Lr:1.00E-04
Epoch:18, Train_acc:96.8%, Train_loss:0.102, Test_acc:96.7%, Test_loss:0.098, Lr:1.00E-04
Epoch:19, Train_acc:98.0%, Train_loss:0.065, Test_acc:96.7%, Test_loss:0.070, Lr:1.00E-04
Epoch:20, Train_acc:97.5%, Train_loss:0.065, Test_acc:97.1%, Test_loss:0.058, Lr:1.00E-04
Epoch:21, Train_acc:97.7%, Train_loss:0.051, Test_acc:96.7%, Test_loss:0.071, Lr:1.00E-04
Epoch:22, Train_acc:98.5%, Train_loss:0.045, Test_acc:97.9%, Test_loss:0.067, Lr:1.00E-04
Epoch:23, Train_acc:98.5%, Train_loss:0.043, Test_acc:95.8%, Test_loss:0.094, Lr:1.00E-04
Epoch:24, Train_acc:99.5%, Train_loss:0.020, Test_acc:98.3%, Test_loss:0.053, Lr:1.00E-04
Epoch:25, Train_acc:99.6%, Train_loss:0.016, Test_acc:97.5%, Test_loss:0.081, Lr:1.00E-04
Epoch:26, Train_acc:97.1%, Train_loss:0.104, Test_acc:96.2%, Test_loss:0.084, Lr:1.00E-04
Epoch:27, Train_acc:98.4%, Train_loss:0.045, Test_acc:98.3%, Test_loss:0.049, Lr:1.00E-04
Epoch:28, Train_acc:97.1%, Train_loss:0.113, Test_acc:97.5%, Test_loss:0.103, Lr:1.00E-04
Epoch:29, Train_acc:98.8%, Train_loss:0.049, Test_acc:98.8%, Test_loss:0.053, Lr:1.00E-04
Epoch:30, Train_acc:99.0%, Train_loss:0.035, Test_acc:97.5%, Test_loss:0.093, Lr:1.00E-04
Epoch:31, Train_acc:98.3%, Train_loss:0.047, Test_acc:95.0%, Test_loss:0.138, Lr:1.00E-04
Epoch:32, Train_acc:97.6%, Train_loss:0.064, Test_acc:96.7%, Test_loss:0.076, Lr:1.00E-04
Epoch:33, Train_acc:98.5%, Train_loss:0.062, Test_acc:98.8%, Test_loss:0.054, Lr:1.00E-04
Epoch:34, Train_acc:99.5%, Train_loss:0.025, Test_acc:98.3%, Test_loss:0.035, Lr:1.00E-04
Epoch:35, Train_acc:99.0%, Train_loss:0.028, Test_acc:92.5%, Test_loss:0.153, Lr:1.00E-04
Epoch:36, Train_acc:98.3%, Train_loss:0.051, Test_acc:96.2%, Test_loss:0.142, Lr:1.00E-04
Epoch:37, Train_acc:98.6%, Train_loss:0.058, Test_acc:98.3%, Test_loss:0.047, Lr:1.00E-04
Epoch:38, Train_acc:99.4%, Train_loss:0.016, Test_acc:98.8%, Test_loss:0.028, Lr:1.00E-04
Epoch:39, Train_acc:99.2%, Train_loss:0.023, Test_acc:96.2%, Test_loss:0.126, Lr:1.00E-04
Epoch:40, Train_acc:97.9%, Train_loss:0.054, Test_acc:98.3%, Test_loss:0.062, Lr:1.00E-04
Done
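Since only the state_dict is saved, reloading the weights for later inference means rebuilding the architecture first. A minimal sketch; the `best_model_loaded` name is mine:

# Rebuild the architecture and load the saved best weights
best_model_loaded = vgg16().to(device)
best_model_loaded.load_state_dict(torch.load('./best_model.pth', map_location=device))
best_model_loaded.eval()  # switch to inference mode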
## IV. Visualizing Results
### 1. Loss and Accuracy Curves
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")  # suppress warning messages

plt.rcParams['font.sans-serif'] = ['sans-serif']  # label font (originally set so non-ASCII labels render)
plt.rcParams['axes.unicode_minus'] = False        # render minus signs correctly
plt.rcParams['figure.dpi'] = 100                  # figure resolution

epochs_range = range(epochs)

plt.figure(figsize=(12, 3))

plt.subplot(1, 2, 1)
plt.plot(epochs_range, train_acc, label='Training Accuracy')
plt.plot(epochs_range, test_acc, label='Test Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1, 2, 2)
plt.plot(epochs_range, train_loss, label='Training Loss')
plt.plot(epochs_range, test_loss, label='Test Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')

plt.show()
I didn't schedule my study time well this week, so this assignment turned out worse than it should have. With the deadline almost here, I'm submitting the check-in first and will make time to study the material properly afterwards. Lesson learned.