Over the past few days I trained the first classification model for vertebra detection with ResNet-50; the best validation accuracy reached 77%.
My capable teammates had already localized each large vertebra image, cropped it into patches, labeled the classes, and done the data augmentation, so all I (the weak link) had to do was feed the data into a classification model.
Enough talk, here is the code.
First, the dataset-handling script load_dataset.py.
The code here is based on https://blog.csdn.net/qq_40356092/article/details/108472127.
import torch
import torchvision
from torchvision import datasets, transforms
# transforms applied to the training set
train_transforms = transforms.Compose([
    transforms.RandomResizedCrop(224),                 # random resized crop to 224x224
    transforms.RandomHorizontalFlip(),                 # random horizontal flip
    transforms.ToTensor(),                             # convert to tensor
    transforms.Normalize((.5, .5, .5), (.5, .5, .5))   # normalize
])
# transforms applied to the test set
test_transforms = transforms.Compose([
    transforms.RandomResizedCrop(224),
    transforms.ToTensor(),
    transforms.Normalize((.5, .5, .5), (.5, .5, .5))
])
def load_local_dataset(dataset_dir, ratio=0.8, batch_size=256):
    # load the whole dataset from a single folder (one sub-folder per class)
    all_datasets = datasets.ImageFolder(dataset_dir, transform=train_transforms)
    # split the dataset into a training set and a test set
    train_size = int(ratio * len(all_datasets))
    test_size = len(all_datasets) - train_size
    train_datasets, test_datasets = torch.utils.data.random_split(all_datasets, [train_size, test_size])
    train_iter = torch.utils.data.DataLoader(train_datasets, batch_size=batch_size, shuffle=True)
    test_iter = torch.utils.data.DataLoader(test_datasets, batch_size=batch_size, shuffle=True)
    return train_iter, test_iter

def load_train_test_dataset(train_dir, test_dir, batch_size=256):
    # load separate training and test folders
    train_datasets = datasets.ImageFolder(train_dir, transform=train_transforms)
    test_datasets = datasets.ImageFolder(test_dir, transform=test_transforms)
    train_iter = torch.utils.data.DataLoader(train_datasets, batch_size=batch_size, shuffle=True)
    test_iter = torch.utils.data.DataLoader(test_datasets, batch_size=batch_size, shuffle=True)
    return train_iter, test_iter
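For reference, a minimal sketch of how load_local_dataset expects the data to be organized and how it might be called (the folder and class names below are placeholders, not the actual dataset layout). One caveat worth knowing: since random_split only splits indices over the same underlying ImageFolder, the test split here still goes through train_transforms.
# expected ImageFolder layout: one sub-folder per class, e.g.
#   ./all_image/class_0/xxx.png
#   ./all_image/class_1/yyy.png
#   ...
import load_dataset
train_iter, test_iter = load_dataset.load_local_dataset("./all_image", ratio=0.8, batch_size=256)
for X, y in train_iter:
    print(X.shape, y.shape)   # e.g. torch.Size([256, 3, 224, 224]) torch.Size([256])
    break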
Next comes training ResNet-50 on the GPU. I went with 50 because the graphics cards can't handle ResNet-152, sigh.
First, the header part (imports and setup).
import torch
import os
import load_dataset
import torch.nn as nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torch.autograd import Variable
from collections import OrderedDict
import pandas as pd
os.environ['CUDA_VISIBLE_DEVICES'] = "0,1,2,3"  # the value must be a string; assigning a list raises an error
device_ids=[0,1,2,3]
train_measure_results = []
test_measure_results = []
def dprint(d):
    # print a dict of metrics as "name: value" pairs on one line
    out = []
    for k, v in d.items():
        out.append(f"{k}: {v:0.4f}")
    print(", ".join(out))
Next, define and build the model.
# build the model
# two-layer residual block (basic block)
class Residual_2(nn.Module):
    def __init__(self, in_channels, out_channels, use_1x1conv=False, stride=1):
        super(Residual_2, self).__init__()
        # two 3x3 convolutions
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1, stride=stride)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1)
        # optional 1x1 convolution so the shortcut matches the output shape
        if use_1x1conv:
            self.conv3 = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride)
        else:
            self.conv3 = None
        # batch-norm layers
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.bn2 = nn.BatchNorm2d(out_channels)

    def forward(self, X):
        Y = self.conv1(X)
        Y = self.bn1(Y)
        Y = torch.nn.functional.relu(Y)
        Y = self.conv2(Y)
        Y = self.bn2(Y)
        if self.conv3:
            X = self.conv3(X)
        return torch.nn.functional.relu(Y + X)
# three-layer residual block (bottleneck)
class Residual_3(nn.Module):
    def __init__(self, in_channels, out_channels, use_1x1conv=False, stride=1):
        super(Residual_3, self).__init__()
        # three convolutions: 1x1 -> 3x3 -> 1x1, expanding the channels by 4x
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1)
        self.conv3 = nn.Conv2d(out_channels, out_channels*4, kernel_size=1)
        # optional 1x1 convolution so the shortcut matches the output shape
        if use_1x1conv:
            self.conv4 = nn.Conv2d(in_channels, out_channels*4, kernel_size=1, stride=stride)
        else:
            self.conv4 = None
        # batch-norm layers
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.bn3 = nn.BatchNorm2d(out_channels*4)

    def forward(self, X):
        Y = self.conv1(X)
        Y = self.bn1(Y)
        Y = torch.nn.functional.relu(Y)
        Y = self.conv2(Y)
        Y = self.bn2(Y)
        Y = torch.nn.functional.relu(Y)
        Y = self.conv3(Y)
        Y = self.bn3(Y)
        if self.conv4:
            X = self.conv4(X)
        return torch.nn.functional.relu(Y + X)
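As a quick sanity check (my own addition, not part of the original training script): a basic block keeps the channel count, while a bottleneck block expands it by 4x, and a stride of 2 halves the spatial size.
x = torch.randn(1, 64, 56, 56)
print(Residual_2(64, 64)(x).shape)                               # torch.Size([1, 64, 56, 56])
print(Residual_3(64, 64, use_1x1conv=True)(x).shape)             # torch.Size([1, 256, 56, 56])
print(Residual_2(64, 128, use_1x1conv=True, stride=2)(x).shape)  # torch.Size([1, 128, 28, 28])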
classes = 7  # number of output classes
# flatten layer
class FlattenLayer(nn.Module):
    def __init__(self):
        super(FlattenLayer, self).__init__()
    def forward(self, input):
        return input.view(input.size(0), -1)

# global average pooling layer
class GlobalAvgPool2d(nn.Module):
    def __init__(self):
        super(GlobalAvgPool2d, self).__init__()
    def forward(self, x):
        return nn.functional.avg_pool2d(x, kernel_size=x.size()[2:])
def resnet_block(in_channels, out_channels, num_residuals, basicblock=2, first_block=False):
    # stack num_residuals residual blocks into one stage
    blk = []
    for i in range(num_residuals):
        if basicblock == 2:
            # basic blocks: downsample at the first block of every stage except the first one
            if i == 0 and first_block == False:
                blk.append(Residual_2(in_channels, out_channels, use_1x1conv=True, stride=2))
            else:
                blk.append(Residual_2(out_channels, out_channels))
        else:
            # bottleneck blocks: every block needs a 1x1 shortcut because of the 4x channel expansion
            if i == 0:
                if first_block:
                    blk.append(Residual_3(in_channels, out_channels, use_1x1conv=True))
                else:
                    blk.append(Residual_3(in_channels*4, out_channels, use_1x1conv=True, stride=2))
            else:
                blk.append(Residual_3(out_channels*4, out_channels, use_1x1conv=True))
    return nn.Sequential(*blk)
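A hypothetical shape check of two bottleneck stages (again my own addition): after the first stage the channel count is out_channels*4, which is why the later stages receive in_channels*4 above.
stage1 = resnet_block(64, 64, 3, basicblock=3, first_block=True)
stage2 = resnet_block(64, 128, 4, basicblock=3)
x = torch.randn(1, 64, 56, 56)
print(stage1(x).shape)           # torch.Size([1, 256, 56, 56])
print(stage2(stage1(x)).shape)   # torch.Size([1, 512, 28, 28])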
# define the ResNet network
def ResNet_model(layers):
    # stem: the first two layers
    net = nn.Sequential(
        # 7x7 convolution
        nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3),
        nn.BatchNorm2d(64),
        nn.ReLU(),
        # 3x3 max pooling
        nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    )
    # choose the block type and block counts for each ResNet variant
    if layers == 18:
        basicblock = 2
        num_residual = [2, 2, 2, 2]
    elif layers == 34:
        basicblock = 2
        num_residual = [3, 4, 6, 3]
    elif layers == 50:
        basicblock = 3
        num_residual = [3, 4, 6, 3]
    elif layers == 101:
        basicblock = 3
        num_residual = [3, 4, 23, 3]
    elif layers == 152:
        basicblock = 3
        num_residual = [3, 8, 36, 3]
    else:
        exit("Invalid ResNet depth!")
    # add the four residual stages
    net.add_module("resnet_block1", resnet_block(64, 64, num_residual[0], basicblock, first_block=True))
    net.add_module("resnet_block2", resnet_block(64, 128, num_residual[1], basicblock))
    net.add_module("resnet_block3", resnet_block(128, 256, num_residual[2], basicblock))
    net.add_module("resnet_block4", resnet_block(256, 512, num_residual[3], basicblock))
    # add global average pooling and the fully connected layer
    net.add_module("global_avg_pool", GlobalAvgPool2d())
    if basicblock == 2:
        net.add_module("fc", nn.Sequential(FlattenLayer(), nn.Linear(512, classes)))
    else:
        net.add_module("fc", nn.Sequential(FlattenLayer(), nn.Linear(2048, classes)))
    return net
resnet = ResNet_model(50)
resnet=torch.nn.DataParallel(resnet,device_ids=device_ids)
resnet = resnet.cuda()
print("model loaded...")
# load the dataset
ratio = 0.8
batch_size = 256
# option 1: load FashionMNIST (28x28x1 images; remember to change the network's input layer)
# root = "E:/数据集"
# train_iter, test_iter = load_dataset.load_FashionMNIST(root, batch_size)
# option 2: load separate training and test folders
# train_dir = "E:/数据集/rice_diseases/train"
# test_dir = "E:/数据集/rice_diseases/test"
# train_iter, test_iter = load_dataset.load_train_test_dataset(train_dir, test_dir, batch_size)
Here we load our own training and test sets. They live in a single folder; just organize the files following the format described in https://blog.csdn.net/qq_40356092/article/details/108472127.
# our case: training and test sets in a single folder, split by ratio
dataset_dir = "./all_image"
train_iter, test_iter = load_dataset.load_local_dataset(dataset_dir, ratio, batch_size)
print("data loaded...")
print("train batches =", len(train_iter))
print("test batches =", len(test_iter))
# define the loss function and the optimizer
lr, num_epochs = 0.001, 200
loss = torch.nn.CrossEntropyLoss().cuda()                 # loss function
optimizer = torch.optim.Adam(resnet.parameters(), lr=lr)  # optimizer
def train(net, train_iter, test_iter, optimizer, loss, num_epochs):
    for epoch in range(num_epochs):
        # training phase
        net.train()  # enable BatchNorm updates and Dropout
        train_l_sum, train_acc_sum, train_num = 0.0, 0.0, 0
        for X, y in train_iter:
            X = X.cuda()
            y = y.cuda()
            y_hat = net(X)
            l = loss(y_hat, y)
            optimizer.zero_grad()
            l.backward()
            optimizer.step()
            # accumulate loss and accuracy (l is a per-batch mean, so weight it by the batch size)
            train_l_sum += l.item() * y.shape[0]
            train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item()
            train_num += y.shape[0]
        print('epoch %d, loss %.4f, train acc %.3f' % (epoch + 1, train_l_sum / train_num, train_acc_sum / train_num))
        train_results = OrderedDict()
        train_results['loss'] = train_l_sum / train_num
        train_results['train_acc'] = train_acc_sum / train_num
        dprint(train_results)
        train_measure_results.append(train_results)
        # evaluation phase, every 5 epochs
        if (epoch + 1) % 5 == 0:
            test_acc_sum, test_num = 0.0, 0
            with torch.no_grad():  # no gradients or backprop during evaluation
                net.eval()  # disable BatchNorm updates and Dropout
                for X, y in test_iter:
                    X = X.cuda()
                    y = y.cuda()
                    test_acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()
                    test_num += y.shape[0]
            print('test acc %.3f' % (test_acc_sum / test_num))
            test_results = OrderedDict()
            test_results['test_acc'] = test_acc_sum / test_num
            dprint(test_results)
            test_measure_results.append(test_results)
            # save the model (net.module because of the DataParallel wrapper)
            os.makedirs('./checkpoint', exist_ok=True)
            torch.save(net.module.state_dict(), f'./checkpoint/model_{str(epoch + 1).zfill(4)}.pt')
            # dump the accumulated metrics to CSV
            df_train = pd.DataFrame(train_measure_results)
            df_test = pd.DataFrame(test_measure_results)
            df_train.to_csv(f"train_result.csv")
            df_test.to_csv(f"test_result.csv")
train(resnet, train_iter, test_iter, optimizer, loss, num_epochs)
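Since the checkpoints are saved from net.module, they can later be loaded straight into an un-wrapped network for inference. A minimal sketch (the checkpoint file name here is just an example):
model = ResNet_model(50)
model.load_state_dict(torch.load('./checkpoint/model_0005.pt', map_location='cpu'))
model.eval()
with torch.no_grad():
    pred = model(torch.randn(1, 3, 224, 224)).argmax(dim=1)
    print(pred)   # predicted class index, 0..6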
Many thanks to the articles this code was adapted from:
importing your own data into PyTorch, multi-GPU training (I still haven't solved the problem of one GPU carrying a larger share of the load), and quite a few ResNet model implementations I won't list one by one.
Finally, a word on the code's shortcomings: since some parts are unpolished, one epoch takes about a minute, which is still quite slow.
I'm so bad at this.