import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
from torchvision import models
from torch.utils import data
# Fall back to CPU when CUDA is unavailable so the script still runs
# on machines without a GPU (the original hard-coded "cuda").
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
import matplotlib.pyplot as plt
class Metric:
    """Accumulates predictions/targets across batches and computes
    accuracy, precision, recall, and a confusion matrix."""

    def __init__(self, num_classes):
        self.num_classes = num_classes
        self.y = []  # predicted class indices, one tensor per batch
        self.t = []  # ground-truth class indices, one tensor per batch

    def update(self, y, t):
        '''Store one batch of predictions and targets.
        Args:
          y: (tensor) predicted class indices sized [N,].
          t: (tensor) target class indices sized [N,].
        '''
        self.y.append(y)
        self.t.append(t)

    def _process(self, y, t):
        '''Compute per-class TP, FP, FN, TN counts.
        Args:
          y: (tensor) predicted class indices sized [N,].
          t: (tensor) target class indices sized [N,].
        Returns:
          (tensor) tp, fp, fn, tn, each sized [num_classes,].
        '''
        # zeros (not empty) so every slot is defined even if a loop
        # iteration were ever skipped.
        tp = torch.zeros(self.num_classes)
        fp = torch.zeros(self.num_classes)
        fn = torch.zeros(self.num_classes)
        tn = torch.zeros(self.num_classes)
        for i in range(self.num_classes):
            tp[i] = ((y == i) & (t == i)).sum().item()
            fp[i] = ((y == i) & (t != i)).sum().item()
            fn[i] = ((y != i) & (t == i)).sum().item()
            tn[i] = ((y != i) & (t != i)).sum().item()
        return tp, fp, fn, tn

    def accuracy(self, reduction='mean'):
        '''Accuracy = (TP+TN) / (TP+FP+FN+TN).
        Args:
          reduction: (str) 'mean' returns the overall accuracy
            (correct predictions / total samples); 'none' returns the
            per-class accuracy (TP+TN) / total for each class.
        Returns:
          (tensor) accuracy, or None if no batches were recorded.
        '''
        if not self.y or not self.t:
            return
        assert(reduction in ['none', 'mean'])
        y = torch.cat(self.y, 0)
        t = torch.cat(self.t, 0)
        tp, fp, fn, tn = self._process(y, t)
        if reduction == 'none':
            # BUG FIX: the original returned tp / (tp + fn) here, which
            # is recall, not the accuracy the docstring promises.
            acc = (tp + tn) / (tp + fp + fn + tn)
        else:
            # Overall accuracy: (tp + fn).sum() equals the sample count.
            acc = tp.sum() / (tp + fn).sum()
        return acc

    def precision(self, reduction='mean'):
        '''Precision = TP / (TP+FP).
        Args:
          reduction: (str) mean or none.
        Returns:
          (tensor) precision, or None if no batches were recorded.
        '''
        if not self.y or not self.t:
            return
        assert(reduction in ['none', 'mean'])
        y = torch.cat(self.y, 0)
        t = torch.cat(self.t, 0)
        tp, fp, fn, tn = self._process(y, t)
        prec = tp / (tp + fp)
        # Classes never predicted give 0/0 = NaN; report them as 0.
        prec[torch.isnan(prec)] = 0
        if reduction == 'mean':
            prec = prec.mean()
        return prec

    def recall(self, reduction='mean'):
        '''Recall = TP / (TP+FN).
        Args:
          reduction: (str) mean or none.
        Returns:
          (tensor) recall, or None if no batches were recorded.
        '''
        if not self.y or not self.t:
            return
        assert(reduction in ['none', 'mean'])
        y = torch.cat(self.y, 0)
        t = torch.cat(self.t, 0)
        tp, fp, fn, tn = self._process(y, t)
        recall = tp / (tp + fn)
        # Classes absent from the targets give 0/0 = NaN; report as 0.
        recall[torch.isnan(recall)] = 0
        if reduction == 'mean':
            recall = recall.mean()
        return recall

    def confusion_matrix(self):
        '''Return the [num_classes, num_classes] confusion matrix where
        row j, column i counts samples with target j predicted as i.'''
        y = torch.cat(self.y, 0)
        t = torch.cat(self.t, 0)
        matrix = torch.zeros(self.num_classes, self.num_classes)
        for i in range(self.num_classes):
            for j in range(self.num_classes):
                matrix[j][i] = ((y == i) & (t == j)).sum().item()
        return matrix
batch_size = 8
learning_rate = 0.0002
num_epochs = 50

# Training-set transforms.
train_transforms = transforms.Compose([
    transforms.CenterCrop(200),
    transforms.ToTensor(),
    # Per-channel mean/std — presumably measured on this dataset; verify.
    transforms.Normalize([0.5027, 0.4421, 0.3137], [0.3256, 0.3013, 0.3008])
])

# Validation-set transforms.
# BUG FIX: Resize(200) alone only scales the shorter edge to 200, so
# non-square images stay rectangular and cannot be stacked into a batch.
# Resize + CenterCrop produces a fixed 200x200 input matching training.
val_transforms = transforms.Compose([
    transforms.Resize(200),
    transforms.CenterCrop(200),
    transforms.ToTensor(),
    transforms.Normalize([0.5027, 0.4421, 0.3137], [0.3256, 0.3013, 0.3008])
])

train_dir = r"D:\engineer\transfer\train"  # training-set path
# ImageFolder assigns class indices (from 0) per sub-directory name.
train_datasets = datasets.ImageFolder(train_dir, transform=train_transforms)
train_dataloader = torch.utils.data.DataLoader(
    train_datasets, batch_size=batch_size, shuffle=True)

val_dir = r"D:\engineer\transfer\val"
val_datasets = datasets.ImageFolder(val_dir, transform=val_transforms)
# shuffle=False: evaluation order is irrelevant, keep it deterministic.
val_dataloader = torch.utils.data.DataLoader(
    val_datasets, batch_size=batch_size, shuffle=False)
class VGGNet(nn.Module):
    """VGG16 backbone with a custom classification head.

    Loads ImageNet-pretrained VGG16, strips its stock classifier, and
    appends a small fully connected head producing `num_classes` logits.
    """

    def __init__(self, num_classes=6):
        super(VGGNet, self).__init__()
        backbone = models.vgg16(pretrained=True)  # pretrained VGG16 weights
        backbone.classifier = nn.Sequential()  # drop the original classifier
        self.features = backbone  # conv features + avgpool -> 512*7*7 values
        # Replacement head: the 512*7*7 input width is fixed by the VGG16
        # feature extractor; the hidden widths (512, 128) are tunable.
        self.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, 512),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(512, 128),
            nn.ReLU(True),
            nn.Dropout(p=0.2),
            nn.Linear(128, num_classes),
        )

    def forward(self, x):
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)  # flatten per sample
        return self.classifier(flat)
net = VGGNet().to(device)
# CONSISTENCY FIX: the original hard-coded lr=0.0001 while the module-level
# `learning_rate` constant (0.0002) was defined but never used; wire the
# optimizer to the named constant so tuning it actually takes effect.
optimizer = optim.Adam(net.parameters(), lr=learning_rate)
criterion = nn.CrossEntropyLoss()
criterion.to(device=device)
# 训练模型
def train(epoch):
    '''Run one training epoch over train_dataloader.
    Args:
      epoch: (int) epoch index (kept for interface; unused internally).
    Returns:
      (float, float, tensor, tensor) average loss, average accuracy,
      mean precision, mean recall for the epoch.
    '''
    # BUG FIX: test() switches the network to eval mode and never switches
    # it back, so every epoch after the first trained with dropout disabled.
    net.train()
    loss, current, n = 0.0, 0.0, 0
    metric = Metric(num_classes=6)
    for batch, (x, y) in enumerate(train_dataloader):
        image, y = x.to(device), y.to(device)
        output = net(image)
        cur_loss = criterion(output, y)
        _, pred = torch.max(output, axis=1)
        cur_acc = torch.sum(y == pred) / output.shape[0]
        # Backpropagation.
        optimizer.zero_grad()
        cur_loss.backward()
        optimizer.step()
        loss += cur_loss.item()
        current += cur_acc.item()
        n = n + 1
        metric.update(pred, y)
    prec = metric.precision()
    recall = metric.recall()
    train_loss = loss / n
    train_acc = current / n
    print(
        '*——————————————train Loss(avg): {:.6f}\t train Accuracy(avg): {:.6f}————————————*'.format(train_loss, train_acc))
    return train_loss, train_acc, prec, recall  # used for plotting later
def test(epoch):
    '''Evaluate the network on the validation set (no gradient updates).
    Args:
      epoch: (int) epoch index (kept for interface; unused internally).
    Returns:
      (float, float, tensor, tensor) average loss, average accuracy,
      mean precision, mean recall over the validation set.
    '''
    net.eval()
    loss, current, n = 0.0, 0.0, 0
    metric = Metric(num_classes=6)
    with torch.no_grad():
        # BUG FIX: the original iterated train_dataloader here, so the
        # "validation" numbers were measured on the training set.
        for batch, (x, y) in enumerate(val_dataloader):
            image, y = x.to(device), y.to(device)
            output = net(image)
            cur_loss = criterion(output, y)
            _, pred = torch.max(output, axis=1)
            cur_acc = torch.sum(y == pred) / output.shape[0]
            loss += cur_loss.item()
            current += cur_acc.item()
            n = n + 1
            metric.update(pred, y)
    prec = metric.precision()
    recall = metric.recall()
    val_loss = loss / n
    val_acc = current / n
    print(
        '*——————————————Test Loss(avg): {:.6f}\t Test Accuracy(avg): {:.6f}————————————*'.format(val_loss, val_acc))
    return val_loss, val_acc, prec, recall
def matplot_v1(loss, loss1):
    '''Plot training vs. validation loss curves.
    Args:
      loss: (list) per-epoch training losses.
      loss1: (list) per-epoch validation losses.
    '''
    # Use the actual series length instead of the global num_epochs so an
    # interrupted run still plots without a length mismatch.
    x = range(len(loss))
    plt.plot(x, loss, 'r-')
    plt.plot(x, loss1, 'b-')
    plt.title("loss")
    plt.legend(['loss train', 'loss val'])
    plt.show()
def matplot_v2(acc, acc1):
    '''Plot training vs. validation accuracy curves.
    Args:
      acc: (list) per-epoch training accuracies.
      acc1: (list) per-epoch validation accuracies.
    '''
    # Use the actual series length instead of the global num_epochs so an
    # interrupted run still plots without a length mismatch.
    x = range(len(acc))
    plt.plot(x, acc, 'r-')
    plt.plot(x, acc1, 'b-')
    plt.title("acc%")
    plt.legend(['acc train', 'acc val'])
    plt.show()
def matplot_v3(p, p2, r, r2):
    '''Plot train/val precision and recall curves.
    Args:
      p, p2: (list) per-epoch train/val precision.
      r, r2: (list) per-epoch train/val recall.
    '''
    # Series length, not the global num_epochs, so partial runs still plot.
    x = range(len(p))
    plt.plot(x, p, 'r-')
    plt.plot(x, p2, 'b-')
    # BUG FIX: the original drew both recall curves in 'r-' (red), making
    # three of the four lines identical and the legend mapping wrong.
    plt.plot(x, r, 'g-')
    plt.plot(x, r2, 'y-')
    plt.title("precision / recall")
    plt.legend(['pred train', 'pred val', 'recall train', 'recall val'])
    plt.show()
# Per-epoch history for the plots below.
train_loss_hist = []
val_loss_hist = []
train_acc_hist = []
val_acc_hist = []
train_prec_hist = []
val_prec_hist = []
train_rec_hist = []
val_rec_hist = []

for epoch in range(num_epochs):
    print('*' * 30, 'EPOCH:', epoch, '*' * 30)
    tr_loss, tr_acc, tr_prec, tr_rec = train(epoch)
    va_loss, va_acc, va_prec, va_rec = test(epoch)
    train_loss_hist.append(tr_loss)
    val_loss_hist.append(va_loss)
    train_acc_hist.append(tr_acc)
    val_acc_hist.append(va_acc)
    train_prec_hist.append(tr_prec)
    val_prec_hist.append(va_prec)
    train_rec_hist.append(tr_rec)
    val_rec_hist.append(va_rec)

matplot_v1(train_loss_hist, val_loss_hist)
matplot_v2(train_acc_hist, val_acc_hist)
matplot_v3(train_prec_hist, val_prec_hist, train_rec_hist, val_rec_hist)
# VGG16 transfer learning with integrated precision/recall evaluation metrics.
# (Blog footer: latest recommended article published 2024-05-09 13:14:38.)