# Person_reID_baseline_pytorch (master) — line-by-line study notes for ReID.
# ==== train.py ====
from __future__ import print_function, division
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.autograd import Variable
from torchvision import datasets, transforms
import torch.backends.cudnn as cudnn
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
#from PIL import Image
from loss import CrossEntropyLabelSmooth, TripletLoss, Margin
import time
import os
from model import ft_net, ft_net_dense, ft_net_NAS, PCB
from random_erasing import RandomErasing
import yaml
import math
from shutil import copyfile
version = torch.__version__  # torch version string; parsed later to pick the loss-scalar API
#fp16: apex is optional — only needed when --fp16 is passed
try:
    from apex.fp16_utils import *
    from apex import amp, optimizers
except ImportError: # will be 3.x series
    print('This is not an error. If you want to use low precision, i.e., fp16, please install the apex with cuda support (https://github.com/NVIDIA/apex) and update pytorch to 1.0')
######################################################################
# Options
# --------
# Command-line configuration for the training run.
parser = argparse.ArgumentParser(description='Training')
parser.add_argument('--gpu_ids',default='0', type=str,help='gpu_ids: e.g. 0 0,1,2 0,2')
parser.add_argument('--name',default='ft_ResNet50', type=str, help='output model name')
parser.add_argument('--data_dir',default='../iCartoon/pytorch',type=str, help='training dir path')
parser.add_argument('--train_all', action='store_true', help='use all training data' )
parser.add_argument('--color_jitter', action='store_true', help='use color jitter in training' )
parser.add_argument('--batchsize', default=32, type=int, help='batchsize')
parser.add_argument('--stride', default=2, type=int, help='stride')
parser.add_argument('--erasing_p', default=0, type=float, help='Random Erasing probability, in [0,1]')
parser.add_argument('--use_dense', action='store_true', help='use densenet121' )
parser.add_argument('--use_NAS', action='store_true', help='use NAS' )
parser.add_argument('--warm_epoch', default=0, type=int, help='the first K epoch that needs warm up')
parser.add_argument('--lr', default=0.05, type=float, help='learning rate')
parser.add_argument('--droprate', default=0.5, type=float, help='drop rate')
parser.add_argument('--PCB', action='store_true', help='use PCB+ResNet50' )
parser.add_argument('--fp16', action='store_true', help='use float16 instead of float32, which will save about 50% memory' )
opt = parser.parse_args()

# Frequently used options get short module-level aliases.
fp16 = opt.fp16
data_dir = opt.data_dir
name = opt.name

# Parse "--gpu_ids 0,1,2" into a list of non-negative device indices.
gpu_ids = [device for device in map(int, opt.gpu_ids.split(',')) if device >= 0]
if gpu_ids:
    torch.cuda.set_device(gpu_ids[0])  # run on the first listed GPU
    # Let cuDNN benchmark for the fastest algorithms; worthwhile because the
    # input size is fixed for the whole run.
    cudnn.benchmark = True
######################################################################
# Load Data
# ---------
# ImageNet channel statistics used for input normalization.
imagenet_mean = [0.485, 0.456, 0.406]
imagenet_std = [0.229, 0.224, 0.225]

# PCB works on taller 384x192 crops and skips the pad/random-crop jitter;
# the plain ft_net pipeline uses 256x128 with pad+crop augmentation.
if opt.PCB:
    input_size = (384, 192)
    transform_train_list = [transforms.Resize(input_size, interpolation=3)]
else:
    input_size = (256, 128)
    transform_train_list = [
        transforms.Resize(input_size, interpolation=3),
        transforms.Pad(10),
        transforms.RandomCrop(input_size),
    ]
transform_train_list += [
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(imagenet_mean, imagenet_std),
]
transform_val_list = [
    transforms.Resize(size=input_size, interpolation=3),  # Image.BICUBIC
    transforms.ToTensor(),
    transforms.Normalize(imagenet_mean, imagenet_std),
]

# Optional extra augmentations, controlled from the command line.
if opt.erasing_p > 0:
    # Randomly blank a rectangular region of the training image.
    transform_train_list.append(
        RandomErasing(probability=opt.erasing_p, mean=[0.0, 0.0, 0.0]))
if opt.color_jitter:
    # Jitter brightness/contrast/saturation before everything else.
    transform_train_list.insert(
        0, transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0))

print(transform_train_list)

data_transforms = {
    'train': transforms.Compose(transform_train_list),
    'val': transforms.Compose(transform_val_list),
}

# Train only on the train split here (the '_all' variant is not used).
train_all = ''

image_datasets = {
    'train': datasets.ImageFolder(os.path.join(data_dir, 'train' + train_all),
                                  data_transforms['train']),
    'val': datasets.ImageFolder(os.path.join(data_dir, 'val'),
                                data_transforms['val']),
}
# pin_memory speeds up host->GPU copies when RAM is plentiful.
dataloaders = {
    split: torch.utils.data.DataLoader(
        image_datasets[split], batch_size=opt.batchsize,
        shuffle=True, num_workers=8, pin_memory=True)  # 8 workers may work faster
    for split in ['train', 'val']
}
dataset_sizes = {split: len(image_datasets[split]) for split in ['train', 'val']}
class_names = image_datasets['train'].classes
use_gpu = torch.cuda.is_available()

# Time how long fetching one training batch takes (sanity check).
since = time.time()
inputs, classes = next(iter(dataloaders['train']))
print(time.time() - since)
######################################################################
# Training the model
# ------------------
#
# General training loop: schedules the learning rate, runs train/val
# phases every epoch, and checkpoints the model. ``scheduler`` is an LR
# scheduler object from ``torch.optim.lr_scheduler``.

# Per-phase history, appended to once per epoch by train_model() and
# plotted by draw_curve().
y_loss = {'train': [], 'val': []}  # loss history
y_err = {'train': [], 'val': []}   # top-1 error history
def train_model(model, criterion, optimizer, scheduler, num_epochs):
    """Run the train/val loop for ``num_epochs`` epochs.

    Relies on module-level state: ``dataloaders``, ``dataset_sizes``,
    ``use_gpu``, ``opt``, ``y_loss``/``y_err`` and the helpers
    ``save_network``/``draw_curve``.  Saves a checkpoint after every val
    phase and returns the model loaded with the last epoch's weights.
    """
    since = time.time()
    #best_model_wts = model.state_dict()
    #best_acc = 0.0
    warm_up = 0.1 # We start from 0.1 * lr
    # Number of iterations over which warm_up ramps from 0.1 up to 1.0
    # (spread across the first opt.warm_epoch epochs).
    warm_iteration = round(dataset_sizes['train']/opt.batchsize)*opt.warm_epoch # first K epochs
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                #scheduler.step()
                model.train(True)  # Set model to training mode
            else:
                model.train(False) # Set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0.0
            # Iterate over data.
            for data in dataloaders[phase]:
                # get the inputs: images and their identity labels
                inputs, labels = data
                now_batch_size,c,h,w = inputs.shape
                if now_batch_size<opt.batchsize: # skip the last, smaller batch
                    continue
                # wrap them in Variable; detach() so no stale autograd
                # history is carried into this iteration
                if use_gpu:
                    inputs = Variable(inputs.cuda().detach())
                    labels = Variable(labels.cuda().detach())
                else:
                    inputs, labels = Variable(inputs), Variable(labels)
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward — no autograd graph needed during validation
                if phase == 'val':
                    with torch.no_grad():
                        outputs = model(inputs)
                else:
                    outputs = model(inputs)
                if not opt.PCB:
                    # predicted class = argmax over the class dimension
                    _, preds = torch.max(outputs.data, 1)
                    loss = criterion(outputs, labels)
                else:
                    # PCB returns one logit tensor per body part: sum the
                    # per-part softmax scores to predict, and sum the
                    # per-part losses as the training objective.
                    part = {}
                    sm = nn.Softmax(dim=1)
                    num_part = 6
                    for i in range(num_part):
                        part[i] = outputs[i]
                    score = sm(part[0]) + sm(part[1]) +sm(part[2]) + sm(part[3]) +sm(part[4]) +sm(part[5])
                    _, preds = torch.max(score.data, 1)
                    loss = criterion(part[0], labels)
                    for i in range(num_part-1):
                        loss += criterion(part[i+1], labels)
                # linear warm-up: scale the loss during the first
                # opt.warm_epoch epochs
                if epoch<opt.warm_epoch and phase == 'train':
                    warm_up = min(1.0, warm_up + 0.9 / warm_iteration)
                    loss *= warm_up
                # backward + optimize only if in training phase
                if phase == 'train':
                    if fp16: # apex amp scales the loss before backward
                        with amp.scale_loss(loss, optimizer) as scaled_loss:
                            scaled_loss.backward()
                    else:
                        loss.backward()
                    optimizer.step()
                # statistics — the loss-scalar API changed across pytorch
                # versions.
                # NOTE(review): this string-indexed version check assumes a
                # "0.x.y"/"1.x.y"-style version string — verify on newer torch.
                if int(version[0])>0 or int(version[2]) > 3: # for the new version like 0.4.0, 0.5.0 and 1.0.0
                    running_loss += loss.item() * now_batch_size
                else : # for the old version like 0.3.0 and 0.3.1
                    running_loss += loss.data[0] * now_batch_size
                running_corrects += float(torch.sum(preds == labels.data))
            # per-epoch averages over the full split
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects / dataset_sizes[phase]
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))
            # record history for draw_curve()
            y_loss[phase].append(epoch_loss)
            y_err[phase].append(1.0-epoch_acc)
            # keep the latest val-phase weights and checkpoint each epoch
            if phase == 'val':
                last_model_wts = model.state_dict()
                save_network(model, epoch)
                draw_curve(epoch)
        time_elapsed = time.time() - since
        print('Training complete in {:.0f}m {:.0f}s'.format(
            time_elapsed // 60, time_elapsed % 60))
        print()
        scheduler.step()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    #print('Best val Acc: {:4f}'.format(best_acc))
    # load the weights saved after the final val phase
    model.load_state_dict(last_model_wts)
    save_network(model, 'last')
    return model
######################################################################
# Draw Curve
#---------------------------
# One shared figure for the whole run: loss on the left subplot,
# top-1 error on the right.
x_epoch = []
fig = plt.figure()
ax0 = fig.add_subplot(121, title="loss")
ax1 = fig.add_subplot(122, title="top1err")


def draw_curve(current_epoch):
    """Append the latest epoch to both curves and re-save the figure."""
    x_epoch.append(current_epoch)
    for axis, history in ((ax0, y_loss), (ax1, y_err)):
        axis.plot(x_epoch, history['train'], 'bo-', label='train')
        axis.plot(x_epoch, history['val'], 'ro-', label='val')
    if current_epoch == 0:
        # Add the legends only once, on the first epoch.
        ax0.legend()
        ax1.legend()
    fig.savefig( os.path.join('./model',name,'train.jpg'))
######################################################################
# Save model
#---------------------------
def save_network(network, epoch_label):
    """Write CPU-side weights to ./model/<name>/net_<epoch_label>.pth."""
    save_filename = 'net_%s.pth'% epoch_label
    save_path = os.path.join('./model',name,save_filename)
    torch.save(network.cpu().state_dict(), save_path)
    if torch.cuda.is_available():
        # .cpu() above moved the model off the GPU — move it back.
        network.cuda(gpu_ids[0])
######################################################################
# Finetuning the convnet
# ----------------------
#
# Pick the backbone requested on the command line; PCB overrides the rest.
if opt.use_dense:
    model = ft_net_dense(len(class_names), opt.droprate)
elif opt.use_NAS:
    model = ft_net_NAS(len(class_names), opt.droprate)
else:
    model = ft_net(len(class_names), opt.droprate, opt.stride)
if opt.PCB:
    model = PCB(len(class_names))
opt.nclasses = len(class_names)
print(model)  # inspect the final architecture

# SGD with two learning rates: 0.1*lr for the (pretrained) backbone and
# the full lr for the freshly initialised classification head(s).
if not opt.PCB:
    head_ids = set(map(id, model.classifier.parameters()))
    base_params = (p for p in model.parameters() if id(p) not in head_ids)
    optimizer_ft = optim.SGD([
        {'params': base_params, 'lr': 0.1*opt.lr},
        {'params': model.classifier.parameters(), 'lr': opt.lr},
    ], weight_decay=5e-4, momentum=0.9, nesterov=True)
else:
    # PCB has the backbone fc plus six part classifiers as "heads".
    heads = [model.model.fc] + [getattr(model, 'classifier%d' % i) for i in range(6)]
    head_ids = set()
    for head in heads:
        head_ids.update(map(id, head.parameters()))
    base_params = (p for p in model.parameters() if id(p) not in head_ids)
    param_groups = [{'params': base_params, 'lr': 0.1*opt.lr}]
    param_groups += [{'params': head.parameters(), 'lr': opt.lr} for head in heads]
    optimizer_ft = optim.SGD(param_groups,
                             weight_decay=5e-4, momentum=0.9, nesterov=True)

# Decay LR by a factor of 0.1 every 40 epochs.
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=40, gamma=0.1)
######################################################################
# Train and evaluate
# ------------------
#
# It should take around 1-2 hours on GPU.
#
dir_name = os.path.join('./model',name)
if not os.path.isdir(dir_name):
    # makedirs (not mkdir): also creates the parent './model' directory
    # when it does not exist yet, instead of raising FileNotFoundError.
    os.makedirs(dir_name)
# record every run: keep a copy of the exact code that produced the weights
copyfile('./train.py', dir_name+'/train.py')
copyfile('./model.py', dir_name+'/model.py')
# save the full option set so the run can be reproduced later
with open('%s/opts.yaml'%dir_name,'w') as fp:
    yaml.dump(vars(opt), fp, default_flow_style=False)
# model to gpu
model = model.cuda()
if fp16:
    # Mixed precision via NVIDIA apex; O1 inserts casts automatically.
    model, optimizer_ft = amp.initialize(model, optimizer_ft, opt_level = "O1")
# plain cross-entropy over identity labels
criterion = nn.CrossEntropyLoss()
model = train_model(model, criterion, optimizer_ft, exp_lr_scheduler,
                    num_epochs=600)
# ==== model.py ====
import torch
import torch.nn as nn
from torch.nn import init
from torchvision import models
from torch.autograd import Variable
import pretrainedmodels
######################################################################
def weights_init_kaiming(m):
    """Kaiming-initialise a module in place (meant for ``net.apply``).

    Conv: fan-in kaiming weights.  Linear: fan-out kaiming weights, zero
    bias.  BatchNorm1d: weights ~ N(1, 0.02), zero bias.  Other module
    types are left untouched.
    """
    kind = m.__class__.__name__
    if 'Conv' in kind:
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_in') # For old pytorch, you may use kaiming_normal.
    elif 'Linear' in kind:
        init.kaiming_normal_(m.weight.data, a=0, mode='fan_out')
        init.constant_(m.bias.data, 0.0)
    elif 'BatchNorm1d' in kind:
        init.normal_(m.weight.data, 1.0, 0.02)
        init.constant_(m.bias.data, 0.0)
def weights_init_classifier(m):
    """Initialise classifier Linear layers: N(0, 0.001) weights, zero bias."""
    if 'Linear' in m.__class__.__name__:
        init.normal_(m.weight.data, std=0.001)
        init.constant_(m.bias.data, 0.0)
# Defines the new fc layer and classification layer
# |--Linear--|--bn--|--relu--|--Linear--|
class ClassBlock(nn.Module):
    """Embedding head plus classifier.

    ``add_block`` maps input_dim -> num_bottleneck via an optional
    Linear, BatchNorm1d, LeakyReLU and Dropout (each switchable);
    ``classifier`` maps the bottleneck to class_num logits.  When
    return_f is True, forward also returns the pre-classifier feature.
    """

    def __init__(self, input_dim, class_num, droprate, relu=False, bnorm=True, num_bottleneck=512, linear=True, return_f = False):
        super(ClassBlock, self).__init__()
        self.return_f = return_f

        layers = []
        if linear:
            layers.append(nn.Linear(input_dim, num_bottleneck))
        else:
            # No projection: the bottleneck keeps the input width.
            num_bottleneck = input_dim
        if bnorm:
            layers.append(nn.BatchNorm1d(num_bottleneck))
        if relu:
            layers.append(nn.LeakyReLU(0.1))
        if droprate > 0:
            layers.append(nn.Dropout(p=droprate))
        embedding = nn.Sequential(*layers)
        embedding.apply(weights_init_kaiming)

        head = nn.Sequential(nn.Linear(num_bottleneck, class_num))
        head.apply(weights_init_classifier)

        self.add_block = embedding
        self.classifier = head

    def forward(self, x):
        x = self.add_block(x)
        if not self.return_f:
            return self.classifier(x)
        feature = x
        return self.classifier(x), feature
# Define the ResNet50-based Model
class ft_net(nn.Module):
    """ResNet-50 backbone with global average pooling and a ClassBlock head.

    stride=1 keeps layer4 at full spatial resolution by forcing its first
    block's conv2/downsample strides to (1, 1).
    """

    def __init__(self, class_num, droprate=0.5, stride=2):
        super(ft_net, self).__init__()
        backbone = models.resnet50(pretrained=False)
        if stride == 1:
            backbone.layer4[0].downsample[0].stride = (1,1)
            backbone.layer4[0].conv2.stride = (1,1)
        # avg pooling to global pooling
        backbone.avgpool = nn.AdaptiveAvgPool2d((1,1))
        self.model = backbone
        # 2048-dim pooled feature -> 512 bottleneck -> class_num logits
        self.classifier = ClassBlock(2048, class_num, droprate)

    def forward(self, x):
        net = self.model
        x = net.maxpool(net.relu(net.bn1(net.conv1(x))))
        for stage in (net.layer1, net.layer2, net.layer3, net.layer4):
            x = stage(x)
        x = net.avgpool(x)
        # flatten (N, 2048, 1, 1) -> (N, 2048)
        x = x.view(x.size(0), x.size(1))
        return self.classifier(x)
# print(model) shows the model structure, including the two parts appended
# above (add_block and classifier). Note: training operates on the
# class_num-dimensional logits, but at test time the final classifier is
# removed with:
#     model.classifier.classifier = nn.Sequential()
# so the extracted feature vector has dimension 512.