罗浩度量学习代码实践(个人学习笔记)
在上一篇表征学习的基础上进行修改(表征学习----ReID代码复现_幸运的的飞起的博客-CSDN博客:https://blog.csdn.net/weixin_46480637/article/details/132609634?spm=1001.2014.3001.5502)
主要代码 :
1. sample.py
from __future__ import absolute_import from collections import defaultdict import numpy as np import torch from torch.utils.data.sampler import Sampler class RandomIdentitySampler(Sampler): """ Randomly sample N identities, then for each identity, randomly sample K instances, therefore batch size is N*K. Code imported from https://github.com/Cysu/open-reid/blob/master/reid/utils/data/sampler.py. Args: data_source (Dataset): dataset to sample from. num_instances (int): number of instances per identity. """ def __init__(self, data_source, num_instances=4): self.data_source = data_source self.num_instances = num_instances self.index_dic = defaultdict(list) for index, (_, pid, _) in enumerate(data_source): self.index_dic[pid].append(index) self.pids = list(self.index_dic.keys()) self.num_identities = len(self.pids) def __iter__(self): indices = torch.randperm(self.num_identities) ret = [] for i in indices: pid = self.pids[i] t = self.index_dic[pid] replace = False if len(t) >= self.num_instances else True t = np.random.choice(t, size=self.num_instances, replace=replace) ret.extend(t) return iter(ret) def __len__(self): return self.num_identities * self.num_instances if __name__ == '__main__': from reid_tutorial.util.data_manager import Market1501 dataset = Market1501(root='D:/xiangmu/reid/data') sampler = RandomIdentitySampler(dataset.train) a = sampler.__iter__()
2. losses.py
from __future__ import absolute_import import sys import torch from torch import nn class TripletLoss(nn.Module): """Triplet loss with hard positive/negative mining. Reference: Hermans et al. In Defense of the Triplet Loss for Person Re-Identification. arXiv:1703.07737. Code imported from https://github.com/Cysu/open-reid/blob/master/reid/loss/triplet.py. Args: margin (float): margin for triplet. """ def __init__(self, margin=0.3): super(TripletLoss, self).__init__() self.margin = margin self.ranking_loss = nn.MarginRankingLoss(margin=margin) def forward(self, inputs, targets): """ Args: inputs: feature matrix with shape (batch_size, feat_dim) targets: ground truth labels with shape (num_classes) """ n = inputs.size(0) # Compute pairwise distance, replace by the official when merged dist = torch.pow(inputs, 2).sum(dim=1, keepdim=True).expand(n, n) dist = dist + dist.t() dist.addmm_(1, -2, inputs, inputs.t()) dist = dist.clamp(min=1e-12).sqrt() mask = targets.expand(n, n).eq(targets.expand(n, n).t()) dist_ap, dist_an = [], [] for i in range(n): dist_ap.append(dist[i][mask[i]].max().unsqueeze(0)) dist_an.append(dist[i][mask[i] == 0].min().unsqueeze(0)) dist_ap = torch.cat(dist_ap) dist_an = torch.cat(dist_an) y = torch.ones_like(dist_an) loss = self.ranking_loss(dist_an, dist_ap, y) return loss class CrossEntropyLoss(nn.Module): def __init__(self, use_gpu=True): super(CrossEntropyLoss, self).__init__() self.use_gpu = use_gpu self.crossentropy_loss = nn.CrossEntropyLoss() def forward(self, inputs, targets): """ Args: inputs: prediction matrix (before softmax) with shape (batch_size, num_classes) targets: ground truth labels with shape (num_classes) """ if self.use_gpu: targets = targets.cuda() loss = self.crossentropy_loss(inputs, targets) return loss if __name__ == '__main__': pass
3.ResNet.py
from __future__ import absolute_import import torch from torch import nn from torch.nn import functional as F import torchvision from IPython import embed class ResNet50(nn.Module): def __init__(self, num_classes, loss={'softmax', 'metric'}, **kwargs): super(ResNet50, self).__init__() resnet50 = torchvision.models.resnet50(pretrained=True) self.loss = loss self.base = nn.Sequential(*list(resnet50.children())[:-2]) if not self.loss == {'metric'}: self.classifier = nn.Linear(2048, num_classes) def forward(self, x): x = self.base(x) x = F.avg_pool2d(x, x.size()[2:]) f = x.view(x.size(0), -1) if not self.training: return f y = self.classifier(f) if self.loss == {'softmax'}: return y elif self.loss == {'metric'}: return f elif self.loss == {'softmax', 'metric'}: return y, f else: print('loss seetings error') if __name__ == '__main__': model = ResNet50(num_classes=751) imgs = torch.Tensor(32, 3, 256, 128)
4. train_trihard.py
from __future__ import absolute_import import os import sys import time import datetime import argparse import os.path as osp import numpy as np import torch import torch.nn as nn from torch.utils.data import DataLoader import torch.backends.cudnn as cudnn from torch.optim import lr_scheduler import models from util.losses import CrossEntropyLoss, TripletLoss from util import data_manager from util import transforms as T from util.dataset_loader import ImageDataset from util.utils import Logger from util.utils import AverageMeter, Logger, save_checkpoint from util.eval_metrics import evaluate from util.optimizers import init_optim from util.samplers import RandomIdentitySampler parser = argparse.ArgumentParser(description='Train AlignedReID with cross entropy loss and triplet hard loss') # Datasets parser.add_argument('--root', type=str, default='D:/xiangmu/reid/data', help="root path to data directory") parser.add_argument('-d', '--dataset', type=str, default='market1501', choices=data_manager.get_names()) parser.add_argument('-j', '--workers', default=4, type=int, help="number of data loading workers (default: 4)") parser.add_argument('--height', type=int, default=256, help="height of an image (default: 256)") parser.add_argument('--width', type=int, default=128, help="width of an image (default: 128)") parser.add_argument('--split-id', type=int, default=0, help="split index") # CUHK03-specific setting parser.add_argument('--cuhk03-labeled', action='store_true', help="whether to use labeled images, if false, detected images are used (default: False)") parser.add_argument('--cuhk03-classic-split', action='store_true', help="whether to use classic split by Li et al. 
CVPR'14 (default: False)") parser.add_argument('--use-metric-cuhk03', action='store_true', help="whether to use cuhk03-metric (default: False)") # Optimization options parser.add_argument('--labelsmooth', action='store_true', help="label smooth") parser.add_argument('--optim', type=str, default='adam', help="optimization algorithm (see optimizers.py)") parser.add_argument('--max-epoch', default=1, type=int, help="maximum epochs to run") parser.add_argument('--start-epoch', default=0, type=int, help="manual epoch number (useful on restarts)") parser.add_argument('--train-batch', default=8, type=int, help="train batch size") parser.add_argument('--test-batch', default=8, type=int, help="test batch size") parser.add_argument('--lr', '--learning-rate', default=0.0002, type=float, help="initial learning rate") parser.add_argument('--stepsize', default=20, type=int, help="stepsize to decay learning rate (>0 means this is enabled)") parser.add_argument('--gamma', default=0.1, type=float, help="learning rate decay") parser.add_argument('--weight-decay', default=5e-04, type=float, help="weight decay (default: 5e-04)") # triplet hard loss parser.add_argument('--margin', type=float, default=0.3, help="margin for triplet loss") parser.add_argument('--num-instances', type=int, default=4, help="number of instances per identity") parser.add_argument('--htri-only', action='store_true', default=False, help="if this is True, only htri loss is used in training") # Architecture parser.add_argument('-a', '--arch', type=str, default='resnet50', choices=models.get_names()) # Miscs parser.add_argument('--print-freq', type=int, default=10, help="print frequency") parser.add_argument('--seed', type=int, default=1, help="manual seed") parser.add_argument('--resume', type=str, default='', metavar='PATH') parser.add_argument('--evaluate', action='store_true', help="evaluation only") parser.add_argument('--eval-step', type=int, default=-1, help="run evaluation for every N epochs (set to -1 to 
test after training)") parser.add_argument('--start-eval', type=int, default=0, help="start to evaluate after specific epoch") parser.add_argument('--save-dir', type=str, default='log') parser.add_argument('--use_cpu', action='store_true', help="use cpu") parser.add_argument('--gpu-devices', default='0', type=str, help='gpu device ids for CUDA_VISIBLE_DEVICES') parser.add_argument('--reranking', action='store_true', help='result re_ranking') parser.add_argument('--test_distance', type=str, default='global', help='test distance type') parser.add_argument('--unaligned', action='store_true', help='test local feature with unalignment') args = parser.parse_args() def test(model, queryloader, galleryloader, use_gpu, ranks=[1, 5, 10, 20]): batch_time = AverageMeter() model.eval() with torch.no_grad(): qf, q_pids, q_camids, lqf = [], [], [], [] for batch_idx, (imgs, pids, camids) in enumerate(queryloader): if use_gpu: imgs = imgs.cuda() end = time.time() features = model(imgs) local_features = model(imgs) batch_time.update(time.time() - end) features = features.data.cpu() local_features = local_features.data.cpu() qf.append(features) lqf.append(local_features) q_pids.extend(pids) q_camids.extend(camids) qf = torch.cat(qf, 0) lqf = torch.cat(lqf, 0) q_pids = np.asarray(q_pids) q_camids = np.asarray(q_camids) print("Extracted features for query set, obtained {}-by-{} matrix".format(qf.size(0), qf.size(1))) gf, g_pids, g_camids, lgf = [], [], [], [] end = time.time() for batch_idx, (imgs, pids, camids) in enumerate(galleryloader): if use_gpu: imgs = imgs.cuda() end = time.time() features = model(imgs) local_features = model(imgs) batch_time.update(time.time() - end) features = features.data.cpu() local_features = local_features.data.cpu() gf.append(features) lgf.append(local_features) g_pids.extend(pids) g_camids.extend(camids) gf = torch.cat(gf, 0) lgf = torch.cat(lgf, 0) g_pids = np.asarray(g_pids) g_camids = np.asarray(g_camids) print("Extracted features for gallery set, 
obtained {}-by-{} matrix".format(gf.size(0), gf.size(1))) print("==> BatchTime(s)/BatchSize(img): {:.3f}/{}".format(batch_time.avg, args.test_batch)) # feature normlization qf = 1. * qf / (torch.norm(qf, 2, dim=-1, keepdim=True).expand_as(qf) + 1e-12) gf = 1. * gf / (torch.norm(gf, 2, dim=-1, keepdim=True).expand_as(gf) + 1e-12) m, n = qf.size(0), gf.size(0) distmat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \ torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t() #distmat.addmm_(1, -2, qf, gf.t()) distmat.addmm_(qf, gf.t(), beta=1, alpha=-2) distmat = distmat.numpy() if not args.test_distance == 'global': print("Only using global branch") from reid_tutorial import low_memory_local_dist lqf = lqf.permute(0, 2, 1) lgf = lgf.permute(0, 2, 1) local_distmat = low_memory_local_dist(lqf.numpy(), lgf.numpy(), aligned=not args.unaligned) if args.test_distance == 'local': print("Only using local branch") distmat = local_distmat if args.test_distance == 'global_local': print("Using global and local branches") distmat = local_distmat+distmat print("Computing CMC and mAP") cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids, use_metric_cuhk03=args.use_metric_cuhk03) print("Results ----------") print("mAP: {:.1%}".format(mAP)) print("CMC curve") for r in ranks: print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1])) print("------------------") if args.reranking: from reid_tutorial import re_ranking if args.test_distance == 'global': print("Only using global branch for reranking") distmat = re_ranking(qf, gf, k1=20, k2=6, lambda_value=0.3) else: local_qq_distmat = low_memory_local_dist(lqf.numpy(), lqf.numpy(), aligned=not args.unaligned) local_gg_distmat = low_memory_local_dist(lgf.numpy(), lgf.numpy(), aligned=not args.unaligned) local_dist = np.concatenate( [np.concatenate([local_qq_distmat, local_distmat], axis=1), np.concatenate([local_distmat.T, local_gg_distmat], axis=1)], axis=0) if args.test_distance == 'local': print("Only using local branch 
for reranking") distmat = re_ranking(qf, gf, k1=20, k2=6, lambda_value=0.3, local_distmat=local_dist, only_local=True) elif args.test_distance == 'global_local': print("Using global and local branches for reranking") distmat = re_ranking(qf, gf, k1=20, k2=6, lambda_value=0.3, local_distmat=local_dist, only_local=False) print("Computing CMC and mAP for re_ranking") cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids, use_metric_cuhk03=args.use_metric_cuhk03) print("Results ----------") print("mAP(RK): {:.1%}".format(mAP)) print("CMC curve(RK)") for r in ranks: print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1])) print("------------------") return cmc[0] def main(): use_gpu = torch.cuda.is_available() if args.use_cpu: use_gpu = False if use_gpu: pin_memory = True else: pin_memory = False if not args.evaluate: sys.stdout = Logger(osp.join(args.save_dir, 'log_train.txt')) else: sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt')) print("==========\nArgs:{}\n==========".format(args)) if use_gpu: print("Currently using GPU {}".format(args.gpu_devices)) os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices cudnn.benchmark = True torch.cuda.manual_seed_all(args.seed) else: print("Currently using CPU (GPU is highly recommended)") print("Initializing dataset {}".format(args.dataset)) dataset = data_manager.init_img_dataset( root=args.root, name=args.dataset, split_id=args.split_id,) # data augmentation transform_train = T.Compose([ T.Random2DTranslation(args.height, args.width), T.RandomHorizontalFlip(), T.ToTensor(), T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ]) transform_test = T.Compose([ T.Resize((args.height, args.width)), T.ToTensor(), T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ]) trainloader = DataLoader( ImageDataset(dataset.train, transform=transform_train), sampler=RandomIdentitySampler(dataset.train, num_instances=args.num_instances), batch_size=args.train_batch, num_workers=args.workers, 
pin_memory=pin_memory, drop_last=True, ) queryloader = DataLoader( ImageDataset(dataset.query, transform=transform_test), batch_size=args.test_batch, shuffle=False, num_workers=args.workers, pin_memory=pin_memory, drop_last=False, ) galleryloader = DataLoader( ImageDataset(dataset.gallery, transform=transform_test), batch_size=args.test_batch, shuffle=False, num_workers=args.workers, pin_memory=pin_memory, drop_last=False, ) print("Initializing model: {}".format(args.arch)) model = models.init_model(name=args.arch, num_classes=dataset.num_train_pids, loss={'softmax', 'metric'}) print("Model size: {:.5f}M".format(sum(p.numel() for p in model.parameters())/1000000.0)) criterion_class = CrossEntropyLoss() criterion_metric = TripletLoss(margin=args.margin) #criterion_metric = TripletLossAlignedReID(margin=args.margin) # criterion_class = nn.CrossEntropyLoss() # optimizer = torch.optim.adam() # 优化器 optimizer = init_optim(args.optim, model.parameters(), args.lr, args.weight_decay) # 学习率 if args.stepsize > 0: scheduler = lr_scheduler.StepLR(optimizer, step_size=args.stepsize, gamma=args.gamma) # 阶梯型衰减 start_epoch = args.start_epoch if args.resume: print("Loading checkpoint from '{}'".format(args.resume)) checkpoint = torch.load(args.resume) model.load_state_dict(checkpoint['state_dict']) start_epoch = checkpoint['epoch'] if use_gpu: model = nn.DataParallel(model).cuda() # model.module.parameter() if args.evaluate: print('Evaluate only!') test(model, queryloader, galleryloader, use_gpu) return 0 start_time = time.time() train_time = 0 best_rank1 = -np.inf best_epoch = 0 print('start training') for epoch in range(start_epoch, args.max_epoch): start_train_time = time.time() train(epoch, model, criterion_class, criterion_metric, optimizer, trainloader, use_gpu) #model.save_state_dict() train_time += round(time.time() - start_train_time) if args.stepsize > 0: scheduler.step() if (epoch + 1) > args.start_eval and args.eval_step > 0 and (epoch + 1) % args.eval_step == 0 or 
(epoch + 1) == args.max_epoch: print("==> Test") rank1 = test(model, queryloader, galleryloader, use_gpu) is_best = rank1 > best_rank1 if is_best: best_rank1 = rank1 best_epoch = epoch + 1 if use_gpu: state_dict = model.module.state_dict() else: state_dict = model.state_dict() save_checkpoint({ 'state_dict': state_dict, 'rank1': rank1, 'epoch': epoch, }, is_best, osp.join(args.save_dir, 'checkpoint_ep' + str(epoch + 1) + '.pth.tar')) print("==> Best Rank-1 {:.1%}, achieved at epoch {}".format(best_rank1, best_epoch)) elapsed = round(time.time() - start_time) elapsed = str(datetime.timedelta(seconds=elapsed)) train_time = str(datetime.timedelta(seconds=train_time)) print("Finished. Total elapsed time (h:m:s): {}. Training time (h:m:s): {}.".format(elapsed, train_time)) def train(epoch, model, criterion_class, criterion_metric, optimizer, trainloader, use_gpu): model.train() losses = AverageMeter() xent_losses = AverageMeter() triplet_losses = AverageMeter() batch_time = AverageMeter() data_time = AverageMeter() end = time.time() for batch_idx, (imgs, pids, _) in enumerate(trainloader): if use_gpu: # GPU跑数据 imgs, pids = imgs.cuda(), pids.cuda() # measure data loading time data_time.update(time.time() - end) outputs, features = model(imgs) xent_loss = criterion_class(outputs, pids) triplet_loss = criterion_metric(features, pids) loss = xent_loss + triplet_loss optimizer.zero_grad() loss.backward() optimizer.step() batch_time.update(time.time() - end) end = time.time() losses.update(loss.item(), pids.size(0)) xent_losses.update(xent_loss.item(), pids.size(0)) triplet_losses.update(triplet_loss.item(), pids.size(0)) if (batch_idx+1) % args.print_freq == 0: print('Epoch: [{0}][{1}/{2}]\t' 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' 'Data {data_time.val:.3f} ({data_time.avg:.3f})\t' 'Loss {loss.val:.4f} ({loss.avg:.4f})\t' 'CLoss {xent_loss.val:.4f} ({xent_loss.avg:.4f})\t' 'MLoss {triplet_loss.val:.4f} ({triplet_loss.avg:.4f})\t'.format( epoch+1, batch_idx+1, 
len(trainloader), batch_time=batch_time, data_time=data_time, loss=losses, xent_loss=xent_losses, triplet_loss=triplet_losses)) if __name__ == '__main__': main()
5.具体实现的项目的地址如下: