Hand Pose Detection (per Video Segment) _v3 (HCN)

Reference:

https://github.com/huguyuehuhu/HCN-pytorch

HCN (Hierarchical Co-occurrence Network):
1. Proposed in 2018, predating ST-GCN.
2. Unlike GCN- and RNN-based methods, it performs skeleton-based action recognition with plain CNNs.
3. By transposing dimensions it convolves with the joint axis as the channel axis, which gives a learnable weight between every pair of joints; in effect the skeleton graph is treated as strongly connected. See the sketch below.
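
To make point 3 concrete, here is a minimal PyTorch sketch of the dimension-transpose trick (the layer widths are illustrative, not the paper's exact configuration):

import torch
import torch.nn as nn

class CoOccurrenceBlock(nn.Module):
    """Toy HCN-style block. After moving the joint axis V into the channel
    position, an ordinary Conv2d mixes all joints at every location, i.e. it
    learns a weight between every pair of joints, instead of being restricted
    to the skeleton edges like a GCN."""
    def __init__(self, num_joint=15, out_channel=32):
        super().__init__()
        self.point_conv = nn.Conv2d(3, 32, kernel_size=1)     # coordinates as channels
        self.cooc_conv = nn.Conv2d(num_joint, out_channel,
                                   kernel_size=3, padding=1)  # joints as channels

    def forward(self, x):                        # x: (N, 3, T, V)
        x = self.point_conv(x)                   # (N, 32, T, V)
        x = x.permute(0, 3, 2, 1).contiguous()   # (N, V, T, 32): joints -> channels
        return self.cooc_conv(x)                 # (N, out_channel, T, 32)

x = torch.randn(2, 3, 64, 15)                    # 2 clips, 64 frames, 15 joints
print(CoOccurrenceBlock()(x).shape)              # torch.Size([2, 32, 64, 32])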

Experimental results:

Code:

Only three files are included here (data processing + training + testing); the rest can be downloaded from the reference link above.

data.py   (data processing)

import torch
import os
import random
import numpy as np


list1=[16,21,24,32]   # test subject IDs (for the commented-out per-subject split below)
list2=[22,33]   # validation subject IDs

forder='2'
forder2='2'
split_label=1   # whether to keep the video name alongside the label
frame_nums=64   # number of frames per sample
file_name = 'dataset2/test8/joint/clear/txt/'+str(forder)+'.txt'

save='dataset2/test8/joint/clear/'+str(forder2)+"/"
if not os.path.exists(save):
	os.makedirs(save)


f = open(file_name)
lines = f.readlines()

prev_video = int(lines[0].strip().split(' ')[1])   # video ID
prev_categ = int(lines[0].strip().split(' ')[2])   # class label

datas=[]
datas_label=[]
frames = []
train = []
valid = []
test  = []
train_label = []
valid_label = []
test_label  = []
m=0

for line in lines:
	line = line.strip().split(' ')
	vid = int(line[1])   # video ID
	aid = int(line[0])   # subject (person) ID
	cid = int(line[2])   # class label
	label=list(map(int, line[:3]))
	features = list(map(float, line[3:]))   # flattened keypoint coordinates (x, y, z per joint)
	
	# Frames sharing the same video ID are accumulated into one training sample
	if prev_video == vid:
		frames.append(np.reshape(np.asarray(features), (-1,3)))   # reshape the flat list into (num_joints, 3), e.g. (15, 3)
	else:
		# If the clip has at least frame_nums frames, keep the first frame_nums and stack them into a torch tensor
		if len(frames) >= frame_nums:
			# frames = random.sample(frames, frame_nums)   # random frame sampling
			frames = frames[0:frame_nums]    # take frames in order
			frames = torch.from_numpy(np.stack(frames, 0))  # stack the frames along dim 0 and convert to torch

		# Otherwise, linearly interpolate the clip up to frame_nums frames
		else:
			frames = np.stack(frames, 0) # stack n frames: n*[1,15,3] -> [n,15,3], one sample
			xloc = np.arange(frames.shape[0])   # original frame indices 0..n-1
			new_xloc = np.linspace(0, frames.shape[0] - 1, frame_nums)  # frame_nums evenly spaced positions over the original index range
			frames = np.reshape(frames, (frames.shape[0], -1)).transpose()  # (n, J*3) -> (J*3, n): one row per coordinate channel
			# print(frames.shape,xloc.shape,new_xloc.shape)
			
			new_datas = []
			for channel in frames:
				new_datas.append(np.interp(new_xloc, xloc, channel))   # linear interpolation to frame_nums samples per channel
			frames = torch.from_numpy(np.stack(new_datas, 0)).t()  # back to (frame_nums, J*3) as a torch tensor

		frames = frames.view(frame_nums, -1, 3)  # reshape to (frame_nums, num_joints, 3)
		datas.append(frames)   # sample data
		if split_label==1:
			datas_label.append(label)   # label = [subject ID, video ID, class]
		else:
			datas_label.append(prev_categ)   # class label only

		# m+=1
		# # Alternative split: by subject ID
		# if aid in list1:
		# 	test.append(frames)
		# 	test_label.append(prev_categ)
		# elif aid in list2:
		# 	valid.append(frames)
		# 	valid_label.append(prev_categ)
		# else:
		# 	train.append(frames)
		# 	train_label.append(prev_categ)

		frames = [np.reshape(np.asarray(features), (-1,3))]  # reset frames with the first frame of the new video

	prev_actor = aid   # remember the subject ID
	prev_video = vid   # remember the video ID
	prev_categ = cid   # remember the class label
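
# NOTE: the frames buffered for the final video in the txt file are never
# flushed into datas above, so the last clip is silently dropped.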


# Random 70/20/10 train/valid/test split
lens=len(datas)
num=random.sample(range(lens),lens)   # a random permutation of the sample indices

for i in range(lens):
	index=num[i]
	if i <=int(lens*0.7):
		train.append(datas[index])
		train_label.append(datas_label[index])
	elif i <=int(lens*0.9):
		valid.append(datas[index])
		valid_label.append(datas_label[index])
	else:
		test.append(datas[index])
		test_label.append(datas_label[index])



train_label = torch.from_numpy(np.asarray(train_label))
valid_label = torch.from_numpy(np.asarray(valid_label))
test_label  = torch.from_numpy(np.asarray(test_label))
print(len(train_label),len(valid_label),len(test_label))
print(test_label.shape)


torch.save((torch.stack(train, 0), train_label), save+'train.pkl')
torch.save((torch.stack(valid, 0), valid_label), save+'valid.pkl')
torch.save((torch.stack(test, 0),  test_label),  save+'test.pkl')


# Data preparation pipeline:
# 1. Cut a ~2 min video into 1 s clips, numbered 1..n, giving n samples; tag each clip
#    with its subject ID (1..m) and an action-class label.
# 2. Extract the keypoints of every frame; choose a stable central point as the origin
#    and subtract it from every other point, so the origin becomes (0,0) and all
#    coordinates are relative to it.
# 3. Store the coordinates as a flat 1-D list; the code reshapes them back later, e.g. to (n, -1, 3).
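
As a quick standalone check of steps 2 and 3 above, together with the interpolation branch in data.py, here is a small sketch (the clip length, joint count, and reference joint are made-up values for illustration):

import numpy as np
import torch

frame_nums = 64
raw = np.random.rand(40, 15, 3).astype(np.float32)  # a 40-frame clip, 15 joints, xyz

# step 2: subtract a stable reference joint (here joint 0) in every frame,
# turning absolute coordinates into coordinates relative to that origin
rel = raw - raw[:, :1, :]

# interpolate up to frame_nums frames, mirroring data.py: one np.interp call
# per flattened coordinate channel (15 joints * 3 coords = 45 channels)
flat = rel.reshape(rel.shape[0], -1).T                    # (45, 40)
xloc = np.arange(flat.shape[1])                           # original frame indices
new_xloc = np.linspace(0, flat.shape[1] - 1, frame_nums)  # 64 evenly spaced positions
resampled = np.stack([np.interp(new_xloc, xloc, ch) for ch in flat], 0)

frames = torch.from_numpy(np.ascontiguousarray(resampled.T)).view(frame_nums, -1, 3)
print(frames.shape)  # torch.Size([64, 15, 3])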

train.py (cpu)

import sys
import matplotlib.pyplot as plt
import argparse
import logging
import os
import random
import numpy as np
import torch
import json
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import MultiStepLR,ExponentialLR,ReduceLROnPlateau
from torch.autograd import Variable
from tqdm import tqdm
tqdm.monitor_interval = 0
import torchnet
# from torchnet.meter import ConfusionMeter,aucmeter
from torchnet.logger import VisdomPlotLogger, VisdomLogger,MeterLogger 
import torch.backends.cudnn as cudnn
import time
from utils import  utils
from utils.utils import str2bool
from utils import  data_loader
from utils import HCN


parser = argparse.ArgumentParser()
# model paths
parser.add_argument('--json_file', default='model/params_train.json', help="path to the hyperparameter json")
parser.add_argument('--model_dir', default='model/HCN/test8.2/',help="parent directory of the model")
parser.add_argument('--type', default='train', help='train or load_train')

# training epochs
parser.add_argument('--start_epoch', default=0, help='')
parser.add_argument('--num_epochs', default=200, help='')
parser.add_argument('--every_n_epoch', default=20, help='')
parser.add_argument('--summary_steps', default=1, help='')

# dataset
split_label=1
data_forder = "test8/bone/clear/2"
parser.add_argument('--train_path', type=str, default='dataset2/'+str(data_forder)+'/train.pkl')
parser.add_argument('--valid_path', type=str, default='dataset2/'+str(data_forder)+'/valid.pkl')
parser.add_argument('--test_path',  type=str, default='dataset2/'+str(data_forder)+'/test.pkl')
parser.add_argument('--batch_size',  type=int, default=1)

# split the video name off from the label
def get_label_vid(data):
    vid=[]
    label=[]
    for i in  data:
        label.append(int(i[2]))
        name=str(int(i[0]))+'_'+str(int(i[1]))+'.mp4'
        vid.append(name)
        
    labels=torch.from_numpy(np.array(label))
    return labels,np.array(vid)

def train(model, optimizer, loss_fn, train_loader, train_label, metrics, params,logger):
    model.train()  # model.train() enables BatchNorm/Dropout training behaviour; model.eval() disables it

    # summary for current training loop and a running average object for loss
    summ = []
    loss_avg = utils.RunningAverage()
    confusion_meter = torchnet.meter.ConfusionMeter(params.model_args["num_class"], normalized=True)
    confusion_meter.reset()

    # optional tqdm progress bar:
    # with tqdm(total=len(train_loader)) as t:
    for i, data_batch in enumerate(train_loader):
        data_batch=data_batch[0].float()
        labels_batch=train_label[i].view(1).long()
        # print(type(data_batch),type(labels_batch))
        
        # move to GPU if enabled
        if params.cuda:
            data_batch, labels_batch = data_batch.cuda(params.gpu_id), labels_batch.cuda(params.gpu_id)

        # convert to torch Variables
        data_batch, labels_batch = Variable(data_batch), Variable(labels_batch)

        # compute model output and loss
        output_batch = model(data_batch,target=labels_batch)

        loss_bag = loss_fn(output_batch,labels_batch,current_epoch=params.current_epoch, params=params)
        loss = loss_bag['ls_all']

        confusion_meter.add(output_batch.data,labels_batch.data)
        
        optimizer.zero_grad()   # clear gradients once per batch
        loss.backward()   # backpropagate to compute gradients
        optimizer.step()   # apply the parameter update

        # record metrics every summary_steps steps
        if i % args.summary_steps == 0:
            # extract data from torch Variable, move to cpu, convert to numpy arrays
            output_batch = output_batch.data
            labels_batch = labels_batch.data

            # compute all metrics on this batch
            summary_batch = {metric:metrics[metric](output_batch, labels_batch) for metric in metrics}
            
            for l,v in loss_bag.items():
                summary_batch[l]=v.data.item()
            summ.append(summary_batch)

        # update the running average loss (mainly for the progress bar)
        loss_running = loss.data.item()
        loss_avg.update(loss_running )

        # t.set_postfix(loss_running ='{:05.3f}'.format(loss_avg()))
        # t.update()

    # compute mean of all metrics in summary
    metrics_mean = {metric:np.mean([x[metric] for x in summ]) for metric in summ[0]}
    metrics_string = " ; ".join("{}: {:05.3f}".format(k, v) for k, v in metrics_mean.items())
    logger.info("- Train metrics: " + metrics_string)

    return metrics_mean,confusion_meter

def evaluate(model, loss_fn, valid_loader,valid_label, metrics, params,logger):
    model.eval()
    # summary for current eval loop
    summ = []
    confusion_meter = torchnet.meter.ConfusionMeter(params.model_args["num_class"], normalized=True)
    confusion_meter.reset()

    # compute metrics over the validation set
    for i, data_batch in enumerate(valid_loader):
        data_batch=data_batch[0].float()
        labels_batch=valid_label[i].view(1).long()
        # print(type(data_batch),type(labels_batch))
        
        # move to GPU if enabled
        if params.cuda:
            data_batch, labels_batch = data_batch.cuda(params.gpu_id), labels_batch.cuda(params.gpu_id)

        # fetch the next evaluation batch
        data_batch, labels_batch = Variable(data_batch), Variable(labels_batch)

        # compute model output
        output_batch = model(data_batch)

        loss_bag  = loss_fn(output_batch,labels_batch,current_epoch=params.current_epoch, params=params)
        loss = loss_bag['ls_all']

        confusion_meter.add(output_batch.data,labels_batch.data)

        # extract data from torch Variable, move to cpu, convert to numpy arrays
        output_batch = output_batch.data
        labels_batch = labels_batch.data

        # compute all metrics on this batch
        summary_batch = {metric: metrics[metric](output_batch, labels_batch) for metric in metrics}
        
        for l, v in loss_bag.items():
            summary_batch[l] = v.data.item()
        summ.append(summary_batch)

    # compute mean of all metrics in summary
    metrics_mean = {metric: np.mean([x[metric] for x in summ]) for metric in summ[0]}
    metrics_string = " ; ".join("{}: {:05.3f}".format(k, v) for k, v in metrics_mean.items())
    # logger.info("- Eval metrics : " + metrics_string)

    return metrics_mean,confusion_meter

def train_and_evaluate(model, train_loader,train_label, valid_loader,valid_label, optimizer,loss_fn, metrics, params, args, logger):
    best_val_acc = 0.0

    # resume from a saved checkpoint
    if args.type == 'load_train':
        restore_file=os.path.join(args.model_dir, 'last.pth.tar')
        logging.info("Restoring parameters from {}".format(restore_file))

        checkpoint = utils.load_checkpoint(restore_file, model, optimizer)
        args.start_epoch = checkpoint['epoch']

        best_val_acc = checkpoint['best_val_acc']
        print('best_val_acc=',best_val_acc)
        print(optimizer.state_dict()['param_groups'][0]['lr'], checkpoint['epoch'])

    # MultiStepLR: drop the LR at hand-picked epochs, useful once the loss curve is known
    if params.lr_decay_type == 'multistep':
        scheduler = MultiStepLR(optimizer, milestones=params.lr_step, gamma=params.gamma1,last_epoch= args.start_epoch-1)
    # ExponentialLR: multiply the LR by gamma every epoch
    elif params.lr_decay_type == 'exp':
        scheduler =  ExponentialLR(optimizer, gamma=params.gamma2,last_epoch=args.start_epoch - 1)
    # ReduceLROnPlateau: shrink the LR when a monitored metric (e.g. acc or loss) stops improving
    elif params.lr_decay_type == 'plateau':
        scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=params.gamma3, patience=params.patience, 
                    verbose=False,threshold=0.0001, threshold_mode='rel', cooldown=0, min_lr=0,eps=1e-08)

    # training loop (x / y_loss / y_acc collect per-epoch curves for the plots below)
    x=[]
    y_loss=[]
    y_acc=[]
    for epoch in range(args.start_epoch,args.num_epochs):
        star=time.time()
        params.current_epoch = epoch

        # learning-rate decay (epoch-based schedulers)
        if params.lr_decay_type != 'plateau':
            scheduler.step()

        # train for one epoch over the training set
        train_metrics,train_confusion_meter = train(model, optimizer, loss_fn, train_loader, train_label, metrics, params,logger)

        # validate
        val_metrics,val_confusion_meter = evaluate(model, loss_fn, valid_loader,valid_label, metrics, params,logger)

        # log elapsed time / progress
        end=time.time()-star
        logger.info("Epoch {}/{};   Time:{:05.3f}".format(epoch + 1, args.num_epochs, end))

        # learning-rate decay (metric-based scheduler)
        if params.lr_decay_type == 'plateau':
            scheduler.step(val_metrics['ls_all'])

        # validation results
        val_acc = val_metrics['accuracytop1']
        is_best = val_acc >= best_val_acc
        val_metrics['best_epoch'] = epoch + 1

        # save checkpoint (style 1: serialize the whole model object)
        utils.save_checkpoint(model,epoch= epoch+1,is_best=is_best,every_n_epoch = args.every_n_epoch,
                               checkpoint = args.model_dir)
        
        # save checkpoint (style 2: state_dict + optimizer state), kept for reference:
        # utils.save_checkpoint( {'epoch': epoch + 1,
        #                        'state_dict': model.state_dict(),
        #                        'optim_dict': optimizer.state_dict(),
        #                         'best_val_acc':best_val_acc},
        #                       epoch= epoch+1,
        #                       is_best=is_best,
        #                       every_n_epoch = args.every_n_epoch,
        #                       checkpoint = args.model_dir)

        # dump the latest val metrics to a json file
        last_json_path = os.path.join(args.model_dir, "val_last_weights.json")
        utils.save_dict_to_json(val_metrics, last_json_path)

        # if improved, also dump the best val metrics to a json file
        if is_best:
            best_val_acc = val_acc
            best_json_path = os.path.join(args.model_dir, "val_best_weights.json")
            utils.save_dict_to_json(val_metrics, best_json_path)
            # logger.info("- Found new best accuracy")

        if train_metrics['ls_all']<=0.001:
            # early stop: plot the curves collected so far
            # plt.subplots(1, 1)
            plt.rcParams['font.sans-serif'] = ['SimHei'] # needed so Chinese characters render in matplotlib
            plt.plot(x,y_loss, color='red', linewidth=2, label='loss', linestyle='-')
            plt.plot(x,y_acc, color='green', linewidth=2, label='acc', linestyle='-')
            plt.legend(loc='upper left') # add the legend before saving, or it is missing from the png
            plt.savefig(args.model_dir+"loss.png")
            
            print("loss=",train_metrics['ls_all'])
            break

        # # vis logger
        # accs = [100. * (1 - train_metrics['accuracytop1']),100. * (1 - train_metrics['accuracytop2']),
        #        100. * (1 - val_metrics['accuracytop1']),100. * (1 - val_metrics['accuracytop2']),]
        # error_logger15.log([epoch]*4,accs )

        # losses = [train_metrics['loss'],val_metrics['loss']]
        # loss_logger.log([epoch]*2,losses )
        # train_confusion_logger.log(train_confusion_meter.value())
        # test_confusion_logger.log(val_confusion_meter.value())

        # # log split loss
        # if epoch == args.start_epoch:
        #     loss_key = []
        #     for key in [k for k,v in train_metrics.items()] :
        #         if 'ls' in key: loss_key.append(key)
        #     loss_split_key = ['train_'+k for k in loss_key] + ['val_'+k for k in loss_key]
        #     loss_logger_split.opts['legend'] = loss_split_key

        # loss_split = [train_metrics[k] for k in loss_key]+[val_metrics[k] for k in loss_key]
        # loss_logger_split.log([epoch] * len(loss_split_key),loss_split)
        
        x.append(epoch+1)
        y_loss.append(train_metrics['ls_all'])
        y_acc.append(val_acc)


    # plot the final loss/accuracy curves
    # plt.subplots(1, 1)
    plt.rcParams['font.sans-serif'] = ['SimHei'] # needed so Chinese characters render in matplotlib
    plt.plot(x,y_loss, color='red', linewidth=2, label='loss', linestyle='-')
    plt.plot(x,y_acc, color='green', linewidth=2, label='acc', linestyle='-')

    plt.legend(loc='upper left') # add the legend before saving, or it is missing from the png
    plt.savefig(args.model_dir+"loss.png")
    


if __name__ == '__main__':
    # parse command-line arguments
    args = parser.parse_args()
    # model directory
    model_dir =  args.model_dir
    if not os.path.isdir(model_dir):
        os.makedirs(model_dir)

    # load the params json file
    json_file = args.json_file
    params = utils.Params(json_file)

    # seed everything for reproducible experiments
    torch.manual_seed(params.seed)
    np.random.seed(params.seed)
    random.seed(params.seed)

    # acceleration settings
    if params.gpu_id is not None:
        params.cuda = True
        torch.cuda.manual_seed(params.seed)

    torch.backends.cudnn.deterministic = False  # non-deterministic kernels are faster; fine when the architecture changes often
    cudnn.benchmark = True   # let cudnn autotune the fastest conv algorithms
    torch.cuda.empty_cache() # release cached, unused GPU memory

    # model, loss and metrics
    model = HCN.HCN(**params.model_args)
    loss_fn = HCN.loss_fn
    metrics = HCN.metrics
    # if params.data_parallel:
    #     model = torch.nn.DataParallel(model).cuda()   # multi-GPU
    # else:
    #     model = model.cuda(params.gpu_id)   # single GPU

    # optimizer
    if params.optimizer == 'Adam':
        optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=params.lr, 
                    betas=(0.9, 0.999), eps=1e-8,weight_decay=params.weight_decay)
    elif params.optimizer == 'SGD':
        optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=params.lr, 
                    momentum=0.9,nesterov=True,weight_decay=params.weight_decay)

    # Set the logger
    if args.type =='train':
        utils.set_logger(os.path.join(model_dir,'train.log'))
    elif args.type =='test':
        utils.set_logger(os.path.join(model_dir, 'test.log'))
    elif args.type == 'load_train':
        utils.set_logger(os.path.join(model_dir, 'load_train.log'))
    logger = logging.getLogger()


    # # visdom visualization
    # port,env = 8097,params.model_version
    # columnnames,rownames = list(range(1,params.model_args["num_class"]+1)),list(range(1,params.model_args["num_class"]+1))
    # loss_logger = VisdomPlotLogger('line',port=port,opts={'title': params.model_dir + '_Loss','legend':['train','test']}, win=None,env=env)
    # loss_logger_split = VisdomPlotLogger('line', port=port,opts={'title': params.model_dir + '_Loss_split'},win=None, env=env)
    
    # error_logger15 = VisdomPlotLogger('line', port=port, opts={'title': params.model_dir + '_Error @top1@top2',
    #        'legend': ['train@top1','train@top2','test@top1','test@top2']}, win=None, env=env)
    # train_confusion_logger = VisdomLogger('heatmap', port=port, opts={'title': params.model_dir + 'train_Confusion matrix',
    #     'columnnames': columnnames,'rownames': rownames},win=None,env=env)
    # test_confusion_logger = VisdomLogger('heatmap', port=port, opts={'title': params.model_dir + 'test_Confusion matrix',
    #     'columnnames':columnnames,'rownames': rownames},win=None,env=env)
    # diff_confusion_logger = VisdomLogger('heatmap', port=port, opts={'title': params.model_dir + 'diff_Confusion matrix',
    #     'columnnames':columnnames,'rownames': rownames},win=None,env=env)

    # # print argument/parameter info
    # d_args = vars(args)
    # for k in d_args.keys():
    #     logging.info('{0}: {1}'.format(k, d_args[k]))
    # d_params = vars(params)
    # for k in d_params.keys():
    #     logger.info('{0}: {1}'.format(k, d_params[k]))
    # logger.info(model)  # print the model summary

    
    # load the datasets
    train_loader,train_label, valid_loader,valid_label, test_loader,test_label=data_loader.dataset(args)
    print(len(train_loader))
    # split the video name off from the label
    if split_label==1:
        train_label, train_vid = get_label_vid(train_label)
        valid_label, vaild_vid = get_label_vid(valid_label)
        test_label, test_vid = get_label_vid(test_label)

    # train and evaluate
    if args.type == 'train':
        params.restore_file=None
        train_and_evaluate(model, train_loader,train_label, valid_loader,valid_label, optimizer, loss_fn, 
                           metrics, params, args, logger)
    if args.type == 'load_train':
        train_and_evaluate(model, train_loader,train_label, valid_loader,valid_label, optimizer, loss_fn, 
                           metrics, params, args, logger)
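
utils/HCN.py is not reproduced in this post (download it from the reference link). For orientation, the metrics dict consumed by train()/evaluate() above is assumed to behave roughly like the following top-k accuracy helper; this is a sketch, not the repo's exact code:

import torch

def accuracy(output, target, topk=(1,)):
    # output: (batch, num_class) scores; target: (batch,) class ids
    maxk = max(topk)
    _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
    correct = pred.eq(target.view(-1, 1).expand_as(pred))
    return [correct[:, :k].float().sum().item() / target.size(0) for k in topk]

# assumed interface: metrics[name](output_batch, labels_batch) -> float
metrics = {
    'accuracytop1': lambda out, tgt: accuracy(out, tgt, topk=(1,))[0],
    'accuracytop2': lambda out, tgt: accuracy(out, tgt, topk=(2,))[0],
}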

test.py

import argparse
import os
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from utils import  data_loader


parser = argparse.ArgumentParser()
parser.add_argument('--restore_file', default="model/HCN/test8.2/best.pth.tar", help="dataset name ")

# dataset
split_label=1
data_forder = "test8/joint/clear/2+3+4"
parser.add_argument('--train_path', type=str, default='dataset2/'+str(data_forder)+'/train.pkl')
parser.add_argument('--valid_path', type=str, default='dataset2/'+str(data_forder)+'/valid.pkl')
parser.add_argument('--test_path',  type=str, default='dataset2/'+str(data_forder)+'/test.pkl')
parser.add_argument('--batch_size',  type=int, default=1)
f1=open("dataset2/error.txt",'w+')

def test_only(test_loader, test_label, test_vid, args):
    model=torch.load(args.restore_file, map_location='cpu')   # the whole model object was saved, so no need to rebuild the architecture before loading
    model.eval()
    # for i in model.parameters():
    #     print(i)

    summ = []
    logits = []
    preds = []
    total=0
    # run prediction over the test set
    for i, data_batch in enumerate(test_loader):
        data_batch=data_batch[0].float()
        labels_batch=test_label[i].view(1).long()

        # convert to torch Variables
        data_batch, labels_batch = Variable(data_batch), Variable(labels_batch)

        # compute model output
        output_batch = model(data_batch)
        
        # extract data from torch Variable, move to cpu, convert to numpy arrays
        output_batch = output_batch.data
        labels_batch = labels_batch.data
        

        # take the top-1 log-softmax score and its predicted label
        result=F.log_softmax(output_batch,dim=1)
        logit, pred = result.topk(k=1,dim=1, largest=True,sorted= True)
        logits.append(logit)
        preds.append(pred)
        # print(logit, pred)
        
        # accuracy bookkeeping; log misclassified samples with their video names
        if labels_batch==pred:
            total+=1
        else:
            if split_label==1:
                print("true:",int(labels_batch), "pred:",int(pred))
                f1.write(str(int(labels_batch))+'  '+str(int(pred))+'  '+test_vid[i]+'\n')

    logits=torch.cat(logits,dim=0)
    preds = torch.cat(preds,dim=0)
    print("acc:",total/len(preds))

    return logits,preds

# split the video name off from the label
def get_label_vid(data):
    vid=[]
    label=[]
    for i in  data:
        label.append(int(i[2]))
        name=str(int(i[0]))+'_'+str(int(i[1]))+'.mp4'
        vid.append(name)
        
    labels=torch.from_numpy(np.array(label))
    return labels,np.array(vid)


# parse command-line arguments
args = parser.parse_args()

# load the datasets
train_loader,train_label, valid_loader,valid_label, test_loader,test_label=data_loader.dataset(args)

# split the video name off from the label
if split_label==1:
    train_label, train_vid = get_label_vid(train_label)
    valid_label, vaild_vid = get_label_vid(valid_label)
    test_label, test_vid = get_label_vid(test_label)

    # run prediction
    test_only(test_loader, test_label, test_vid, args)
else:
    test_vid=[]
    test_only(test_loader, test_label, test_vid, args)
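
test.py relies on checkpoint style 1 from train.py: the whole model object was serialized, so torch.load alone restores it. If you switch to the commented-out style 2 (a dict holding a state_dict), loading would instead look roughly like this sketch (the HCN constructor arguments must match those used at training time):

import torch
from utils import HCN

checkpoint = torch.load('model/HCN/test8.2/best.pth.tar', map_location='cpu')
model = HCN.HCN(in_channel=3, out_channel=64, window_size=32,
                num_joint=15, num_person=1, num_class=4)
model.load_state_dict(checkpoint['state_dict'])   # 'state_dict' as saved by style 2
model.eval()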

model/params_train.json   (cpu)

{
    "gpu_id": null,
    "cuda":false,
    "data_parallel": false,

    "seed": 0,
    "optimizer":"Adam",
    "weight_decay":1e-4,

    "lr": 0.0001,
    "lr_decay_type":"exp",
    "lr_step": [80,150,200],
    "gamma1":0.1,
    "gamma2":0.99,
    "gamma3":0.5,
    "patience":10,


    "model_args":{
      "in_channel":3,
      "out_channel":64,
      "window_size":32,
      "num_joint":15,
      "num_person":1,
      "num_class":4},

    "loss_args":{
      "type":"CE"}
}

model/params_test.json

{
    "gpu_id": 0,
    "cuda":false,
    
    "seed": 0,
    "weight_decay":1e-4,
    "lr": 0.001,
    "optimizer":"Adam",
    "batch_size": 1,    

    "model_args":{
      "in_channel":3,
      "out_channel":64,
      "window_size":32,
      "num_joint":15,
      "num_person":1,
      "num_class":4
    }
}
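
utils.Params (from the reference repo) is what turns these JSON files into the params.lr, params.model_args, ... attributes used throughout train.py. It is essentially a thin wrapper along these lines (a sketch, not the repo's exact code):

import json

class Params:
    """Load hyperparameters from a JSON file and expose them as attributes."""
    def __init__(self, json_path):
        with open(json_path) as f:
            self.__dict__.update(json.load(f))

params = Params('model/params_train.json')
print(params.lr, params.model_args['num_class'])   # 0.0001 4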

Training on GPU:

train.py  (gpu)

import sys
import argparse
import logging
import os
import random
import numpy as np
import torch
import json
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import MultiStepLR,ExponentialLR,ReduceLROnPlateau
from torch.autograd import Variable
from tqdm import tqdm
tqdm.monitor_interval = 0
# import torchnet
# # from torchnet.meter import ConfusionMeter,aucmeter
# from torchnet.logger import VisdomPlotLogger, VisdomLogger,MeterLogger 
import torch.backends.cudnn as cudnn
import time
from utils import  utils
from utils.utils import str2bool
from utils import  data_loader
from utils import HCN


parser = argparse.ArgumentParser()
# model paths
parser.add_argument('--json_file', default='model/params_train.json', help="path to the hyperparameter json")
parser.add_argument('--model_dir', default='model/HCN/test7.1/',help="parent directory of the model")
parser.add_argument('--type', default='train', help='train or load_train')

# training epochs
parser.add_argument('--start_epoch', default=0, help='')
parser.add_argument('--num_epochs', default=200, help='')
parser.add_argument('--every_n_epoch', default=20, help='')
parser.add_argument('--summary_steps', default=1, help='')

# dataset
data_forder = "test7-4s/clear2/2+3-811"
parser.add_argument('--train_path', type=str, default='dataset/'+str(data_forder)+'/train.pkl')
parser.add_argument('--valid_path', type=str, default='dataset/'+str(data_forder)+'/valid.pkl')
parser.add_argument('--test_path',  type=str, default='dataset/'+str(data_forder)+'/test.pkl')
parser.add_argument('--batch_size',  type=int, default=1)


def train(model, optimizer, loss_fn, train_loader, train_label, metrics, params,logger):
    model.train()  # model.train() enables BatchNorm/Dropout training behaviour; model.eval() disables it

    # summary for current training loop and a running average object for loss
    summ = []
    loss_avg = utils.RunningAverage()
    # confusion_meter = torchnet.meter.ConfusionMeter(params.model_args["num_class"], normalized=True)
    # confusion_meter.reset()

    # optional tqdm progress bar:
    # with tqdm(total=len(train_loader)) as t:
    for i, data_batch in enumerate(train_loader):
        data_batch=data_batch[0].float()
        labels_batch=train_label[i].view(1).long()
        # print(type(data_batch),type(labels_batch))
        
        # move to GPU if enabled
        if params.cuda:
            data_batch, labels_batch = data_batch.cuda(params.gpu_id), labels_batch.cuda(params.gpu_id)

        # convert to torch Variables
        data_batch, labels_batch = Variable(data_batch), Variable(labels_batch)

        # compute model output and loss
        output_batch = model(data_batch,target=labels_batch)

        loss_bag = loss_fn(output_batch,labels_batch,current_epoch=params.current_epoch, params=params)
        loss = loss_bag['ls_all']

        # confusion_meter.add(output_batch.data,labels_batch.data)
        
        optimizer.zero_grad()   # clear gradients once per batch
        loss.backward()   # backpropagate to compute gradients
        optimizer.step()   # apply the parameter update

        # record metrics every summary_steps steps
        if i % args.summary_steps == 0:
            # extract data from torch Variable, move to cpu, convert to numpy arrays
            output_batch = output_batch.data
            labels_batch = labels_batch.data

            # compute all metrics on this batch
            summary_batch = {metric:metrics[metric](output_batch, labels_batch) for metric in metrics}
            
            for l,v in loss_bag.items():
                summary_batch[l]=v.data.item()
            summ.append(summary_batch)

        # update the running average loss (mainly for the progress bar)
        loss_running = loss.data.item()
        loss_avg.update(loss_running )

        # t.set_postfix(loss_running ='{:05.3f}'.format(loss_avg()))
        # t.update()

    # compute mean of all metrics in summary
    metrics_mean = {metric:np.mean([x[metric] for x in summ]) for metric in summ[0]}
    metrics_string = " ; ".join("{}: {:05.3f}".format(k, v) for k, v in metrics_mean.items())
    logger.info("- Train metrics: " + metrics_string)

    return metrics_mean

def evaluate(model, loss_fn, valid_loader,valid_label, metrics, params,logger):
    model.eval()
    # summary for current eval loop
    summ = []
    # confusion_meter = torchnet.meter.ConfusionMeter(params.model_args["num_class"], normalized=True)
    # confusion_meter.reset()

    # compute metrics over the validation set
    for i, data_batch in enumerate(valid_loader):
        data_batch=data_batch[0].float()
        labels_batch=valid_label[i].view(1).long()
        # print(type(data_batch),type(labels_batch))
        
        # move to GPU if enabled
        if params.cuda:
            data_batch, labels_batch = data_batch.cuda(params.gpu_id), labels_batch.cuda(params.gpu_id)

        # fetch the next evaluation batch
        data_batch, labels_batch = Variable(data_batch), Variable(labels_batch)

        # compute model output
        output_batch = model(data_batch)

        loss_bag  = loss_fn(output_batch,labels_batch,current_epoch=params.current_epoch, params=params)
        loss = loss_bag['ls_all']

        # confusion_meter.add(output_batch.data,labels_batch.data)

        # extract data from torch Variable, move to cpu, convert to numpy arrays
        output_batch = output_batch.data
        labels_batch = labels_batch.data

        # compute all metrics on this batch
        summary_batch = {metric: metrics[metric](output_batch, labels_batch) for metric in metrics}
        
        for l, v in loss_bag.items():
            summary_batch[l] = v.data.item()
        summ.append(summary_batch)

    # compute mean of all metrics in summary
    metrics_mean = {metric: np.mean([x[metric] for x in summ]) for metric in summ[0]}
    metrics_string = " ; ".join("{}: {:05.3f}".format(k, v) for k, v in metrics_mean.items())
    # logger.info("- Eval metrics : " + metrics_string)

    return metrics_mean

def train_and_evaluate(model, train_loader,train_label, valid_loader,valid_label, optimizer,loss_fn, metrics, params, args, logger):
    best_val_acc = 0.0

    # resume from a saved checkpoint
    if args.type == 'load_train':
        restore_file=os.path.join(args.model_dir, 'last.pth.tar')
        logging.info("Restoring parameters from {}".format(restore_file))

        checkpoint = utils.load_checkpoint(restore_file, model, optimizer)
        args.start_epoch = checkpoint['epoch']

        best_val_acc = checkpoint['best_val_acc']
        print('best_val_acc=',best_val_acc)
        print(optimizer.state_dict()['param_groups'][0]['lr'], checkpoint['epoch'])

    # MultiStepLR: drop the LR at hand-picked epochs, useful once the loss curve is known
    if params.lr_decay_type == 'multistep':
        scheduler = MultiStepLR(optimizer, milestones=params.lr_step, gamma=params.gamma1,last_epoch= args.start_epoch-1)
    # ExponentialLR: multiply the LR by gamma every epoch
    elif params.lr_decay_type == 'exp':
        scheduler =  ExponentialLR(optimizer, gamma=params.gamma2,last_epoch=args.start_epoch - 1)
    # ReduceLROnPlateau: shrink the LR when a monitored metric (e.g. acc or loss) stops improving
    elif params.lr_decay_type == 'plateau':
        scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=params.gamma3, patience=params.patience, 
                    verbose=False,threshold=0.0001, threshold_mode='rel', cooldown=0, min_lr=0,eps=1e-08)

    # training loop
    start_total = time.time()
    for epoch in range(args.start_epoch,args.num_epochs):
        star=time.time()
        params.current_epoch = epoch

        # learning-rate decay (epoch-based schedulers)
        if params.lr_decay_type != 'plateau':
            scheduler.step()

        # train for one epoch over the training set
        train_metrics = train(model, optimizer, loss_fn, train_loader, train_label, metrics, params,logger)

        # validate
        val_metrics = evaluate(model, loss_fn, valid_loader,valid_label, metrics, params,logger)

        # log elapsed time / progress
        end=time.time()-star
        logger.info("Epoch {}/{};   Time:{:05.3f}".format(epoch + 1, args.num_epochs, end))

        # learning-rate decay (metric-based scheduler)
        if params.lr_decay_type == 'plateau':
            scheduler.step(val_metrics['ls_all'])

        # validation results
        val_acc = val_metrics['accuracytop1']
        is_best = val_acc >= best_val_acc
        val_metrics['best_epoch'] = epoch + 1


        # save checkpoint (style 1: serialize the whole model object)
        utils.save_checkpoint(model,epoch= epoch+1,is_best=is_best,every_n_epoch = args.every_n_epoch,
                               checkpoint = args.model_dir)
        
        # save checkpoint (style 2: state_dict + optimizer state), kept for reference:
        # utils.save_checkpoint( {'epoch': epoch + 1,
        #                        'state_dict': model.state_dict(),
        #                        'optim_dict': optimizer.state_dict(),
        #                         'best_val_acc':best_val_acc},
        #                       epoch= epoch+1,
        #                       is_best=is_best,
        #                       every_n_epoch = args.every_n_epoch,
        #                       checkpoint = args.model_dir)

        # dump the latest val metrics to a json file
        last_json_path = os.path.join(args.model_dir, "val_last_weights.json")
        utils.save_dict_to_json(val_metrics, last_json_path)

        # if improved, also dump the best val metrics to a json file
        if is_best:
            best_val_acc = val_acc
            best_json_path = os.path.join(args.model_dir, "val_best_weights.json")
            utils.save_dict_to_json(val_metrics, best_json_path)
            # logger.info("- Found new best accuracy")

        if train_metrics['ls_all']<=0.001:
            print("loss=",train_metrics['ls_all'])
            break

        # # vis logger
        # accs = [100. * (1 - train_metrics['accuracytop1']),100. * (1 - train_metrics['accuracytop2']),
        #        100. * (1 - val_metrics['accuracytop1']),100. * (1 - val_metrics['accuracytop2']),]
        # error_logger15.log([epoch]*4,accs )

        # losses = [train_metrics['loss'],val_metrics['loss']]
        # loss_logger.log([epoch]*2,losses )
        # train_confusion_logger.log(train_confusion_meter.value())
        # test_confusion_logger.log(val_confusion_meter.value())

        # # log split loss
        # if epoch == args.start_epoch:
        #     loss_key = []
        #     for key in [k for k,v in train_metrics.items()] :
        #         if 'ls' in key: loss_key.append(key)
        #     loss_split_key = ['train_'+k for k in loss_key] + ['val_'+k for k in loss_key]
        #     loss_logger_split.opts['legend'] = loss_split_key

        # loss_split = [train_metrics[k] for k in loss_key]+[val_metrics[k] for k in loss_key]
        # loss_logger_split.log([epoch] * len(loss_split_key),loss_split)
    
    end2=time.time()-start_total
    logger.info("Epoch {};   total time: {:05.3f}".format(epoch + 1, end2))

if __name__ == '__main__':
    # parse command-line arguments
    args = parser.parse_args()
    # model directory
    model_dir =  args.model_dir
    if not os.path.isdir(model_dir):
        os.makedirs(model_dir)

    # load the params json file
    json_file = args.json_file
    params = utils.Params(json_file)

    # seed everything for reproducible experiments
    torch.manual_seed(params.seed)
    np.random.seed(params.seed)
    random.seed(params.seed)

    # acceleration settings (gpu_id is 0 here, so test against None rather than truthiness)
    if params.gpu_id is not None:
        params.cuda = True
        torch.cuda.manual_seed(params.seed)

    torch.backends.cudnn.deterministic = False  # non-deterministic kernels are faster; fine when the architecture changes often
    cudnn.benchmark = True   # let cudnn autotune the fastest conv algorithms
    torch.cuda.empty_cache() # release cached, unused GPU memory

    # model, loss and metrics
    model = HCN.HCN(**params.model_args)
    loss_fn = HCN.loss_fn
    metrics = HCN.metrics
    if params.data_parallel:
        model = torch.nn.DataParallel(model).cuda()   # multi-GPU
    else:
        model = model.cuda(params.gpu_id)   # single GPU

    # optimizer
    if params.optimizer == 'Adam':
        optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=params.lr, 
                    betas=(0.9, 0.999), eps=1e-8,weight_decay=params.weight_decay)
    elif params.optimizer == 'SGD':
        optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=params.lr, 
                    momentum=0.9,nesterov=True,weight_decay=params.weight_decay)

    # Set the logger
    if args.type =='train':
        utils.set_logger(os.path.join(model_dir,'train.log'))
    elif args.type =='test':
        utils.set_logger(os.path.join(model_dir, 'test.log'))
    elif args.type == 'load_train':
        utils.set_logger(os.path.join(model_dir, 'load_train.log'))
    logger = logging.getLogger()


    # # visdom visualization
    # port,env = 8097,params.model_version
    # columnnames,rownames = list(range(1,params.model_args["num_class"]+1)),list(range(1,params.model_args["num_class"]+1))
    # loss_logger = VisdomPlotLogger('line',port=port,opts={'title': params.model_dir + '_Loss','legend':['train','test']}, win=None,env=env)
    # loss_logger_split = VisdomPlotLogger('line', port=port,opts={'title': params.model_dir + '_Loss_split'},win=None, env=env)
    
    # error_logger15 = VisdomPlotLogger('line', port=port, opts={'title': params.model_dir + '_Error @top1@top2',
    #        'legend': ['train@top1','train@top2','test@top1','test@top2']}, win=None, env=env)
    # train_confusion_logger = VisdomLogger('heatmap', port=port, opts={'title': params.model_dir + 'train_Confusion matrix',
    #     'columnnames': columnnames,'rownames': rownames},win=None,env=env)
    # test_confusion_logger = VisdomLogger('heatmap', port=port, opts={'title': params.model_dir + 'test_Confusion matrix',
    #     'columnnames':columnnames,'rownames': rownames},win=None,env=env)
    # diff_confusion_logger = VisdomLogger('heatmap', port=port, opts={'title': params.model_dir + 'diff_Confusion matrix',
    #     'columnnames':columnnames,'rownames': rownames},win=None,env=env)

    # # print argument/parameter info
    # d_args = vars(args)
    # for k in d_args.keys():
    #     logging.info('{0}: {1}'.format(k, d_args[k]))
    # d_params = vars(params)
    # for k in d_params.keys():
    #     logger.info('{0}: {1}'.format(k, d_params[k]))
    # logger.info(model)  # print the model summary

    
    # load the datasets
    train_loader,train_label, valid_loader,valid_label, test_loader,test_label=data_loader.dataset(args)

    # train and evaluate
    if args.type == 'train':
        params.restore_file=None
        train_and_evaluate(model, train_loader,train_label, valid_loader,valid_label, optimizer, loss_fn, 
                           metrics, params, args, logger)
    if args.type == 'load_train':
        train_and_evaluate(model, train_loader,train_label, valid_loader,valid_label, optimizer, loss_fn, 
                           metrics, params, args, logger)

model/params_train.json   (gpu)

{
    "gpu_id": 0,
    "cuda":true,
    "data_parallel": false,

    "seed": 0,
    "optimizer":"Adam",
    "weight_decay":1e-4,

    "lr": 0.0001,
    "lr_decay_type":"exp",
    "lr_step": [80,150,200],
    "gamma1":0.1,
    "gamma2":0.99,
    "gamma3":0.5,
    "patience":10,


    "model_args":{
      "in_channel":3,
      "out_channel":64,
      "window_size":32,
      "num_joint":15,
      "num_person":1,
      "num_class":4},

    "loss_args":{
      "type":"CE"}
}
