手部姿态检测(按视频段)_v4(2s-agcn)

参考链接:

https://github.com/lshiwjx/2s-AGCN

https://blog.csdn.net/zahidzqj/article/details/95813290

https://www.jiqizhixin.com/articles/2020-01-15-14

2s-agcn(双流自适应图卷积):
1.2019年提出的一种新的自适应图卷积网络,改进了stgcn的一些缺点
2.双流是:分别训练关节(单个关键点)和骨骼(两个关键点之间),然后把两个模型的softmax结果相加
3.自适应:提出了更加合理的邻接矩阵策略,增强了网络对空间特征的抽取能力


试验结果:

代码工程:

双流分别训练两个模型,即关节(joint)模型和骨骼(bone)模型,然后测试的时候合并进行测试。

本章仅贴出关键文件,其余文件可从参考链接下载。

data.py  数据处理

from torch.utils import data
import torch
import os
import random
import numpy as np

def get_feature(file_name, save_path, split_label, frame_nums, list1, list2, random_nums=None):
	"""Parse a keypoint txt file into fixed-length clips and save train/valid/test pickles.

	Each input line is: "<actor_id> <video_id> <class_id> <x1> <y1> <z1> ...".
	Consecutive lines sharing a video id form one clip.  Every clip is
	normalised to exactly `frame_nums` frames (truncated when longer,
	linearly interpolated when shorter) and routed by actor id:
	list1 -> test split, list2 -> validation split, everything else -> train.

	Fixes over the original version:
	  * the input file handle is now closed (context manager);
	  * the LAST video in the file is flushed too (it was silently dropped);
	  * each clip's label and split actor come from the finished video's own
	    lines, not from the first line of the NEXT video;
	  * blank / malformed lines are skipped instead of crashing.

	Args:
		file_name: path of the input txt file.
		save_path: output directory (must end with a path separator).
		split_label: 1 -> keep [actor, video, class] as the label,
			otherwise keep only the class id.
		frame_nums: number of frames every clip is normalised to.
		list1: actor ids routed to the test split.
		list2: actor ids routed to the validation split.
		random_nums: pass-through value kept for the (disabled) random-split
			path; returned unchanged.

	Returns:
		random_nums, unchanged.
	"""
	with open(file_name) as f:  # fixed: handle was never closed
		lines = f.readlines()

	train, valid, test = [], [], []
	train_label, valid_label, test_label = [], [], []

	def _to_clip(frame_list):
		# Normalise one video's frames to a (frame_nums, joints, 3) tensor.
		if len(frame_list) >= frame_nums:
			# Long video: keep the first frame_nums frames in order.
			clip = torch.from_numpy(np.stack(frame_list[0:frame_nums], 0))
		else:
			# Short video: linearly interpolate each coordinate channel
			# up to frame_nums samples.
			stacked = np.stack(frame_list, 0)                    # [n, joints, 3]
			xloc = np.arange(stacked.shape[0])                   # original frame indices
			new_xloc = np.linspace(0, stacked.shape[0], frame_nums)
			flat = np.reshape(stacked, (stacked.shape[0], -1)).transpose()
			interped = [np.interp(new_xloc, xloc, channel) for channel in flat]
			clip = torch.from_numpy(np.stack(interped, 0)).t()
		return clip.view(frame_nums, -1, 3)

	def _flush(frame_list, video_label, actor, categ):
		# Finish one video: build its clip and route it to a split by actor id.
		clip = _to_clip(frame_list)
		labels = video_label if split_label == 1 else categ
		if actor in list1:
			test.append(clip)
			test_label.append(labels)
		elif actor in list2:
			valid.append(clip)
			valid_label.append(labels)
		else:
			train.append(clip)
			train_label.append(labels)

	frames = []
	prev_video = prev_actor = prev_categ = None
	prev_label = None
	for line in lines:
		parts = line.strip().split(' ')
		if len(parts) < 4:
			continue  # skip blank or malformed lines
		aid = int(parts[0])   # actor id
		vid = int(parts[1])   # video id
		cid = int(parts[2])   # class label
		label = list(map(int, parts[:3]))
		features = list(map(float, parts[3:]))   # flattened keypoints

		# A new video id means the previous video is complete: flush it
		# using the PREVIOUS video's own label/actor (fixed).
		if prev_video is not None and vid != prev_video:
			_flush(frames, prev_label, prev_actor, prev_categ)
			frames = []

		frames.append(np.reshape(np.asarray(features), (-1, 3)))  # one frame: [joints, 3]
		prev_video, prev_actor, prev_categ, prev_label = vid, aid, cid, label

	# Flush the last video too (fixed: it used to be dropped).
	if frames:
		_flush(frames, prev_label, prev_actor, prev_categ)

	train_label = torch.from_numpy(np.asarray(train_label))
	valid_label = torch.from_numpy(np.asarray(valid_label))
	test_label = torch.from_numpy(np.asarray(test_label))
	print(len(train_label), len(valid_label), len(test_label))
	print(test_label.shape)

	# NOTE(review): torch.stack raises on an empty split — the input must
	# contain at least one video for each of train/valid/test.
	torch.save((torch.stack(train, 0), train_label), save_path + 'train.pkl')
	torch.save((torch.stack(valid, 0), valid_label), save_path + 'valid.pkl')
	torch.save((torch.stack(test, 0), test_label), save_path + 'test.pkl')

	return random_nums



list1 = [15, 20, 25, 35, 42, 52]   # actor ids held out for the test split
list2 = [14, 19, 24, 33, 41, 51]   # actor ids held out for the validation split

split_label = 1    # 1 -> keep the actor/video ids inside each label
frame_nums = 64    # every clip is normalised to this many frames

forder = '2+3+4+5+6'
forder2 = '2+3+4+5+6_v2'

# Joint stream: build the train/valid/test pickles.
file_name1 = 'data/test9/joint/clear/' + str(forder) + '.txt'
save_path1 = 'data/test9/joint/clear/' + str(forder2) + "/"
os.makedirs(save_path1, exist_ok=True)
random_nums1 = get_feature(file_name1, save_path1, split_label, frame_nums, list1, list2)

# Bone stream: reuse the same split so joint/bone samples stay aligned.
file_name1 = 'data/test9/bone/clear/' + str(forder) + '.txt'
save_path1 = 'data/test9/bone/clear/' + str(forder2) + "/"
os.makedirs(save_path1, exist_ok=True)
random_nums = get_feature(file_name1, save_path1, split_label, frame_nums, list1, list2, random_nums1)

train.py   (joint+bone)

分别训练关节(joint)模型和骨骼(bone)模型,只需要修改代码中的路径即可。

from __future__ import print_function
import argparse
import os
import pickle
import random
import time
from collections import OrderedDict
import numpy as np
from utils import agcn,aagcn
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.optim.lr_scheduler import _LRScheduler
import torch.utils.data as data


def get_parser():
    """Build the command-line parser for single-stream (joint/bone) CPU training."""
    data_forder = "test8/joint/clear/2"
    data_root = 'data/' + str(data_forder)

    parser = argparse.ArgumentParser()

    # dataset paths and batching
    parser.add_argument('--train_path', type=str, default=data_root + '/train.pkl')
    parser.add_argument('--valid_path', type=str, default=data_root + '/valid.pkl')
    parser.add_argument('--test_path', type=str, default=data_root + '/test.pkl')
    parser.add_argument('--batch_size', type=int, default=8)
    parser.add_argument('--split_label', type=int, default=1)

    # run mode and output location
    parser.add_argument('--phase', default='train')
    parser.add_argument('--type', default='joint')
    parser.add_argument('--model_dir', default='model/test1/joint/')

    # pretrained weights / model class
    parser.add_argument('--weights', default=None, help='model/test1/joint/joint-1-128.pt')
    parser.add_argument('--model_name', default='utils.agcn.Model')

    # epoch schedule
    parser.add_argument('--start_epoch', type=int, default=0)
    parser.add_argument('--num_epoch', type=int, default=10)
    parser.add_argument('--save_interval', type=int, default=1)

    # devices
    parser.add_argument('--device', type=int, default=0)
    parser.add_argument('--parallel', default=False, help='多GPU使用')

    # optimization
    parser.add_argument('--base_lr', type=float, default=0.1, help='initial learning rate')
    parser.add_argument('--step', type=int, default=[30, 50, 80], nargs='+')
    parser.add_argument('--warm_up_epoch', default=0)
    parser.add_argument('--optimizer', default='SGD', help='type of optimizer')
    parser.add_argument('--weight-decay', type=float, default=0.0001, help='weight decay for optimizer')
    parser.add_argument('--dropout', type=float, default=0.2, help='weight decay for optimizer')

    # Freeze-all-but-'PA' schedule — presumably mirrors the 2s-AGCN
    # reference training recipe; see Processor.train.
    parser.add_argument('--only_train_part', default=False)
    parser.add_argument('--only_train_epoch', default=0)

    return parser

# 学习率预热,学习率实时调整
class GradualWarmupScheduler(_LRScheduler):
    """Linear learning-rate warm-up wrapper.

    Scales each base LR linearly over `total_epoch` epochs, then delegates
    every subsequent step to `after_scheduler` (here a MultiStepLR).
    """

    def __init__(self, optimizer, total_epoch, after_scheduler=None):
        self.total_epoch = total_epoch          # number of warm-up epochs
        self.after_scheduler = after_scheduler  # scheduler used once warm-up ends
        self.finished = False                   # NOTE(review): set but never read
        self.last_epoch = -1
        super().__init__(optimizer)

    def get_lr(self):
        # Warm-up ramp: (epoch + 1) / total_epoch of each base LR.
        # NOTE(review): divides by total_epoch — would raise ZeroDivisionError
        # if invoked with warm_up_epoch == 0; confirm it is never hit then.
        return [base_lr * (self.last_epoch + 1) / self.total_epoch for base_lr in self.base_lrs]

    def step(self, epoch=None, metric=None):
        # After warm-up, forward to the wrapped scheduler (with an optional
        # metric for metric-driven schedulers); during warm-up, use get_lr.
        if self.last_epoch >= self.total_epoch - 1:
            if metric is None:
                return self.after_scheduler.step(epoch)
            else:
                return self.after_scheduler.step(metric, epoch)
        else:
            return super(GradualWarmupScheduler, self).step(epoch)

class Processor():
    """Drives the train/validate lifecycle for one AGCN stream (joint or bone) on CPU."""

    def __init__(self, arg):
        self.arg = arg
        # Bookkeeping: optimizer step counter, best validation accuracy, current LR.
        self.global_step = 0
        self.best_acc = 0
        self.lr = self.arg.base_lr
        self.load_model()
        self.load_optimizer()
        self.load_data()

    def load_data(self):
        """Load the (tensor, label) pickles and wrap each split in a DataLoader."""
        # In test phase, validation runs on the test split.
        if self.arg.phase == "test":
            self.arg.valid_path = self.arg.test_path

        train_tensor, train_label = torch.load(self.arg.train_path)
        valid_tensor, valid_label = torch.load(self.arg.valid_path)
        test_tensor, test_label = torch.load(self.arg.test_path)

        # When labels also carry the actor/video ids, split them into
        # (class label, video file name).
        if self.arg.split_label == 1:
            train_label, self.train_vid = get_label_vid(train_label)
            valid_label, self.vaild_vid = get_label_vid(valid_label)  # 'vaild' kept for compat
            test_label, self.test_vid = get_label_vid(test_label)

        self.train_loader = data.DataLoader(data.TensorDataset(train_tensor, train_label),
                                            batch_size=self.arg.batch_size, shuffle=False)
        self.valid_loader = data.DataLoader(data.TensorDataset(valid_tensor, valid_label),
                                            batch_size=self.arg.batch_size, shuffle=False)
        self.test_loader = data.DataLoader(data.TensorDataset(test_tensor, test_label),
                                           batch_size=self.arg.batch_size, shuffle=False)

    def load_data2(self):
        """Alternative loader (kept for reference): batches data only, keeps labels whole."""
        if self.arg.phase == "test":
            self.arg.valid_path = self.arg.test_path

        train_tensor, train_label = torch.load(self.arg.train_path)
        valid_tensor, valid_label = torch.load(self.arg.valid_path)
        test_tensor, test_label = torch.load(self.arg.test_path)

        # (cleanup: the original called .to() with no arguments, a no-op)
        self.train_loader = data.DataLoader(data.TensorDataset(train_tensor),
                                            batch_size=self.arg.batch_size, shuffle=False)
        self.valid_loader = data.DataLoader(data.TensorDataset(valid_tensor),
                                            batch_size=self.arg.batch_size, shuffle=False)
        self.test_loader = data.DataLoader(data.TensorDataset(test_tensor),
                                           batch_size=self.arg.batch_size, shuffle=False)
        self.train_label = train_label
        self.valid_label = valid_label
        self.test_label = test_label

        # When labels also carry the actor/video ids, split them off.
        if self.arg.split_label == 1:
            self.train_label, self.train_vid = get_label_vid(train_label)
            self.valid_label, self.vaild_vid = get_label_vid(valid_label)
            self.test_label, self.test_vid = get_label_vid(test_label)

    def load_model(self):
        """Build the AGCN model and loss; optionally restore pretrained weights."""
        self.model = agcn.Model(dropout=self.arg.dropout).cpu()
        self.loss = nn.CrossEntropyLoss().cpu()

        if self.arg.weights:
            # Checkpoint names end in '-<global_step>.<ext>'; resume the counter.
            # (fixed: was reading the module-level `arg` instead of self.arg)
            self.global_step = int(self.arg.weights[:-3].split('-')[-1])
            print('Load weights from {}.'.format(self.arg.weights))

            if '.pkl' in self.arg.weights:
                # (fixed: pickle files must be opened in binary mode, not 'r')
                with open(self.arg.weights, 'rb') as f:
                    weights = pickle.load(f)
            else:
                weights = torch.load(self.arg.weights)

            # Strip any 'module.' prefix left by DataParallel checkpoints.
            weights = OrderedDict([[k.split('module.')[-1], v.cpu()] for k, v in weights.items()])

            try:
                self.model.load_state_dict(weights)
            except RuntimeError:
                # Partial restore: keep current values for keys the checkpoint lacks.
                state = self.model.state_dict()
                diff = list(set(state.keys()).difference(set(weights.keys())))
                print('Can not find these weights:', diff)
                state.update(weights)
                self.model.load_state_dict(state)

        if self.arg.parallel:
            # (fixed: `.cuda` was missing its call parentheses, which assigned
            #  a bound method instead of moving the model to the GPU)
            self.model = nn.DataParallel(self.model).cuda()

    def load_optimizer(self):
        """Create the optimizer and a warm-up + step-decay LR scheduler."""
        if self.arg.optimizer == 'SGD':
            self.optimizer = optim.SGD(
                self.model.parameters(),
                lr=self.arg.base_lr,
                momentum=0.9,
                nesterov=True,
                weight_decay=self.arg.weight_decay)
        elif self.arg.optimizer == 'Adam':
            self.optimizer = optim.Adam(
                self.model.parameters(),
                lr=self.arg.base_lr,
                weight_decay=self.arg.weight_decay)
        else:
            raise ValueError('unsupported optimizer: {}'.format(self.arg.optimizer))

        # Step decay (x0.1 at each milestone) wrapped in a linear warm-up.
        # NOTE(review): self.lr_scheduler is never stepped by the training
        # loop; adjust_learning_rate() drives the LR by hand instead.
        lr_scheduler_pre = optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=self.arg.step, gamma=0.1)
        self.lr_scheduler = GradualWarmupScheduler(self.optimizer, total_epoch=self.arg.warm_up_epoch,
                                                   after_scheduler=lr_scheduler_pre)

    def adjust_learning_rate(self, epoch):
        """Set the LR for this epoch: linear warm-up, then x0.1 at each milestone."""
        if self.arg.optimizer == 'SGD' or self.arg.optimizer == 'Adam':
            if epoch < self.arg.warm_up_epoch:
                # Warm-up: ramp linearly from base_lr/warm_up_epoch to base_lr.
                lr = self.arg.base_lr * (epoch + 1) / self.arg.warm_up_epoch
            else:
                # Decay by 0.1 for every milestone already passed.
                lr = self.arg.base_lr * (0.1 ** np.sum(epoch >= np.array(self.arg.step)))

            for param_group in self.optimizer.param_groups:
                param_group['lr'] = lr

            return lr

    def train(self, epoch, log):
        """Run one training epoch, log the stats, and periodically save a checkpoint."""
        # When resuming, offset the printed epoch by the epoch in the weight file name.
        if self.arg.weights:
            old_epoch = self.arg.weights.split('.')[0].split('-')[1]
        else:
            old_epoch = 0
        print('Training epoch: {}'.format(int(old_epoch) + epoch + 1))

        self.model.train()
        self.adjust_learning_rate(epoch)

        # Optionally freeze everything except the adaptive adjacency ('PA')
        # parameters until only_train_epoch has passed.
        # NOTE(review): presumably mirrors the 2s-AGCN recipe — confirm.
        if self.arg.only_train_part:
            if epoch > self.arg.only_train_epoch:
                print('only train part, require grad')
                unfreeze = True
            else:
                print('only train part, do not require grad')
                unfreeze = False
            for key, value in self.model.named_parameters():
                if 'PA' in key:
                    value.requires_grad = unfreeze

        loss_value = []
        acc_total = []
        # (fixed: the loop variable no longer shadows the torch.utils.data module)
        for batch_data, batch_label in self.train_loader:
            self.global_step += 1
            batch_data = batch_data.float()
            batch_label = batch_label.long()

            # forward
            output = self.model(batch_data)
            loss = self.loss(output, batch_label)

            # backward
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            loss_value.append(loss.data.item())

            # batch accuracy (stored as a plain float, not a tensor)
            _, pred = torch.max(output.data, 1)
            acc_total.append(torch.mean((pred == batch_label.data).float()).item())

            # keep the LR shown in logs in sync with the optimizer
            self.lr = self.optimizer.param_groups[0]['lr']

        loss = np.sum(loss_value)
        acc = np.mean(acc_total)
        print('training loss: {:.5f},  lr: {:.5f},  acc: {:.5f}'.format(loss, self.lr, acc))

        log.write('epoch: {},  lr:{:.5f},   loss:{:.5f},   acc: {:.5f}\n'.format(epoch + 1, self.lr, loss, acc))

        # Save every save_interval epochs and on the final epoch.
        if (epoch + 1) % self.arg.save_interval == 0 or (epoch + 1) == self.arg.num_epoch:
            state_dict = self.model.state_dict()
            weights = OrderedDict([[k.split('module.')[-1], v.cpu()] for k, v in state_dict.items()])

            save_path = self.arg.model_dir + self.arg.type + '-' + str(epoch + 1) + '-' + str(int(self.global_step)) + '.pkl'
            torch.save(weights, save_path)

    def eval(self, epoch, log):
        """Run one validation pass, log loss/accuracy, and track the best accuracy."""
        self.model.eval()
        loss_value = []
        acc_total = []

        with torch.no_grad():
            for batch_data, batch_label in self.valid_loader:
                batch_data = batch_data.float()
                batch_label = batch_label.long()

                output = self.model(batch_data)
                loss = self.loss(output, batch_label)
                loss_value.append(loss.data.item())

                # batch accuracy
                _, predict_label = torch.max(output.data, 1)
                acc_total.append(torch.mean((predict_label == batch_label.data).float()).item())

        loss = np.sum(loss_value)
        accuracy = np.mean(acc_total)
        if accuracy > self.best_acc:
            self.best_acc = accuracy
        print('valid loss: {:.5f},  valid_acc: {:.5f}'.format(loss, accuracy))

        log.write('epoch: {},   loss:{:.5f},   acc: {:.5f}\n'.format(epoch + 1, loss, accuracy))

    def start(self):
        """Entry point: run the training loop, or report accuracy in test phase."""
        if not os.path.exists(self.arg.model_dir):
            os.makedirs(self.arg.model_dir)
        train_txt = open(self.arg.model_dir + "/train_log.txt", "w+")
        valid_txt = open(self.arg.model_dir + "/valid_log.txt", "w+")

        try:
            if self.arg.phase == 'train':
                # Resume the step counter; each epoch contributes
                # len(train_loader) optimizer steps (one per batch).
                # (fixed: the original also divided by batch_size)
                self.global_step = self.arg.start_epoch * len(self.train_loader)

                for epoch in range(self.arg.start_epoch, self.arg.num_epoch):
                    star = time.time()
                    self.train(epoch, train_txt)
                    self.eval(epoch, valid_txt)
                    print("time:", time.time() - star)
                print('best accuracy: ', self.best_acc)

            elif self.arg.phase == 'test':
                # Placeholder: the real evaluation lives in test.py.
                print("acc:", 100, '%')
        finally:
            # (fixed: log files were never closed)
            train_txt.close()
            valid_txt.close()


# 设置训练加速参数
def init_seed(_):
    """Seed every RNG (CUDA, torch, numpy, random) and pin cuDNN to deterministic kernels."""
    for seed_fn in (torch.cuda.manual_seed_all, torch.manual_seed, np.random.seed, random.seed):
        seed_fn(1)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

# 返回agcn的Model
def import_class(name):
    """Resolve a dotted path such as 'utils.agcn.Model' to the object it names."""
    root, *attrs = name.split('.')
    obj = __import__(root)
    # Walk the remaining components via attribute access.
    for attr in attrs:
        obj = getattr(obj, attr)
    return obj

# 把视频名字和标签分离
def get_label_vid(data):
    """Split combined label rows into class labels and video file names.

    Each row of `data` is (actor_id, video_id, class_id); returns a
    LongTensor of class ids and an ndarray of '<actor>_<video>.mp4' names.
    """
    class_ids = [int(row[2]) for row in data]
    names = [str(int(row[0])) + '_' + str(int(row[1])) + '.mp4' for row in data]
    return torch.from_numpy(np.array(class_ids)), np.array(names)


if __name__ == '__main__':
    # Parse CLI arguments, seed every RNG, then run the training loop.
    arg = get_parser().parse_args()
    init_seed(0)
    processor = Processor(arg)
    processor.start()

train_gpu.py  (joint+bone)

from __future__ import print_function
import argparse
import os
import pickle
import random
import time
from collections import OrderedDict
import numpy as np
from utils import agcn,aagcn
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.optim.lr_scheduler import _LRScheduler,ReduceLROnPlateau
import torch.utils.data as data

device_type=torch.device('cuda')

def get_parser():
    """Build the command-line parser for single-stream (joint/bone) GPU training."""
    parser = argparse.ArgumentParser()

    # dataset paths and batching
    # (cleanup: removed the unused duplicate `data_forder2` local)
    data_forder = "test9/bone/clear/2+3+4+5+6_v3"
    parser.add_argument('--train_path', type=str, default='data/' + str(data_forder) + '/train.pkl')
    parser.add_argument('--valid_path', type=str, default='data/' + str(data_forder) + '/valid.pkl')
    parser.add_argument('--test_path', type=str, default='data/' + str(data_forder) + '/test.pkl')
    parser.add_argument('--batch_size', type=int, default=256)
    parser.add_argument('--split_label', type=int, default=1)

    # run mode and output location
    parser.add_argument('--phase', default='train')
    parser.add_argument('--type', default='bone')
    parser.add_argument('--model_dir', default='model/all/test2.3/bone/')

    # pretrained weights / model class
    parser.add_argument('--weights', default=None, help='model/test1/bone/bone-1-128.pt')
    parser.add_argument('--model_name', default='utils.agcn.Model')

    # epoch schedule
    parser.add_argument('--start_epoch', type=int, default=0)
    parser.add_argument('--num_epoch', type=int, default=100)
    parser.add_argument('--save_interval', type=int, default=20)

    # devices
    parser.add_argument('--device', type=int, default=1)
    parser.add_argument('--parallel', default=False, help='多GPU使用')

    # optimization
    parser.add_argument('--base_lr', type=float, default=0.1, help='initial learning rate')
    # (fixed: help text read 'the arning rate')
    parser.add_argument('--step', type=int, default=[30, 40, 80], nargs='+', help='the learning rate decay milestones')
    parser.add_argument('--warm_up_epoch', default=5)
    parser.add_argument('--optimizer', default='SGD', help='type of optimizer')
    parser.add_argument('--weight-decay', type=float, default=0.0001, help='weight decay for optimizer')
    # (fixed: help text was a copy-paste of the weight-decay help)
    parser.add_argument('--dropout', type=float, default=0.5, help='dropout rate')

    # Freeze-all-but-'PA' schedule — presumably mirrors the 2s-AGCN
    # reference training recipe; see Processor.train.
    parser.add_argument('--only_train_part', default=False)
    parser.add_argument('--only_train_epoch', default=5)

    return parser

# 学习率预热,学习率实时调整
class GradualWarmupScheduler(_LRScheduler):
    """Linear learning-rate warm-up wrapper.

    Scales each base LR linearly over `total_epoch` epochs, then delegates
    every subsequent step to `after_scheduler` (here a ReduceLROnPlateau).
    """

    def __init__(self, optimizer, total_epoch, after_scheduler=None):
        self.total_epoch = total_epoch          # number of warm-up epochs
        self.after_scheduler = after_scheduler  # scheduler used once warm-up ends
        self.finished = False                   # NOTE(review): set but never read
        self.last_epoch = -1
        super().__init__(optimizer)

    def get_lr(self):
        # Warm-up ramp: (epoch + 1) / total_epoch of each base LR.
        # NOTE(review): divides by total_epoch — would raise ZeroDivisionError
        # if invoked with warm_up_epoch == 0; confirm it is never hit then.
        return [base_lr * (self.last_epoch + 1) / self.total_epoch for base_lr in self.base_lrs]

    def step(self, epoch=None, metric=None):
        # After warm-up, forward to the wrapped scheduler (with an optional
        # metric for metric-driven schedulers such as ReduceLROnPlateau);
        # during warm-up, use get_lr.
        if self.last_epoch >= self.total_epoch - 1:
            if metric is None:
                return self.after_scheduler.step(epoch)
            else:
                return self.after_scheduler.step(metric, epoch)
        else:
            return super(GradualWarmupScheduler, self).step(epoch)

class Processor():
    """Drives the train/validate lifecycle for one AGCN stream (joint or bone) on GPU."""

    def __init__(self, arg):
        self.arg = arg
        # Bookkeeping: optimizer step counter, best validation accuracy, current LR.
        self.global_step = 0
        self.best_acc = 0
        self.lr = self.arg.base_lr
        self.load_model()
        self.load_optimizer()
        self.load_data()

    def load_data(self):
        """Load the (tensor, label) pickles, move them to the GPU, and build DataLoaders."""
        # In test phase, validation runs on the test split.
        if self.arg.phase == "test":
            self.arg.valid_path = self.arg.test_path

        train_tensor, train_label = torch.load(self.arg.train_path)
        valid_tensor, valid_label = torch.load(self.arg.valid_path)
        test_tensor, test_label = torch.load(self.arg.test_path)

        # When labels also carry the actor/video ids, split them into
        # (class label, video file name).
        if self.arg.split_label == 1:
            train_label, self.train_vid = get_label_vid(train_label)
            valid_label, self.vaild_vid = get_label_vid(valid_label)  # 'vaild' kept for compat
            test_label, self.test_vid = get_label_vid(test_label)

        # Whole splits are staged on the GPU up front; batches then need no host copy.
        self.train_loader = data.DataLoader(
            data.TensorDataset(train_tensor.to(device_type), train_label.to(device_type)),
            batch_size=self.arg.batch_size, shuffle=False)
        self.valid_loader = data.DataLoader(
            data.TensorDataset(valid_tensor.to(device_type), valid_label.to(device_type)),
            batch_size=self.arg.batch_size, shuffle=False)
        self.test_loader = data.DataLoader(
            data.TensorDataset(test_tensor.to(device_type), test_label.to(device_type)),
            batch_size=self.arg.batch_size, shuffle=False)
        print("train_nums:", len(train_label), "walid_nums:", len(valid_label))

    def load_model(self):
        """Build the AGCN model and loss on the target GPU; optionally restore weights."""
        self.model = agcn.Model(dropout=self.arg.dropout).cuda(self.arg.device)
        self.loss = nn.CrossEntropyLoss().cuda(self.arg.device)

        if self.arg.weights:
            # Checkpoint names end in '-<global_step>.<ext>'; resume the counter.
            # (fixed: was reading the module-level `arg` instead of self.arg)
            self.global_step = int(self.arg.weights[:-3].split('-')[-1])
            print('Load weights from {}.'.format(self.arg.weights))

            if '.pkl' in self.arg.weights:
                # (fixed: pickle files must be opened in binary mode, not 'r')
                with open(self.arg.weights, 'rb') as f:
                    weights = pickle.load(f)
            else:
                weights = torch.load(self.arg.weights)

            # Strip any 'module.' prefix left by DataParallel checkpoints.
            weights = OrderedDict([[k.split('module.')[-1], v.cpu()] for k, v in weights.items()])

            try:
                self.model.load_state_dict(weights)
            except RuntimeError:
                # Partial restore: keep current values for keys the checkpoint lacks.
                state = self.model.state_dict()
                diff = list(set(state.keys()).difference(set(weights.keys())))
                print('Can not find these weights:', diff)
                state.update(weights)
                self.model.load_state_dict(state)

        if self.arg.parallel:
            # (fixed: `.cuda` was missing its call parentheses, which assigned
            #  a bound method instead of moving the model to the GPU)
            self.model = nn.DataParallel(self.model).cuda()

    def load_optimizer(self):
        """Create the optimizer and a warm-up + plateau LR scheduler."""
        if self.arg.optimizer == 'SGD':
            self.optimizer = optim.SGD(
                self.model.parameters(),
                lr=self.arg.base_lr,
                momentum=0.9,
                nesterov=True,
                weight_decay=self.arg.weight_decay)
        elif self.arg.optimizer == 'Adam':
            self.optimizer = optim.Adam(
                self.model.parameters(),
                lr=self.arg.base_lr,
                weight_decay=self.arg.weight_decay)
        else:
            raise ValueError('unsupported optimizer: {}'.format(self.arg.optimizer))

        # Halve the LR when the monitored metric stops improving.
        # NOTE(review): self.lr_scheduler is never stepped by the training
        # loop; adjust_learning_rate() drives the LR by hand instead. If it
        # is ever stepped, ReduceLROnPlateau needs a metric argument.
        lr_scheduler_pre = ReduceLROnPlateau(self.optimizer, mode='min', factor=0.5, patience=10,
                                             verbose=False, threshold=0.0001, threshold_mode='rel',
                                             cooldown=0, min_lr=0, eps=1e-08)
        self.lr_scheduler = GradualWarmupScheduler(self.optimizer, total_epoch=self.arg.warm_up_epoch,
                                                   after_scheduler=lr_scheduler_pre)

    def adjust_learning_rate(self, epoch):
        """Set the LR for this epoch: linear warm-up, then x0.1 at each milestone."""
        if self.arg.optimizer == 'SGD' or self.arg.optimizer == 'Adam':
            if epoch < self.arg.warm_up_epoch:
                # Warm-up: ramp linearly from base_lr/warm_up_epoch to base_lr.
                lr = self.arg.base_lr * (epoch + 1) / self.arg.warm_up_epoch
            else:
                # Decay by 0.1 for every milestone already passed.
                lr = self.arg.base_lr * (0.1 ** np.sum(epoch >= np.array(self.arg.step)))

            for param_group in self.optimizer.param_groups:
                param_group['lr'] = lr

            return lr

    def train(self, epoch, log):
        """Run one training epoch, log the stats, and periodically save a checkpoint."""
        # When resuming, offset the printed epoch by the epoch in the weight file name.
        if self.arg.weights:
            old_epoch = self.arg.weights.split('.')[0].split('-')[1]
        else:
            old_epoch = 0
        print('Training epoch: {}'.format(int(old_epoch) + epoch + 1))

        self.model.train()
        self.adjust_learning_rate(epoch)

        # Optionally freeze everything except the adaptive adjacency ('PA')
        # parameters until only_train_epoch has passed.
        # NOTE(review): presumably mirrors the 2s-AGCN recipe — confirm.
        if self.arg.only_train_part:
            if epoch > self.arg.only_train_epoch:
                unfreeze = True
            else:
                print('only train part, do not require grad')
                unfreeze = False
            for key, value in self.model.named_parameters():
                if 'PA' in key:
                    value.requires_grad = unfreeze

        loss_value = []
        acc_total = []
        # (fixed: the loop variable no longer shadows the torch.utils.data module)
        for batch_data, batch_label in self.train_loader:
            self.global_step += 1

            batch_data = batch_data.float().cuda(self.arg.device)
            batch_label = batch_label.long().cuda(self.arg.device)

            # forward
            output = self.model(batch_data)
            loss = self.loss(output, batch_label)

            # backward
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            loss_value.append(loss.data.item())

            # batch accuracy (stored as a plain float, not a tensor)
            _, pred = torch.max(output.data, 1)
            acc_total.append(torch.mean((pred == batch_label.data).float()).item())

            # keep the LR shown in logs in sync with the optimizer
            self.lr = self.optimizer.param_groups[0]['lr']

        loss = np.sum(loss_value)
        acc = np.mean(acc_total)
        print('training loss: {:.5f},  lr: {:.5f},  acc: {:.5f}'.format(loss, self.lr, acc))

        log.write('epoch: {},  lr:{:.5f},   loss:{:.5f},   acc: {:.5f}\n'.format(epoch + 1, self.lr, loss, acc))

        # Save every save_interval epochs and on the final epoch.
        if (epoch + 1) % self.arg.save_interval == 0 or (epoch + 1) == self.arg.num_epoch:
            if not os.path.exists(self.arg.model_dir):
                os.makedirs(self.arg.model_dir)

            state_dict = self.model.state_dict()
            weights = OrderedDict([[k.split('module.')[-1], v.cpu()] for k, v in state_dict.items()])

            save_path = self.arg.model_dir + self.arg.type + '-' + str(epoch + 1) + '.pt'
            torch.save(weights, save_path)

    def eval(self, epoch, log):
        """Run one validation pass, log loss/accuracy, and track the best accuracy."""
        self.model.eval()
        loss_value = []
        acc_total = []

        with torch.no_grad():
            for batch_data, batch_label in self.valid_loader:
                batch_data = batch_data.float().cuda(self.arg.device)
                batch_label = batch_label.long().cuda(self.arg.device)

                output = self.model(batch_data)
                loss = self.loss(output, batch_label)
                loss_value.append(loss.data.item())

                # batch accuracy
                _, predict_label = torch.max(output.data, 1)
                acc_total.append(torch.mean((predict_label == batch_label.data).float()).item())

        loss = np.sum(loss_value)
        accuracy = np.mean(acc_total)
        if accuracy > self.best_acc:
            self.best_acc = accuracy
        print('valid loss: {:.5f},  valid_acc: {:.5f}'.format(loss, accuracy))

        log.write('epoch: {},   loss:{:.5f},   acc: {:.5f}\n'.format(epoch + 1, loss, accuracy))

    def start(self):
        """Entry point: run the training loop, or report accuracy in test phase."""
        if not os.path.exists(self.arg.model_dir):
            os.makedirs(self.arg.model_dir)
        train_txt = open(self.arg.model_dir + "/train_log.txt", "w+")
        valid_txt = open(self.arg.model_dir + "/valid_log.txt", "w+")

        try:
            if self.arg.phase == 'train':
                # Resume the step counter; each epoch contributes
                # len(train_loader) optimizer steps (one per batch).
                # (fixed: the original also divided by batch_size)
                self.global_step = self.arg.start_epoch * len(self.train_loader)

                for epoch in range(self.arg.start_epoch, self.arg.num_epoch):
                    star = time.time()
                    self.train(epoch, train_txt)
                    self.eval(epoch, valid_txt)
                    print("time:", time.time() - star)
                print('best accuracy: ', self.best_acc)

            elif self.arg.phase == 'test':
                # Placeholder: the real evaluation lives in test.py.
                print("acc:", 100, '%')
        finally:
            # (fixed: log files were never closed)
            train_txt.close()
            valid_txt.close()


# 设置训练加速参数
def init_seed(_):
    """Seed every RNG source with a fixed value (1) for reproducible runs.

    The argument is intentionally ignored; all seeds are hard-coded so that
    repeated runs produce identical results.  cuDNN is forced into its
    deterministic, non-benchmarking mode.
    """
    fixed = 1
    torch.cuda.manual_seed_all(fixed)
    torch.manual_seed(fixed)
    np.random.seed(fixed)
    random.seed(fixed)
    # Deterministic kernel selection (trades some speed for reproducibility).
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

# 返回agcn的Model
def import_class(name):
    """Resolve a dotted path such as 'utils.agcn.Model' to the leaf object.

    Imports the root package, then walks the remaining attributes with
    ``getattr`` and returns whatever the path points at (class, function, ...).
    """
    root, *attrs = name.split('.')
    obj = __import__(root)
    for attr in attrs:
        obj = getattr(obj, attr)
    return obj

# 把视频名字和标签分离
def get_label_vid(data):
    """Split rows of (task_id, video_id, class_id, ...) into labels and names.

    Returns a pair ``(labels, vids)``: ``labels`` is a torch tensor of the
    class ids (column 2) and ``vids`` is a numpy array of video file names of
    the form '<task>_<video>.mp4' built from columns 0 and 1.
    """
    class_ids = [int(row[2]) for row in data]
    names = ['{}_{}.mp4'.format(int(row[0]), int(row[1])) for row in data]
    return torch.from_numpy(np.array(class_ids)), np.array(names)


if __name__ == '__main__':
    parser = get_parser()
    arg = parser.parse_args()

    # Seed all RNGs / cuDNN flags for reproducibility (argument is ignored).
    init_seed(0)
    # Build the processor and run the configured phase (train/test).
    processor = Processor(arg)
    processor.start()

test.py   (joint+bone)

分别测试骨骼模型和骨架模型,只需要修改代码中的路径即可。

from __future__ import print_function
import argparse
import os
import pickle
import random
import time
from collections import OrderedDict
import numpy as np
from utils import agcn,aagcn
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.optim.lr_scheduler import _LRScheduler
import torch.utils.data as data


def get_parser():
    """Build the argument parser for the single-stream (joint/bone) test script.

    Defaults are set for the test phase with batch size 1; data and weight
    paths point at the joint stream and are overridden for the bone stream.

    Returns:
        argparse.ArgumentParser: fully configured parser.
    """
    parser = argparse.ArgumentParser()

    # data
    data_forder = "test8/joint/clear/2"
    parser.add_argument('--train_path', type=str, default='data/'+str(data_forder)+'/train.pkl')
    parser.add_argument('--valid_path', type=str, default='data/'+str(data_forder)+'/valid.pkl')
    parser.add_argument('--test_path',  type=str, default='data/'+str(data_forder)+'/test.pkl')
    parser.add_argument('--batch_size',  type=int, default=1)
    parser.add_argument('--split_label',type=int,default=1)

    # model
    parser.add_argument('--phase', default='test')
    parser.add_argument('--type', default='joint')
    parser.add_argument('--model_dir', default='model/test1/joint/')
    # weight
    parser.add_argument('--weights', default='model/test1/joint/joint-2-904.pkl')
    parser.add_argument('--model_name', default='utils.agcn.Model')
    # epoch
    parser.add_argument('--start_epoch',type=int,default=0)
    parser.add_argument('--num_epoch',type=int,default=2)
    parser.add_argument('--save_interval',type=int,default=1)
    parser.add_argument('--eval_interval',type=int,default=5)

    # gpu
    parser.add_argument('--device',type=int,default=0)
    parser.add_argument('--parallel',default=False,help='多GPU使用')

    # optim
    parser.add_argument('--base_lr', type=float, default=0.1, help='initial learning rate')
    # Fix: help text was the garbled 'the arning rate'.
    parser.add_argument('--step',type=int,default=[30, 40],nargs='+',help='epochs at which the learning rate decays')
    parser.add_argument('--warm_up_epoch', default=0)
    parser.add_argument('--optimizer', default='SGD', help='type of optimizer')
    parser.add_argument('--weight-decay',type=float,default=0.0001,help='weight decay for optimizer')

    # Freeze/unfreeze control for the adaptive-graph ('PA') parameters.
    parser.add_argument('--only_train_part', default=False)
    parser.add_argument('--only_train_epoch', default=0)

    return parser

# 学习率预热,学习率实时调整
class GradualWarmupScheduler(_LRScheduler):
    """Linear learning-rate warm-up wrapper.

    Scales each base LR by (last_epoch + 1) / total_epoch for the first
    ``total_epoch`` epochs, then delegates stepping to ``after_scheduler``.

    NOTE(review): ``get_lr`` divides by ``total_epoch``; a warm-up of 0
    epochs relies on ``step`` short-circuiting to ``after_scheduler`` before
    ``get_lr`` is used — confirm against the installed torch version's
    ``_LRScheduler`` behavior.
    """
    def __init__(self, optimizer, total_epoch, after_scheduler=None):
        # Number of warm-up epochs before deferring to after_scheduler.
        self.total_epoch = total_epoch
        self.after_scheduler = after_scheduler
        self.finished = False
        self.last_epoch = -1
        super().__init__(optimizer)

    def get_lr(self):
        # Linear ramp: fraction (last_epoch + 1) / total_epoch of each base LR.
        return [base_lr * (self.last_epoch + 1) / self.total_epoch for base_lr in self.base_lrs]

    def step(self, epoch=None, metric=None):
        # Warm-up finished: hand stepping over to the wrapped scheduler
        # (with or without a metric, e.g. for ReduceLROnPlateau-style APIs).
        if self.last_epoch >= self.total_epoch - 1:
            if metric is None:
                return self.after_scheduler.step(epoch)
            else:
                return self.after_scheduler.step(metric, epoch)
        else:
            return super(GradualWarmupScheduler, self).step(epoch)

class Processor():
    """Train/test driver for one AGCN stream (joint or bone), running on CPU.

    Loads (tensor, label) pairs saved with ``torch.save``, restores model
    weights from a checkpoint, and evaluates one sample at a time.  The
    positional label indexing in ``train``/``eval`` requires batch_size == 1.
    """

    def __init__(self, arg):
        """Store the parsed args, then build model, optimizer and loaders."""
        self.arg = arg
        # Running state shared by train/eval.
        self.global_step = 0
        self.best_acc = 0
        self.lr = self.arg.base_lr
        self.load_model()
        self.load_optimizer()
        self.load_data()

    def load_data(self):
        """Load the train/valid/test tensors and wrap them in DataLoaders.

        In test phase the validation path is replaced by the test path, so
        ``eval`` scores the test split.
        """
        if self.arg.phase == "test":
            self.arg.valid_path = self.arg.test_path

        # Each .pkl file is a (tensor, label) pair saved via torch.save.
        train_tensor, train_label = torch.load(self.arg.train_path)
        valid_tensor, valid_label = torch.load(self.arg.valid_path)
        test_tensor, test_label = torch.load(self.arg.test_path)

        # shuffle=False keeps loader order aligned with the label tensors,
        # which train/eval index positionally as self.*_label[i].
        self.train_loader = data.DataLoader(data.TensorDataset(train_tensor.to()),
                                            batch_size=self.arg.batch_size, shuffle=False)
        self.valid_loader = data.DataLoader(data.TensorDataset(valid_tensor.to()),
                                            batch_size=self.arg.batch_size, shuffle=False)
        self.test_loader = data.DataLoader(data.TensorDataset(test_tensor.to()),
                                           batch_size=self.arg.batch_size, shuffle=False)
        self.train_label = train_label.to()
        self.valid_label = valid_label.to()
        self.test_label = test_label.to()

        # Optionally split '<task>_<video>.mp4' names away from class labels.
        if self.arg.split_label == 1:
            self.train_label, self.train_vid = get_label_vid(train_label)
            # NOTE(review): 'vaild_vid' is a historic typo kept for
            # compatibility with existing code that reads this attribute.
            self.valid_label, self.vaild_vid = get_label_vid(valid_label)
            self.test_label, self.test_vid = get_label_vid(test_label)

    def load_model(self):
        """Build the CPU model and loss, optionally restoring saved weights."""
        self.model = agcn.Model().cpu()
        self.loss = nn.CrossEntropyLoss().cpu()

        if self.arg.weights:
            # Recover the global step encoded in the checkpoint name,
            # e.g. 'joint-2-904.pkl' -> 904.
            # Fix: read self.arg.weights, not the module-level global 'arg'.
            self.global_step = int(self.arg.weights.split('.')[0].split('-')[-1])
            print('Load weights from {}.'.format(self.arg.weights))

            weights = torch.load(self.arg.weights)
            self.model.load_state_dict(weights)

        # Multi-GPU acceleration.
        if self.arg.parallel:
            # Fix: '.cuda' was missing its call parentheses, which replaced
            # the model with a bound method instead of moving it to GPU.
            self.model = nn.DataParallel(self.model).cuda()

    def load_optimizer(self):
        """Create the optimizer plus warm-up and multi-step LR schedulers."""
        if self.arg.optimizer == 'SGD':
            self.optimizer = optim.SGD(
                self.model.parameters(),
                lr=self.arg.base_lr,
                momentum=0.9,
                nesterov=True,
                weight_decay=self.arg.weight_decay)
        elif self.arg.optimizer == 'Adam':
            self.optimizer = optim.Adam(
                self.model.parameters(),
                lr=self.arg.base_lr,
                weight_decay=self.arg.weight_decay)

        # Decay the LR by 0.1 at each milestone epoch...
        lr_scheduler_pre = optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=self.arg.step, gamma=0.1)
        # ...after an optional linear warm-up from a small LR to base_lr.
        self.lr_scheduler = GradualWarmupScheduler(self.optimizer, total_epoch=self.arg.warm_up_epoch,
                                                   after_scheduler=lr_scheduler_pre)

    def adjust_learning_rate(self, epoch):
        """Manually set the LR: linear warm-up, then 0.1x decay at milestones.

        Returns the learning rate that was applied (or None for an unknown
        optimizer type, matching the original behavior).
        """
        if self.arg.optimizer == 'SGD' or self.arg.optimizer == 'Adam':
            if epoch < self.arg.warm_up_epoch:
                # Ramp linearly up to base_lr over the warm-up epochs.
                lr = self.arg.base_lr * (epoch + 1) / self.arg.warm_up_epoch
            else:
                # One factor of 0.1 per milestone already passed.
                lr = self.arg.base_lr * (0.1 ** np.sum(epoch >= np.array(self.arg.step)))

            for param_group in self.optimizer.param_groups:
                param_group['lr'] = lr

            return lr

    def train(self, epoch):
        """Run one training epoch and checkpoint every save_interval epochs."""
        print('Training epoch: {}'.format(epoch + 1))
        self.model.train()
        self.adjust_learning_rate(epoch)

        # Optionally freeze/unfreeze the adaptive-graph ('PA') parameters.
        if self.arg.only_train_part:
            if epoch > self.arg.only_train_epoch:
                print('only train part, require grad')
                for key, value in self.model.named_parameters():
                    if 'PA' in key:
                        value.requires_grad = True
            else:
                print('only train part, do not require grad')
                for key, value in self.model.named_parameters():
                    if 'PA' in key:
                        value.requires_grad = False

        loss_value = []
        acc_total = []
        for i, data_batch in enumerate(self.train_loader):
            self.global_step += 1

            # Positional label lookup: requires batch_size == 1.
            data_batch = data_batch[0].float()
            labels_batch = self.train_label[i].view(1).long()

            data = Variable(data_batch, requires_grad=False)
            label = Variable(labels_batch, requires_grad=False)

            # forward
            output = self.model(data)
            loss = self.loss(output, label)

            # backward
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            loss_value.append(loss.data.item())

            # Compare prediction against ground truth.
            value, pred = torch.max(output.data, 1)
            acc = torch.mean((pred == label.data).float())
            # Fix: store plain floats so np.mean below gets numeric input
            # rather than a list of 0-dim tensors.
            acc_total.append(acc.item())

            # statistics
            self.lr = self.optimizer.param_groups[0]['lr']

        loss = np.mean(loss_value)
        acc = np.mean(acc_total)
        print('training loss: {:.5f},  acc: {:.5f}'.format(loss, acc))

        # Save every save_interval epochs and always at the final epoch.
        if (epoch + 1) % self.arg.save_interval == 0 or (epoch + 1) == self.arg.num_epoch:
            os.makedirs(self.arg.model_dir, exist_ok=True)

            # Strip any DataParallel 'module.' prefix and move weights to CPU
            # so the checkpoint loads into a plain single-device model.
            state_dict = self.model.state_dict()
            weights = OrderedDict([[k.split('module.')[-1], v.cpu()] for k, v in state_dict.items()])

            save_path = self.arg.model_dir+self.arg.type+'-'+str(epoch)+'-'+str(int(self.global_step))+'.pt'
            torch.save(weights, save_path)

    def eval(self):
        """Score the validation loader (the test split when phase == 'test')."""
        # Fix: removed leftover debug print that dumped the full state dict.
        self.model.eval()
        loss_value = []
        score_frag = []
        acc_total = []

        for i, data_batch in enumerate(self.valid_loader):
            data_batch = data_batch[0].float()
            labels_batch = self.valid_label[i].view(1).long()

            with torch.no_grad():
                data = Variable(data_batch, requires_grad=False)
                label = Variable(labels_batch, requires_grad=False)

                output = self.model(data)
                loss = self.loss(output, label)

                score_frag.append(output.data.cpu().numpy())
                loss_value.append(loss.data.item())

                # Compare prediction against ground truth.
                value, pred = torch.max(output.data, 1)
                acc = torch.mean((pred == label.data).float())
                acc_total.append(acc.item())

        score = np.concatenate(score_frag)
        loss = np.mean(loss_value)

        # Validation-set accuracy; track the best seen so far.
        accuracy = np.mean(acc_total)
        if accuracy > self.best_acc:
            self.best_acc = accuracy
        # Fix: corrected misspelled 'Vaild' in the user-facing message.
        print('Valid Accuracy: ', accuracy)

    def start(self):
        """Dispatch on phase: training is a no-op here; test runs eval()."""
        if self.arg.phase == 'train':
            print('train finish!')

        elif self.arg.phase == 'test':
            self.eval()


# 设置训练加速参数
def init_seed(_):
    """Fix all RNG seeds to 1 and force deterministic cuDNN behavior.

    The parameter is ignored on purpose: every run is seeded identically so
    that results are reproducible.
    """
    for seed_fn in (torch.cuda.manual_seed_all, torch.manual_seed,
                    np.random.seed, random.seed):
        seed_fn(1)
    # Deterministic kernels, no auto-tuning.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

# 返回agcn的Model
def import_class(name):
    """Import a dotted path (e.g. 'utils.agcn.Model') and return the leaf.

    The root package is imported, then each remaining path segment is
    resolved as an attribute of the previous one.
    """
    segments = name.split('.')
    target = __import__(segments[0])
    for segment in segments[1:]:
        target = getattr(target, segment)
    return target

# 把视频名字和标签分离
def get_label_vid(data):
    """Separate class labels from video names.

    Each row of ``data`` is (task_id, video_id, class_id, ...).  Returns a
    torch tensor of class ids and a numpy array of '<task>_<video>.mp4'
    file names.
    """
    class_ids = []
    file_names = []
    for row in data:
        class_ids.append(int(row[2]))
        file_names.append('{}_{}.mp4'.format(int(row[0]), int(row[1])))
    return torch.from_numpy(np.array(class_ids)), np.array(file_names)


if __name__ == '__main__':
    parser = get_parser()
    arg = parser.parse_args()

    # Seed all RNGs / cuDNN flags for reproducibility (argument is ignored).
    init_seed(0)
    # Build the processor and run the configured phase (train/test).
    processor = Processor(arg)
    processor.start()

ensemble.py 

合并测试=骨骼模型测试+骨架模型测试

import torch
import numpy as np
import test

# Joint stream: per-sample class scores, labels and video names.
joint_weights='model/test1/joint/joint-1-904.pt'
joint_data_forder = "test8/joint/clear/3"
joint_score, joint_label, joint_vid = test.main(joint_data_forder, joint_weights)

# Bone stream.
bone_weights='model/test1/bone/bone-1-128.pt'
bone_data_forder = "test8/bone/clear/3"
bone_score, bone_label, bone_vid = test.main(bone_data_forder, bone_weights)

# Two-stream fusion: add the per-class scores and take the argmax.
acc_total = []
for i in range(len(joint_score)):
    if joint_label[i] != bone_label[i]:
        # The two streams must be aligned sample-for-sample; stop if not.
        print("节点不对!")
        break

    # Ground-truth label as a 1-element long tensor.
    label = joint_label[i].view(1).long()

    # Fused score vector, shaped (1, num_classes) for torch.max.
    score = torch.from_numpy(joint_score[i] + bone_score[i]).view(1, -1)

    # Predicted class = argmax over the fused scores.
    _, pred = torch.max(score, 1)
    acc_total.append((pred == label).float())

    # Log misclassified samples for inspection.
    if pred != label:
        print(pred, label, joint_vid[i], bone_vid[i])

acc = np.mean(acc_total)
print(acc)

data/train_bone.yaml

训练骨骼模型的参数,训练骨架模型时修改下路径即可

# Output locations for logs and checkpoints of the bone stream.
work_dir: model/work_dir/agcn_bone
model_saved_name: model/test1/bone/bone

# feeder
feeder: feeders.feeder.Feeder
train_feeder_args:
  data_path: ./data/ntu/xview/train_data_bone.npy
  label_path: ./data/ntu/xview/train_label.pkl
  debug: False
  random_choose: False
  random_shift: False
  random_move: False
  window_size: -1           # -1 presumably means "use the full sequence" — confirm in feeder
  normalization: False

test_feeder_args:
  data_path: ./data/ntu/xview/val_data_bone.npy
  label_path: ./data/ntu/xview/val_label.pkl

# model
model: utils.agcn.Model
model_args:
  num_class: 4              # number of gesture classes
  num_point: 15             # skeleton keypoints per frame
  num_person: 1
  graph: graph.ntu_rgb_d.Graph
  graph_args:
    labeling_mode: 'spatial'

#optim
weight_decay: 0.0001
base_lr: 0.1
step: [30, 40]              # epochs at which the learning rate is decayed

# training
device: [0,1]
batch_size: 1
test_batch_size: 1
num_epoch: 200
nesterov: True
parallel: False


data/test_bone.yaml

测试骨骼模型的参数,测试骨架模型时修改下路径即可

# feeder
feeder: feeders.feeder.Feeder
test_feeder_args:
  data_path: ./data/ntu/xsub/val_data_bone.npy
  label_path: ./data/ntu/xsub/val_label.pkl
  debug: False

# model
model: model.agcn.Model
model_args:
  num_class: 4              # number of gesture classes
  num_point: 15             # skeleton keypoints per frame
  num_person: 1
  graph: graph.ntu_rgb_d.Graph
  graph_args:
    labeling_mode: 'spatial'

# test
phase: test
device: [0,1]
test_batch_size: 1
weights: model/test1/bone-1-128.pt    # checkpoint to evaluate

work_dir: model/work_dir/agcn_bone
model_saved_name: model/test1/bone
save_score: True

# Data splits for the bone stream; only test_path is used in test phase.
train_path: data/test8/bone/clear/2/train.pkl
valid_path: data/test8/bone/clear/2/valid.pkl
test_path: data/test8/bone/clear/2/test.pkl

  • 1
    点赞
  • 9
    收藏
    觉得还不错? 一键收藏
  • 4
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 4
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值