目的:人在穿戴手表的时候,通过6轴传感器采集到三个方向的加速度acc和角速度gyro,以此检测人体的运动状态,是处于静止(坐着)、走路或跑步状态。
方法1:随机森林(AdaBoost/单一决策树/随机森林/极限随机树)
数据集:20-30人
准确度:0.95
尝试特征:夹角余弦、方差、梯度、中位数、波峰值、平方根、累计加速度
训练技巧:
1.数据解析+清洗,使用滑窗方式,间隔20个点
2.下采样间隔取点,归一化,pca降维
摸索阶段尝试方法:
尝试方法
测试结果
原数据结构:
清洗后数据结构:
先介绍方法1,使用机器学习方法进行训练
数据清洗:
滑窗获取数据:窗口长度为200个点,滑动步长为20个点,每个窗口作为一条数据
import json as js
import os
import numpy as np
def get_txt(file_name, gg, label):
    """Convert raw 16-bit sensor readings to signed values and write
    sliding windows of 200 samples (stride 20) to ``file_name``.

    Each output line has the form ``<label>,<v0>,<v1>,...,<v199>``.

    NOTE: mutates ``gg`` in place (unsigned -> signed conversion).

    Args:
        file_name: output txt path.
        gg: list of raw unsigned 16-bit integer samples.
        label: class label written as the first field of every line.
    """
    # Unsigned 16-bit -> signed two's complement: values >= 32768 are
    # negative, i.e. subtract 65536.
    for i in range(len(gg)):
        if gg[i] >= 32768:
            gg[i] = gg[i] - 65536
    # Sliding window: window length 200 samples, stride 20 samples.
    stride = 20
    window = 200
    total = int(len(gg) / stride) - int(window / stride)
    # fix: use a context manager so the file handle is always closed.
    with open(file_name, 'w+') as out:
        for i in range(total + 1):
            win = gg[i * stride:i * stride + window]
            # avoid shadowing the builtin `id` used by the original
            line = ",".join('%s' % v for v in win)
            out.write(str(label) + ',' + line + "\n")
def read_json(save_path, file_path):
    """Parse one line-delimited JSON sensor log, gather the per-axis
    samples, and dump sliding-window txt files via ``get_txt``.

    Returns:
        (ax, ay, az, gx, gy, gz, ppg2, ppg3) raw sample lists.
    """
    # (JSON field, samples kept per record; None keeps the whole list)
    spec = [
        ('accX', 10), ('accY', 10), ('accZ', 10),
        ('gyroA', 10), ('gyroB', 10), ('gyroC', 10),
        ('ppgDataChannel1Ambient', None),
        ('ppgDataChannel2Green', None),
        ('ppgDataChannel6Infrared', None),
    ]
    buffers = {key: [] for key, _ in spec}
    with open(file_path, 'r') as f:
        for raw in f.readlines():
            record = js.loads(raw)
            for key, keep in spec:
                if key in record:
                    if keep is None:
                        buffers[key].extend(record[key])
                    else:
                        buffers[key].extend(record[key][0:keep])
    ax, ay, az = buffers['accX'], buffers['accY'], buffers['accZ']
    gx, gy, gz = buffers['gyroA'], buffers['gyroB'], buffers['gyroC']
    ppg2 = buffers['ppgDataChannel2Green']
    ppg3 = buffers['ppgDataChannel6Infrared']
    # Label = last character of the first space-separated token of the
    # file name (e.g. "run1 foo.json" -> "1").
    name = file_path.split('/')[-1].split('.')[0]
    label = name.split(' ')[0][-1]
    # Convert + window every channel into its own txt file.
    prefix = save_path + name + '_' + str(label)
    for suffix, series in (('-ax', ax), ('-ay', ay), ('-az', az),
                           ('-gx', gx), ('-gy', gy), ('-gz', gz),
                           ('-ppg2', ppg2), ('-ppg3', ppg3)):
        get_txt(prefix + suffix + ".txt", series, label)
    return ax, ay, az, gx, gy, gz, ppg2, ppg3
path = "test/test6/src/"
save_path = "test/test6/clear-200/"

# Create the output directory on first run.
if not os.path.exists(save_path):
    os.mkdir(save_path)

# Convert every raw JSON log under `path` into windowed txt files and
# report how many acc / gyro samples each log contained.
for fname in os.listdir(path):
    src_file = path + fname
    ax, ay, az, gx, gy, gz, ppg2, ppg3 = read_json(save_path, src_file)
    print(fname, len(ax), len(gx))
选取特征+模型训练(sklearn)
import numpy as np
import cv2
import shutil
import os
from sklearn.model_selection import *
from numpy import *
from sklearn.decomposition import PCA
import pandas as pd
import time
from sklearn.metrics import accuracy_score
from sklearn.ensemble import AdaBoostClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV, KFold
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.ensemble import ExtraTreesClassifier
from sklearn import preprocessing
def get_angle(x, y):
    """Return (|cos t|, t in degrees) for the angle t between x and y.

    The cosine is folded to [0, 1] via abs(), so the angle is always in
    [0, 90] degrees.

    Args:
        x, y: 1-D numpy arrays of equal length with non-zero norm.
    """
    norm_x = np.sqrt(x.dot(x))
    norm_y = np.sqrt(y.dot(y))
    cos_angle = x.dot(y) / (norm_x * norm_y)
    cos_angle = abs(cos_angle)
    # fix: floating-point overshoot can make |cos| slightly exceed 1.0,
    # which would turn arccos into NaN — clamp before the inverse.
    cos_angle = min(cos_angle, 1.0)
    angle_deg = np.arccos(cos_angle) * 360 / 2 / np.pi
    return cos_angle, angle_deg
def downsample(data, num):
    """Keep every ``num``-th sample of ``data`` (indices 0, num, 2*num, ...).

    Args:
        data: indexable sequence.
        num: positive sampling stride.

    Returns:
        list with the retained samples.
    """
    # Slicing with a stride is the idiomatic form of the original
    # `if i % num == 0` loop.
    return list(data[0::num])
def load_data(path):
    """Load windowed sensor txt files from ``path``.

    Every file under ``path`` holds lines of the form
    ``<label>,<v0>,...,<vN>``; the axis is inferred from the file name
    (ax/ay/az/gx/gy/gz/ppg).  One feature row per window is built —
    currently just the raw gz window (angle cosines, stds and std
    ratios were explored and dropped) — and the labels are taken from
    the 'ax' files.

    Returns:
        (datas, labels): datas[i] is a list of feature arrays,
        labels[i] a one-element list with the int label.

    NOTE(review): assumes each recording has the same number of windows
    per axis so that index i lines up across ax/gz — verify upstream.
    """
    ax, ay, az = [], [], []
    gx, gy, gz = [], [], []
    ppg = []
    labels = []
    for file in os.listdir(path):
        file_name = path + file
        # fix: the original leaked the file handle (open without close).
        with open(file_name, 'r') as f:
            for line in f.readlines():
                line = line.strip().split(',')
                label = line[0]
                data = np.array(line[1:], dtype='float32')
                data = np.reshape(data, (-1, 1))
                if 'ax' in file:
                    ax.append(data)
                    labels.append([int(label)])
                if 'ay' in file:
                    ay.append(data)
                if 'az' in file:
                    az.append(data)
                if 'gyrox' in file or 'gx' in file:
                    gx.append(data)
                if 'gyroy' in file or 'gy' in file:
                    gy.append(data)
                if 'gyroz' in file or 'gz' in file:
                    gz.append(data)
                if 'ppg' in file:
                    ppg.append(data)
    datas = []
    for i in range(len(ax)):
        # Feature set: raw gz window only.
        datas.append([gz[i]])
    return datas, labels
def model(name):
    """Train a random forest on fold ``name`` and return the mean test
    accuracy over 2 runs with different train/val splits.

    Args:
        name: fold directory name under data/jiexi0/ (e.g. 'txt1').

    Returns:
        float: mean accuracy on the held-out test split.
    """
    # Load the training split.
    path = 'data/jiexi0/' + str(name) + '/train/'
    train_data, train_label = load_data(path)
    train_data = np.array(train_data, dtype='float32')
    train_data = np.reshape(train_data, (train_data.shape[0], -1))
    train_label = np.array(train_label).ravel()
    print(train_data.shape, train_label.shape)
    # Load the test split.
    path = 'data/jiexi0/' + str(name) + '/test/'
    test_data, test_label = load_data(path)
    test_data = np.array(test_data, dtype='float32')
    test_data = np.reshape(test_data, (test_data.shape[0], -1))
    test_label = np.array(test_label).ravel()
    print(test_data.shape, test_label.shape)
    gg = []
    for i in range(1, 3):
        # Re-split each round so the forest trains on a different 80%
        # subset; the val part only varies the training data here.
        train_features, val_features, train_labels, val_labels = \
            train_test_split(train_data, train_label, test_size=0.2,
                             random_state=i + 1)
        # Random forest (AdaBoost / single tree / extra trees were also
        # explored).  fix: max_features="auto" was removed in
        # scikit-learn 1.3 — "sqrt" is its classifier equivalent.
        rfc = RandomForestClassifier(n_estimators=100, criterion='gini',
                                     oob_score=True, max_features="sqrt")
        rfc = rfc.fit(train_features, train_labels)
        acc = rfc.score(test_data, test_label)
        gg.append(acc)
    print("n轮平均准确度为:", np.mean(gg))
    return np.mean(gg)
# Run every cross-validation fold and report the overall mean accuracy.
avg = []
name_list = ['txt1', 'txt2', 'txt3', 'txt4', 'txt5']
for name in name_list:
    print(name)
    avg.append(model(name))
print("mean acc:", np.mean(avg))
选取特征+模型训练_v1(OpenCV RTrees)
import numpy as np
import cv2
import os
def get_angle(x, y):
    """Return (|cos t|, t in degrees) for the angle t between x and y.

    The cosine is folded to [0, 1] via abs(), so the angle is always in
    [0, 90] degrees.

    Args:
        x, y: 1-D numpy arrays of equal length with non-zero norm.
    """
    norm_x = np.sqrt(x.dot(x))
    norm_y = np.sqrt(y.dot(y))
    cos_angle = x.dot(y) / (norm_x * norm_y)
    cos_angle = abs(cos_angle)
    # fix: floating-point overshoot can make |cos| slightly exceed 1.0,
    # which would turn arccos into NaN — clamp before the inverse.
    cos_angle = min(cos_angle, 1.0)
    angle_deg = np.arccos(cos_angle) * 360 / 2 / np.pi
    return cos_angle, angle_deg
def downsample(data, num):
    """Return every ``num``-th element of ``data``, starting at index 0."""
    kept = []
    for idx, value in enumerate(data):
        if idx % num == 0:
            kept.append(value)
    return kept
def load_data(path):
    """Load windowed sensor txt files from ``path``.

    Every file under ``path`` holds lines of the form
    ``<label>,<v0>,...,<vN>``; the axis is inferred from the file name
    (ax/ay/az/gx/gy/gz/ppg).  One feature row per window is built —
    currently just the raw gz window (angle cosines, stds and std
    ratios were explored and dropped) — and the labels are taken from
    the 'ax' files.

    Returns:
        (datas, labels): datas[i] is a list of feature arrays,
        labels[i] a one-element list with the int label.

    NOTE(review): assumes each recording has the same number of windows
    per axis so that index i lines up across ax/gz — verify upstream.
    """
    ax, ay, az = [], [], []
    gx, gy, gz = [], [], []
    ppg = []
    labels = []
    for file in os.listdir(path):
        file_name = path + file
        # fix: the original leaked the file handle (open without close).
        with open(file_name, 'r') as f:
            for line in f.readlines():
                line = line.strip().split(',')
                label = line[0]
                data = np.array(line[1:], dtype='float32')
                data = np.reshape(data, (-1, 1))
                if 'ax' in file:
                    ax.append(data)
                    labels.append([int(label)])
                if 'ay' in file:
                    ay.append(data)
                if 'az' in file:
                    az.append(data)
                if 'gyrox' in file or 'gx' in file:
                    gx.append(data)
                if 'gyroy' in file or 'gy' in file:
                    gy.append(data)
                if 'gyroz' in file or 'gz' in file:
                    gz.append(data)
                if 'ppg' in file:
                    ppg.append(data)
    datas = []
    for i in range(len(ax)):
        # Feature set: raw gz window only.
        datas.append([gz[i]])
    return datas, labels
def train(path, model_path):
    """Train an OpenCV RTrees classifier on the windowed data under
    ``path`` and save the model to ``model_path``.

    Args:
        path: directory of windowed txt files (see load_data).
        model_path: output .xml path for the trained model.
    """
    train_data, train_label = load_data(path)
    train_data = np.array(train_data, dtype='float32')
    train_data = np.reshape(train_data, (train_data.shape[0], -1))
    # fix: cv2.ml expects CV_32S (int32) class responses; the platform
    # default int64 from np.array would be rejected by train().
    train_label = np.array(train_label).ravel().astype(np.int32)
    print("加载数据:", train_data.shape, train_label.shape)
    # Build and configure the random-trees classifier.
    RTrees = cv2.ml.RTrees_create()
    RTrees.setTermCriteria((cv2.TermCriteria_MAX_ITER + cv2.TERM_CRITERIA_EPS, 100, 0.001))
    RTrees.setMaxDepth(8)                  # maximum tree depth
    RTrees.setMinSampleCount(8)            # minimum samples per node
    RTrees.setRegressionAccuracy(0.001)
    RTrees.setMaxCategories(10)            # max pre-clustered categories
    RTrees.setUseSurrogates(False)         # no surrogate splits for missing data
    RTrees.setCVFolds(0)                   # >1 would k-fold prune the trees
    RTrees.setTruncatePrunedTree(False)
    RTrees.setCalculateVarImportance(True)
    # Train (one row per sample) and persist to disk.
    ret = RTrees.train(train_data, cv2.ml.ROW_SAMPLE, train_label)
    RTrees.save(model_path)
    print(["模型训练完成!"])
def test(path, model_path):
    """Evaluate a saved RTrees model on the windowed data under ``path``
    and print the resulting accuracy.

    Args:
        path: directory of windowed txt files (see load_data).
        model_path: .xml path of a model saved by train().
    """
    test_data, test_label = load_data(path)
    test_data = np.array(test_data, dtype='float32')
    test_data = np.reshape(test_data, (test_data.shape[0], -1))
    test_label = np.array(test_label).ravel()
    print("加载数据:", test_data.shape, test_label.shape)
    RTrees = cv2.ml.RTrees_load(model_path)
    (ret, res) = RTrees.predict(test_data)
    # fix: res comes back as an (n, 1) array — flatten it so each
    # comparison below is scalar == scalar instead of array == scalar.
    res = np.ravel(res)
    n = 0
    lens = len(test_data)
    for i in range(lens):
        if res[i] == test_label[i]:
            n = n + 1
    Accuracy = n / lens
    print("准确度为:", Accuracy)
# Train on fold txt3, then evaluate the saved model on its test split.
model_path = 'model/test5.xml'
for run, split in ((train, 'train'), (test, 'test')):
    run('data/jiexi1/txt3/' + split + '/', model_path)
选取特征+模型训练_v2(增加新特征)(OpenCV RTrees)
import numpy as np
import cv2
import os
import math
from scipy import signal
def load_data(path):
    """Load windowed sensor txt files and build per-window features.

    Features per window (gz channel): mean of the 100 smallest |gz|
    samples, median of |gz|, and mean absolute Sobel gradient of gz.
    Also prints the accuracy of a simple threshold baseline on mean_gz.

    Returns:
        (datas, labels, files): feature rows, one-element int label
        lists, and the source-recording name for each window.

    NOTE(review): assumes each recording has the same number of windows
    per axis so that index i lines up across ax/gz — verify upstream.
    """
    ax, ay, az = [], [], []
    gx, gy, gz = [], [], []
    ppg = []
    labels = []
    files = []
    for file in os.listdir(path):
        file_name = path + file
        # fix: the original leaked the file handle (open without close).
        with open(file_name, 'r') as f:
            for line in f.readlines():
                line = line.strip().split(',')
                label = line[0]
                data = np.array(line[1:], dtype='float32')
                data = np.reshape(data, (-1, 1))
                if 'ax' in file:
                    ax.append(data)
                    labels.append([int(label)])
                    files.append(file.split('-')[0])
                if 'ay' in file:
                    ay.append(data)
                if 'az' in file:
                    az.append(data)
                if 'gyrox' in file or 'gx' in file:
                    gx.append(data)
                if 'gyroy' in file or 'gy' in file:
                    gy.append(data)
                if 'gyroz' in file or 'gz' in file:
                    gz.append(data)
                if 'ppg' in file:
                    ppg.append(data)
    datas = []
    acc = []
    for i in range(len(ax)):
        a = []
        # Mean absolute vertical Sobel gradient of the gz window.
        sobel = cv2.Sobel(gz[i], cv2.CV_64F, dx=0, dy=1)
        sobel = cv2.convertScaleAbs(sobel)
        grad = np.mean(sobel)
        # Median of |gz|.
        median_num = np.median(abs(gz[i]))
        # Mean of the 100 smallest |gz| samples.
        qq = sorted(abs(gz[i]))[0:100]
        mean_gz = np.mean(qq)
        # Threshold baseline: predict label 1 iff mean_gz > 30; track
        # how often that matches the true label.
        yuzhi = 30
        if mean_gz > yuzhi and labels[i][0] == 1:
            acc.append(1)
        elif mean_gz < yuzhi and labels[i][0] == 0:
            acc.append(1)
        else:
            acc.append(0)
        a.append(mean_gz)
        a.append(median_num)
        a.append(grad)
        datas.append(a)
    print(np.mean(acc))
    return datas, labels, files
def train(path, model_path):
    """Train an OpenCV RTrees classifier on the per-window features
    under ``path`` and save the model to ``model_path``.

    Args:
        path: directory of windowed txt files (see load_data).
        model_path: output .xml path for the trained model.
    """
    train_data, train_label, train_files = load_data(path)
    train_data = np.array(train_data, dtype='float32')
    train_data = np.reshape(train_data, (train_data.shape[0], -1))
    # fix: cv2.ml expects CV_32S (int32) class responses; the platform
    # default int64 from np.array would be rejected by train().
    train_label = np.array(train_label).ravel().astype(np.int32)
    print("加载数据:", train_data.shape, train_label.shape)
    # Build and configure the random-trees classifier.
    RTrees = cv2.ml.RTrees_create()
    RTrees.setTermCriteria((cv2.TermCriteria_MAX_ITER + cv2.TERM_CRITERIA_EPS, 100, 0.001))
    RTrees.setMaxDepth(8)                  # maximum tree depth
    RTrees.setMinSampleCount(8)            # minimum samples per node
    RTrees.setRegressionAccuracy(0.001)
    RTrees.setMaxCategories(10)            # max pre-clustered categories
    RTrees.setUseSurrogates(False)         # no surrogate splits for missing data
    RTrees.setCVFolds(0)                   # >1 would k-fold prune the trees
    RTrees.setTruncatePrunedTree(False)
    RTrees.setCalculateVarImportance(True)
    # Train (one row per sample) and persist to disk.
    ret = RTrees.train(train_data, cv2.ml.ROW_SAMPLE, train_label)
    RTrees.save(model_path)
    print(["模型训练完成!"])
def test(path, model_path):
    """Evaluate a saved RTrees model on the per-window features under
    ``path`` and print the resulting accuracy.

    Args:
        path: directory of windowed txt files (see load_data).
        model_path: .xml path of a model saved by train().
    """
    test_data, test_label, test_files = load_data(path)
    test_data = np.array(test_data, dtype='float32')
    test_data = np.reshape(test_data, (test_data.shape[0], -1))
    test_label = np.array(test_label).ravel()
    print("加载数据:", test_data.shape, test_label.shape, len(test_files))
    RTrees = cv2.ml.RTrees_load(model_path)
    (ret, res) = RTrees.predict(test_data)
    # fix: res comes back as an (n, 1) array — flatten it so each
    # comparison below is scalar == scalar instead of array == scalar.
    res = np.ravel(res)
    n = 0
    lens = len(test_data)
    for i in range(lens):
        if res[i] == test_label[i]:
            n = n + 1
    Accuracy = n / lens
    print("准确度为:", Accuracy)
# Scratch output file — only referenced by commented-out debug code in
# load_data, but kept open here to preserve the original behavior
# (opening with 'w+' truncates any previous contents).
ff = open('data2/gg.txt', 'w+')

# Evaluate the previously trained model on a fresh capture set.
model_path = 'model/test2/test1.xml'
test('test/test3.2/clear/', model_path)