Fall-down detection on the MobiAct v2.0 dataset — data preparation notebook

import pandas as pd
In [2]:
# Load one annotated forward-lying (FOL) fall recording from MobiAct v2.0.
# NOTE(review): absolute local path — consider a configurable DATA_DIR.
df = pd.read_csv('/home/helong/share/ML/MobiAct_Dataset_v2.0/Annotated Data/FOL/FOL_1_1_annotated.csv') 
In [3]:
df.head()
Out[3]:
 timestamprel_timeacc_xacc_yacc_zgyro_xgyro_ygyro_zazimuthpitchrolllabel
019138805190000.0000000.900011-9.557625-1.493466-0.175318-0.0329870.0009169.01564317.873316-41.670730STD
119138847900000.0042710.897037-9.557229-1.4861300.045509-0.0167990.0149669.22349618.114683-41.547146STD
219138908270000.0103080.892833-9.556668-1.4757600.0244350.0054980.0155779.43086318.326233-41.400665STD
319138947600000.0142410.890094-9.556303-1.469004-0.003971-0.0018330.0100799.62428518.487745-41.254680STD
419138997160000.0191970.886642-9.555843-1.460490-0.002443-0.0012220.0167999.80959918.640276-41.121006STD
In [4]:
# Keep only the three accelerometer axes; gyro/orientation/label are dropped.
x = df[['acc_x','acc_y','acc_z']] 
In [5]:
x.head()
Out[5]:
 acc_xacc_yacc_z
00.900011-9.557625-1.493466
10.897037-9.557229-1.486130
20.892833-9.556668-1.475760
30.890094-9.556303-1.469004
40.886642-9.555843-1.460490
In [11]:
# Affine rescaling of the accelerometer signal: shift readings into a
# positive range, then scale them up (an "RGB-like" value range).
data_move = 20
data_scale = 6


def transform_rgb(x):
    """Return ``(x + data_move) * data_scale`` element-wise.

    Works on scalars or on a pandas Series/DataFrame column, since the
    expression is purely arithmetic.
    """
    shifted = x + data_move
    return shifted * data_scale


x = df[['acc_x', 'acc_y', 'acc_z']].apply(transform_rgb)
In [12]:
x.head()
Out[12]:
 acc_xacc_yacc_z
0125.40006662.654249111.039201
1125.38222062.656628111.083221
2125.35699562.659992111.145442
3125.34056262.662183111.185978
4125.31985462.664944111.237057
In [13]:
# Flatten the (n_samples, 3) frame into a single wide row:
# stack() interleaves acc_x/acc_y/acc_z per timestep, .T makes it 1 row.
# NOTE(review): self-referential `x = f(x)` — re-running this cell on an
# already-flattened x will not reproduce the same result; non-idempotent.
x = x.stack().to_frame().T 
In [14]:
x.head()
Out[14]:
 0123...1969197019711972
 acc_xacc_yacc_zacc_xacc_yacc_zacc_xacc_yacc_zacc_x...acc_zacc_xacc_yacc_zacc_xacc_yacc_zacc_xacc_yacc_z
0125.40006662.654249111.039201125.3822262.656628111.083221125.35699562.659992111.145442125.340562...153.21973167.514197112.938234153.068426167.4019112.989777152.970371167.256695112.989777152.941329

1 rows × 5919 columns

In [8]:
x.head()
Out[8]:
 0123...1969197019711972
 acc_xacc_yacc_zacc_xacc_yacc_zacc_xacc_yacc_zacc_x...acc_zacc_xacc_yacc_zacc_xacc_yacc_zacc_xacc_yacc_z
00.900011-9.557625-1.4934660.897037-9.557229-1.486130.892833-9.556668-1.475760.890094...5.5366227.919033-1.1769615.5114047.900317-1.168375.4950627.876116-1.168375.490222

1 rows × 5919 columns

In [3]:
import os

import numpy as np
import pandas as pd
import tensorflow as tf
 

Convert every annotated recording of a class into one flattened row and save the class as a single wide CSV

In [36]:
# Build one wide training frame for the STU (stand-up) class: each annotated
# CSV becomes a single flattened row of scaled (acc_x, acc_y, acc_z) samples.
PATH = '/home/helong/share/ML/MobiAct_Dataset_v2.0/Annotated Data/STU'

# Read every non-empty recording first; keep load and transform separate.
df_list = []
for file in os.listdir(PATH):
    df = pd.read_csv(os.path.join(PATH, file))
    if not df.empty:
        df_list.append(df)

# Transform each recording to one wide row, collecting rows in a list.
rows = []
for df in df_list:
    x = df[['acc_x', 'acc_y', 'acc_z']].apply(transform_rgb)
    rows.append(x.stack().to_frame().T)

# Concatenate once: DataFrame.append was removed in pandas 2.0, and
# appending inside the loop is quadratic in the number of recordings.
sum_df = pd.concat(rows) if rows else pd.DataFrame()

# Prepend the class-label column (0 here) as column 'A'.
sum_df.insert(loc=0, column='A', value=0)
print(sum_df.head())

# Fixed path: the original wrote to 'tran_data_transform' (missing 'i'),
# but every loader cell reads from 'train_data_transform' — the STU.csv
# written with the old path would never be picked up.
sum_df.to_csv('/home/helong/share/ML/MobiAct_Dataset_v2.0/train_data_transform/STU.csv', index=False)
print("done")
 
   A           0                                   1                          \
           acc_x       acc_y       acc_z       acc_x       acc_y       acc_z   
0  0  132.343160  178.774379  108.683075  132.389976  178.857848  108.755623   
0  0  119.607388  179.420339  113.534330  119.567409  179.404633  113.488640   
0  0  121.276736  178.583105  117.924215  121.318685  178.552728  117.906857   
0  0  124.149364  179.019596  117.656289  124.163629  179.028155  117.670554   
0  0  106.486053  177.582995  116.405081  106.416345  177.555113  116.419023   

            2                          ...   1994  1995              1996  \
        acc_x       acc_y       acc_z  ...  acc_z acc_x acc_y acc_z acc_x   
0  132.450588  178.888155  108.785929  ...    NaN   NaN   NaN   NaN   NaN   
0  119.527375  179.388905  113.442886  ...    NaN   NaN   NaN   NaN   NaN   
0  121.360383  178.522533  117.889603  ...    NaN   NaN   NaN   NaN   NaN   
0  124.178146  179.036865  117.685071  ...    NaN   NaN   NaN   NaN   NaN   
0  106.340443  177.524753  116.434203  ...    NaN   NaN   NaN   NaN   NaN   

               1997              
  acc_y acc_z acc_x acc_y acc_z  
0   NaN   NaN   NaN   NaN   NaN  
0   NaN   NaN   NaN   NaN   NaN  
0   NaN   NaN   NaN   NaN   NaN  
0   NaN   NaN   NaN   NaN   NaN  
0   NaN   NaN   NaN   NaN   NaN  

[5 rows x 5995 columns]
done
In [2]:
def get_all_data():
    """Load a small sample (first rows) of every class CSV and return them
    shuffled as one DataFrame.

    Reads up to 5 rows per file, drops the first of them, and keeps only
    the first 1201 columns (label column + 400 timesteps x 3 axes).

    Returns:
        pd.DataFrame: shuffled rows from all class CSVs (empty frame if
        no CSV files are found).
    """
    PATH = '/home/helong/share/ML/MobiAct_Dataset_v2.0/train_data_transform'
    frames = []
    for f in os.listdir(PATH):
        file_path = os.path.join(PATH, f)
        print(file_path)
        if 'csv' in f:
            data = pd.read_csv(file_path, index_col=False, nrows=5,
                               low_memory=False)
            data = data.iloc[1:, 0:1201]
            frames.append(data)
    # pd.concat once: DataFrame.append was removed in pandas 2.0 and the
    # repeated-append pattern is quadratic.
    if not frames:
        return pd.DataFrame()
    all_data = pd.concat(frames, ignore_index=True)
    # sample(frac=1) reliably shuffles the rows. The original
    # np.random.shuffle(all_data.values) silently does nothing when
    # .values materializes a copy (mixed dtypes), so the data could come
    # back unshuffled and grouped by class.
    return all_data.sample(frac=1).reset_index(drop=True)
In [4]:
def get_test_data():
    """Load every row of every class CSV and return them shuffled as one
    DataFrame.

    Unlike get_all_data(), reads entire files (no nrows limit); drops the
    first row of each and keeps the first 1201 columns (label column +
    400 timesteps x 3 axes).

    Returns:
        pd.DataFrame: shuffled rows from all class CSVs (empty frame if
        no CSV files are found).
    """
    PATH = '/home/helong/share/ML/MobiAct_Dataset_v2.0/train_data_transform'
    frames = []
    for f in os.listdir(PATH):
        file_path = os.path.join(PATH, f)
        print(file_path)
        if 'csv' in f:
            data = pd.read_csv(file_path, index_col=False, low_memory=False)
            data = data.iloc[1:, 0:1201]
            frames.append(data)
    # pd.concat once: DataFrame.append was removed in pandas 2.0 and the
    # repeated-append pattern is quadratic.
    if not frames:
        return pd.DataFrame()
    all_data = pd.concat(frames, ignore_index=True)
    # sample(frac=1) reliably shuffles the rows; np.random.shuffle on
    # .values may shuffle a copy and leave the frame untouched.
    return all_data.sample(frac=1).reset_index(drop=True)
In [46]:
# Load the sampled training rows and report how many we got.
data = get_all_data()
print(data.shape[0])
# Export of the combined frame was left disabled in the original:
# data.to_csv('/home/helong/share/ML/MobiAct_Dataset_v2.0/all_data_transform.csv', index=False)
 
/home/helong/share/ML/MobiAct_Dataset_v2.0/train_data_transform/CSO.csv
/home/helong/share/ML/MobiAct_Dataset_v2.0/train_data_transform/SIT.csv
/home/helong/share/ML/MobiAct_Dataset_v2.0/train_data_transform/STN.csv
/home/helong/share/ML/MobiAct_Dataset_v2.0/train_data_transform/JUM.csv
/home/helong/share/ML/MobiAct_Dataset_v2.0/train_data_transform/SLH.csv
/home/helong/share/ML/MobiAct_Dataset_v2.0/train_data_transform/JOG.csv
/home/helong/share/ML/MobiAct_Dataset_v2.0/train_data_transform/STD.csv
/home/helong/share/ML/MobiAct_Dataset_v2.0/train_data_transform/SBE.csv
/home/helong/share/ML/MobiAct_Dataset_v2.0/train_data_transform/WAL.csv
/home/helong/share/ML/MobiAct_Dataset_v2.0/train_data_transform/BSC.csv
/home/helong/share/ML/MobiAct_Dataset_v2.0/train_data_transform/SLW.csv
/home/helong/share/ML/MobiAct_Dataset_v2.0/train_data_transform/CSI.csv
/home/helong/share/ML/MobiAct_Dataset_v2.0/train_data_transform/CHU.csv
/home/helong/share/ML/MobiAct_Dataset_v2.0/train_data_transform/SCH.csv
/home/helong/share/ML/MobiAct_Dataset_v2.0/train_data_transform/STU.csv
/home/helong/share/ML/MobiAct_Dataset_v2.0/train_data_transform/SRH.csv
/home/helong/share/ML/MobiAct_Dataset_v2.0/train_data_transform/SBW.csv
/home/helong/share/ML/MobiAct_Dataset_v2.0/train_data_transform/FOL.csv
/home/helong/share/ML/MobiAct_Dataset_v2.0/train_data_transform/FKL.csv
/home/helong/share/ML/MobiAct_Dataset_v2.0/train_data_transform/SDL.csv
80
In [5]:
CLASS_NUM = 1
LEARNING_RATE = 0.001 TRAIN_STEP = 10000 BATCH_SIZE = 50 _index_in_epoch = 0 _epochs_completed = 0 _num_examples = 0 MODEL_SEVE_PATH = '../model/model.ckpt' 
In [6]:
def wights_variable(shape):
    '''  权重变量tensor  :param shape:  :return:  ''' wights = tf.truncated_normal(shape=shape,stddev=0.1) return tf.Variable(wights,dtype=tf.float32) def biases_variable(shape): '''  偏置变量tensor  :param shape:  :return:  ''' bias = tf.constant(0.1,shape=shape) return tf.Variable(bias,dtype=tf.float32) def conv2d(x,kernel): '''  网络卷积层  :param x: 输入x  :param kernel: 卷积核  :return: 返回卷积后的结果  ''' return tf.nn.conv2d(x,kernel,strides=[1,1,1,1],padding='SAME') def max_pooling_2x2(x): '''  最大赤化层  :param x: 输入x  :return: 返回池化后数据  ''' return tf.nn.max_pool(x,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME') def lrn(x): '''  local response normalization  局部响应归一化,可以提高准确率  :param x: 输入x  :return:  ''' return tf.nn.lrn(x,4,1.0,0.001,0.75) def fall_net(x): '''  跌到检测网络  :param x: 输入tensor,shape=[None,]  :return:  ''' with tf.name_scope('reshape'): x = tf.reshape(x,[-1,20,20,3]) #x = x / 255.0 * 2 - 1 with tf.name_scope('conv1'): # value shape:[-1,18,18,32] conv1_kernel = wights_variable([5,5,3,32]) conv1_bias 

转载于:https://www.cnblogs.com/ljstu/p/10184417.html

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值