sleep-analysis

import pandas as pd
import numpy as np
复制代码
# Load the raw sensor dump. low_memory=False makes pandas parse each
# column in a single pass, fixing the "Columns ... have mixed types"
# DtypeWarning that the chunked default parser emits for this file.
result = pd.read_csv('out.csv', low_memory=False)
result.head()
复制代码
/anaconda3/envs/py35/lib/python3.5/site-packages/IPython/core/interactiveshell.py:2728: DtypeWarning: Columns (3,4,5,6,7,8,10) have mixed types. Specify dtype option on import or set low_memory=False.
  interactivity=interactivity, compiler=compiler, result=result)
复制代码
Unnamed: 0 | uploadtime | data | version | datatype | h | l | ns | s | serialNumber | t
00.02018-05-22T03:07:51.263Z{'serialNumber': '"S1DL11XHSHS1"', 't': '25.08...2True5918262"S1DL11XHSHS1"25.0893
11.02018-05-22T03:07:52.008Z{'serialNumber': '"S1F311DQSHS1"', 't': '26.40...2True504161"S1F311DQSHS1"26.4033
22.02018-05-22T03:07:52.059Z{'serialNumber': '"S1DF11MFSHS1"', 't': '17.17...2True7016067"S1DF11MFSHS1"17.1742
33.02018-05-22T03:07:52.350Z{'serialNumber': '"S1FD13XGSHS1"', 't': '21.24...2True640165"S1FD13XGSHS1"21.2446
44.02018-05-22T03:07:54.643Z{'serialNumber': '"S1DL11W4SHS1"', 't': '27.28...2True450160"S1DL11W4SHS1"27.2828
# Discard the leftover positional index column written by to_csv.
result = result.drop(columns=result.columns[0])
复制代码
# Keep only rows that actually carry a sleep-stage label.
result_no_nan = result.loc[result['ns'].notna()]
复制代码
# Drop rows whose 'ns' cell is the literal string 'ns' — presumably
# repeated CSV header rows from a concatenated dump; verify upstream.
result_no_nan = result_no_nan.loc[result_no_nan['ns'].ne('ns')]
复制代码
# The label column survived the CSV round-trip as strings; cast it to int.
result_no_nan = result_no_nan.astype({'ns': int})
复制代码
# Sanity check: which distinct sleep-stage labels are present.
result_no_nan['ns'].unique()
复制代码
array([2, 1, 0, 3])
复制代码
# Class balance of the sleep-stage labels.
result_no_nan['ns'].value_counts()
复制代码
0    394296
1    358078
2     16030
3      2155
Name: ns, dtype: int64
复制代码
# One-hot encode the sleep-stage label into indicator columns ns0..ns3.
for stage in range(4):
    result_no_nan['ns%d' % stage] = (result_no_nan['ns'] == stage).astype(int)
复制代码
# Peek at the first row to confirm the indicator columns landed.
result_no_nan.iloc[:1]
复制代码
uploadtime | data | version | datatype | h | l | ns | s | serialNumber | t | ns0 | ns1 | ns2 | ns3
02018-05-22T03:07:51.263Z{'serialNumber': '"S1DL11XHSHS1"', 't': '25.08...2True5918262"S1DL11XHSHS1"25.08930010
# Keep identifiers, the four raw features (h, l, s, t) and the one-hot labels.
selected_columns = ['serialNumber', 'uploadtime',
                    'h', 'l', 's', 't',
                    'ns0', 'ns1', 'ns2', 'ns3']
out = result_no_nan[selected_columns]
复制代码
# Spot-check the trimmed frame.
out.iloc[:1]
复制代码
serialNumber | uploadtime | h | l | s | t | ns0 | ns1 | ns2 | ns3
0"S1DL11XHSHS1"2018-05-22T03:07:51.263Z59186225.08930010
# (n_rows, 4) one-hot label matrix, one column per sleep stage.
target = np.column_stack((out['ns0'].values, out['ns1'].values,
                          out['ns2'].values, out['ns3'].values))
复制代码
# (n_rows, 4) feature matrix: h, l, s, t per sample.
input_data = np.column_stack((out['h'].values, out['l'].values,
                              out['s'].values, out['t'].values))
复制代码
# Confirm features and labels line up row-for-row.
input_data.shape, target.shape
复制代码
((770559, 4), (770559, 4))
复制代码
import tensorflow as tf
复制代码
/anaconda3/envs/py35/lib/python3.5/importlib/_bootstrap.py:222: RuntimeWarning: compiletime version 3.6 of module 'tensorflow.python.framework.fast_tensor_util' does not match runtime version 3.5
  return f(*args, **kwds)
/anaconda3/envs/py35/lib/python3.5/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
  from ._conv import register_converters as _register_converters
复制代码
# TF1 graph inputs: a batch of 4 features in, 4 one-hot classes out.
x = tf.placeholder(tf.float32, shape=[None, 4])
y = tf.placeholder(tf.float32, shape=[None, 4])
复制代码
# L2 weight penalty shared by both hidden layers below.
reg = tf.contrib.layers.l2_regularizer(0.1)
复制代码
# First hidden layer: 4 features -> 32 ReLU units.
weights_init = tf.truncated_normal_initializer(stddev=0.1)
fc_1 = tf.contrib.layers.fully_connected(
    x, 32,
    activation_fn=tf.nn.relu,
    weights_initializer=weights_init,
    weights_regularizer=reg)
复制代码
np.shape(fc_1)
复制代码
TensorShape([Dimension(None), Dimension(32)])
复制代码
# Second hidden layer: 32 -> 32 ReLU units, same init/regularization.
weights_init = tf.truncated_normal_initializer(stddev=0.1)
fc_2 = tf.contrib.layers.fully_connected(
    fc_1, 32,
    activation_fn=tf.nn.relu,
    weights_initializer=weights_init,
    weights_regularizer=reg)
复制代码
np.shape(fc_2)
复制代码
TensorShape([Dimension(None), Dimension(32)])
复制代码
# TF1 dropout takes a KEEP probability (not a drop rate); the training
# loop below feeds 0.5 while training and 1.0 when evaluating.
keep_prob = tf.placeholder("float")
fc2_drop = tf.nn.dropout(fc_2, keep_prob)
复制代码
# Output layer: softmax over the 4 sleep-stage classes (no regularizer here).
# NOTE(review): pairing a softmax activation with the hand-rolled clipped
# cross-entropy below works but is numerically weaker than computing the
# loss from logits (e.g. softmax_cross_entropy_with_logits) — consider.
pred = tf.contrib.layers.fully_connected(fc2_drop, 4, activation_fn=tf.nn.softmax)
复制代码
np.shape(pred), np.shape(target)
复制代码
(TensorShape([Dimension(None), Dimension(4)]), (770559, 4))
复制代码
# Apply the L2 penalty manually over every trainable variable.
# NOTE(review): TRAINABLE_VARIABLES includes the biases and the output
# layer's variables, so this regularizes more than the two hidden layers'
# weights; the losses the layers already registered via
# weights_regularizer (REGULARIZATION_LOSSES) are ignored here — confirm
# this double coverage of weights is intended.
weights = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
reg_ws = tf.contrib.layers.apply_regularization(reg, weights_list = weights)
复制代码
# Summed (not averaged) cross-entropy plus the L2 term; predictions are
# clipped away from 0 so tf.log cannot produce -inf/NaN.
loss = -tf.reduce_sum(y * tf.log(tf.clip_by_value(pred, 0.0001, 1))) + tf.reduce_sum(reg_ws)
复制代码
# Fraction of rows whose predicted argmax matches the one-hot label.
is_correct = tf.equal(tf.argmax(pred, axis=1), tf.argmax(y, axis=1))
accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))
复制代码
# Adam with the default-ish learning rate used throughout this notebook.
optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
train_step = optimizer.minimize(loss)
复制代码
# Start a TF1 session, initialize all variables, and set up the
# per-checkpoint history buffers the training loop appends to.
sess = tf.Session()
sess.run(tf.global_variables_initializer())
loss_train, train_acc = [], []
复制代码
# Full-batch training: every step feeds the ENTIRE dataset. The original
# per-step permutation cannot change a full-batch gradient (the loss is a
# sum over all rows), so it is dropped; this also stops the loop from
# clobbering the module-level input_data/target arrays in place.
for i in range(4000):
    sess.run(train_step, feed_dict={x: input_data, y: target, keep_prob: 0.5})
    if i % 50 == 0:
        # Evaluate loss and accuracy on the training data with dropout off,
        # in a single run so both come from the same parameter snapshot.
        loss_temp, train_acc_temp = sess.run(
            [loss, accuracy],
            feed_dict={x: input_data, y: target, keep_prob: 1.0})
        loss_train.append(loss_temp)
        train_acc.append(train_acc_temp)
        print(loss_temp, train_acc_temp)
复制代码
1375709.8 0.5048309
609597.56 0.7247907
541404.9 0.73670805
496564.28 0.7462751
487508.44 0.7478636
482841.1 0.7487253
480346.75 0.7482607
477108.9 0.7484346
475905.06 0.7484696
474179.06 0.74926645
472259.4 0.7497012
471763.56 0.74967396
471339.75 0.7500412
470828.5 0.75031114
471458.1 0.7515453
470603.12 0.7517828
469874.47 0.751653
469248.5 0.75303257
468931.7 0.7540331
468528.1 0.7539851
468503.97 0.75422126
468131.6 0.75447303
468123.9 0.7547144
468139.78 0.75418496
467144.9 0.7546249
467890.4 0.7549545
467048.97 0.7551518
466745.1 0.75575006
466457.34 0.75609916
465929.1 0.75655985
465421.4 0.75609136
465065.8 0.75649625
466193.6 0.7567532
465614.88 0.7571672
464973.97 0.75710493
465103.9 0.7571659
464903.06 0.7569699
465279.66 0.7574021
463968.34 0.7576798
464071.3 0.75737613
464002.7 0.7574748
464098.28 0.7576383
463965.44 0.75771487
463142.53 0.7580419
463134.94 0.75802374
463714.1 0.757332
463228.44 0.7578615
462894.66 0.758134
463095.1 0.7582041
462459.34 0.7581211
462745.3 0.7582807
462511.56 0.7579199
461954.0 0.758317
461555.75 0.7583235
461758.25 0.7583313
461913.78 0.7582119
461906.06 0.7583274
462053.12 0.75818336
461215.22 0.7583352
461650.03 0.75818205
461360.84 0.75823134
460985.78 0.7580925
461389.03 0.75806135
461056.66 0.7584001
460863.2 0.7579446
460948.8 0.7576357
460820.84 0.75813794
460422.53 0.75801337
460513.97 0.75804317
460623.8 0.75829107
460722.16 0.75801337
460025.47 0.7582729
460292.94 0.7576668
460127.25 0.7580146
460008.84 0.75840396
460206.84 0.7582664
459823.66 0.7583209
460277.3 0.757881
459591.34 0.75830925
459873.66 0.7581042
复制代码
import matplotlib.pyplot as plt
复制代码
# Training-loss curve, one point per 50 steps.
plt.plot(loss_train, color='k', linestyle='-')
plt.title('train loss')
plt.show()
复制代码

# Training-accuracy curve, one point per 50 steps.
plt.plot(train_acc, color='b', linestyle='-', label='train_acc')
plt.title('train_acc')
plt.legend()
plt.show()
复制代码

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值