台大2020年深度学习课程作业一(TensorFlow 版)

基于 TensorFlow 框架,写了一个基于多层神经网络的求解方法,代码如下:

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf

# Load the raw training CSV (big5-encoded; columns 0-2 are date/station/
# measurement-name metadata, so keep only the hourly value columns).
total_data = pd.read_csv('./data/train.csv', encoding='big5')
# .copy() makes an explicit copy: the original sliced view + mask assignment
# triggers SettingWithCopyWarning and may silently fail to write back.
data = total_data.iloc[:, 3:].copy()
data[data == 'NR'] = 0  # 'NR' means "no rainfall" in this dataset -> treat as 0
raw_data = data.to_numpy()
print("数据大小:", data.shape)
# head() only returns a DataFrame; in a plain script (not a notebook) it must
# be printed to have any visible effect.
print(data.head(20))
print(total_data.head(20))

# Regroup the flat rows into one matrix per month:
# 18 features x (20 days * 24 hours = 480 hourly columns).
month_data = {}
for month in range(12):
    sample = np.empty([18, 480])
    for day in range(20):
        sample[:, day * 24:(day + 1) * 24] = raw_data[18 * (20 * month + day):18 * (20 * month + day + 1), :]
    # Assign once per month (the original re-assigned this inside the day
    # loop, redundantly writing the same reference 20 times).
    month_data[month] = sample

# Build the supervised dataset with a sliding window: 9 consecutive hours of
# all 18 features (162 inputs) predict PM2.5 (feature row 9) at the 10th hour.
# Each 480-hour month yields 480 - 9 = 471 windows.
x = np.empty([12 * 471, 18 * 9], dtype=np.float32)
y = np.empty([12 * 471, 1], dtype=np.float32)
for month in range(12):
    for day in range(20):
        for hour in range(24):
            # The last 9 window starts of each month would read past hour 480.
            if day == 19 and hour > 14:
                continue
            row = month * 471 + day * 24 + hour
            start = day * 24 + hour
            x[row, :] = month_data[month][:, start:start + 9].reshape(1, -1)
            y[row, 0] = month_data[month][9, start + 9]
print(x)
print(y)

# Z-score normalize each feature column of x in place.
# Vectorized with a boolean column mask instead of the original pure-Python
# double loop over every element (O(rows*cols) interpreter overhead).
# Columns with zero standard deviation are left untouched, exactly as the
# original element-wise loop skipped them.
mean_x = np.mean(x, axis=0)  # axis=0: statistics per feature column
std_x = np.std(x, axis=0)
nonzero = std_x != 0
x[:, nonzero] = (x[:, nonzero] - mean_x[nonzero]) / std_x[nonzero]
print(x)

# Hold out the last 20% of samples for validation (no shuffling).
import math
split_idx = math.floor(len(x) * 0.8)
x_train_set, x_validation = x[:split_idx, :], x[split_idx:, :]
y_train_set, y_validation = y[:split_idx, :], y[split_idx:, :]
print("训练集大小:", len(x_train_set))
print("测试集大小:", len(x_validation))

# Graph definition (TF1-style static graph): a 162 -> 9 -> 4 -> 1
# fully-connected regression network fed through explicit placeholders.
X = tf.placeholder(tf.float32, shape=[None, 18 * 9], name='Batch')
Y = tf.placeholder(tf.float32, shape=[None, 1], name='Label')
layerNum_1 = 9
layerNum_2 = 4

Weights = {
    "Weight_1": tf.Variable(tf.random_normal(shape=[18 * 9, layerNum_1]), name="Weight_1"),
    "Weight_2": tf.Variable(tf.random_normal(shape=[layerNum_1, layerNum_2]), name="Weight_2"),
    "Weight_3": tf.Variable(tf.random_normal(shape=[layerNum_2, 1]), name="Weight_3"),
}
Bias = {
    "Bias_1": tf.Variable(tf.zeros(shape=[layerNum_1]), name="Bias_1"),
    "Bias_2": tf.Variable(tf.zeros(shape=[layerNum_2]), name="Bias_2"),
    "Bias_3": tf.Variable(tf.zeros(shape=[1]), name="Bias_3"),
}


def _dense(inputs, w, b, activation):
    """Affine transform (inputs @ w + b) followed by the given activation."""
    return activation(tf.matmul(inputs, w) + b)


layer_1 = _dense(X, Weights["Weight_1"], Bias["Bias_1"], tf.nn.sigmoid)
layer_2 = _dense(layer_1, Weights["Weight_2"], Bias["Bias_2"], tf.nn.sigmoid)
output = _dense(layer_2, Weights["Weight_3"], Bias["Bias_3"], tf.nn.relu)

# Mean-squared-error loss between targets and predictions.
cost = tf.reduce_mean(tf.square(Y - output))

# Training setup: plain (full-batch) gradient descent on the MSE cost.
learning_rate = 0.1
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)

training_epochs = 5001
display_step = 100  # record/print losses every 100 epochs
loss_train, loss_valid, y_validation_output = list(), list(), list()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(training_epochs):
        # One full-batch gradient step over the whole training set per epoch.
        sess.run(optimizer, feed_dict={X: x_train_set, Y: y_train_set})
        if (epoch + 1) % display_step == 0:
            loss_train.append(sess.run(cost, feed_dict={X: x_train_set, Y: y_train_set}))
            loss_valid.append(sess.run(cost, feed_dict={X: x_validation, Y: y_validation}))
            # Fixed typo in the progress message: "Epech" -> "Epoch".
            print("Epoch={0},loss_train={1},loss_valid={2}".format(epoch + 1, loss_train[-1], loss_valid[-1]))
    print("finish")
    # Per-sample predictions on the validation set; keeps the original
    # structure of a list of (1, 1) arrays.
    for x_ in x_validation:
        y_validation_output.append(sess.run(output, feed_dict={X: x_.reshape(1, -1)}))
        

运行效果很一般,仅供参考(-_-||)

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值