# LSTM电力预测 ～期末大作业～

## LSTM预测电力消耗：

#### 1.PP_inference :

import tensorflow as tf

# Number of hidden units in each LSTM cell.
HIDDEN_SIZE = 500
# Number of stacked LSTM layers in the MultiRNNCell.
NUM_LAYERS = 2

def lstm_model(x,dropout_keep_prob):
    """Build a stacked-LSTM regression model (TensorFlow 1.x APIs).

    Args:
        x: float32 input batch. `dynamic_rnn` expects shape
            (batch, time_steps, features) — assumed (batch, 20, 3) from
            PP_train — TODO confirm.
        dropout_keep_prob: keep probability for output dropout on each cell
            (0.9 during training, 1 at evaluation per the callers).

    Returns:
        Tensor of shape (batch, 1): a single linear regression output
        computed from the last time step.
    """
    # NUM_LAYERS LSTM cells, each wrapped with output dropout.
    lstm_cells = [
        tf.nn.rnn_cell.DropoutWrapper(
            tf.nn.rnn_cell.BasicLSTMCell(HIDDEN_SIZE),
            output_keep_prob=dropout_keep_prob)
        for _ in range(NUM_LAYERS)]
    cell = tf.nn.rnn_cell.MultiRNNCell(lstm_cells)
    print("cell_created")
    outputs, _ = tf.nn.dynamic_rnn(cell, x, dtype=tf.float32)
    # Keep only the output of the final time step for regression.
    output = outputs[:,-1,:]

    # Head: 100-unit ReLU hidden layer, then one linear output unit.
    # NOTE(review): tf.contrib is TF1-only; porting to TF2 would need
    # tf.keras.layers.Dense instead.
    predictions01 = tf.contrib.layers.fully_connected(output, 100,activation_fn= tf.nn.relu)
    predictions = tf.contrib.layers.fully_connected(predictions01, 1, activation_fn=None)
    return predictions

#### 2.PP_train :

batch size 我调了好久，最终感觉取 100 时 loss 比较稳定，收敛值也可以接受。

input size 为每个时间步输入特征的维度，这里为 3（时间、温度、电力）。

import numpy as np
import tensorflow as tf
import pandas as pd
import PP_inference

# --- Hyper-parameters -------------------------------------------------
TIMESTEPS = 20          # length of each input window fed to the LSTM
TRAINING_STEPS = 10000  # total number of mini-batch updates
BATCH_SIZE = 100        # chosen empirically; loss was stable around 100

INPUT_SIZE = 3          # features per time step: time, temperature, power

LEARNING_RATE_BASE = 0.01   # initial learning rate
LEARNING_RATE_DECAY = 0.9   # exponential decay factor per epoch

NUM_EXAMPLES = 50000    # number of rows used for training

# BUG FIX: the original did `f = open('BSE.csv')` and then read from the
# undefined name `df`, which raises NameError (and leaked the file handle).
# Load the CSV through pandas instead.
df = pd.read_csv('BSE.csv')
data = df.iloc[:, 0:3].values  # columns: time, temperature, power

MODEL_SAVE_PATH = "model_saved/"
MODEL_NAME = "model.ckpt"

# Dropout keep probability applied to LSTM outputs during training.
LSTM_KEEP_PROB = 0.9

def variable_summaries(var, name):
    """Attach TensorBoard summaries (histogram, mean, stddev) to `var`.

    NOTE(review): this helper is not called anywhere in the visible code;
    it may be dead code or used by an unseen part of the project.
    """
    with tf.name_scope('summaries'):
        tf.summary.histogram(name, var)
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean/'+name, mean)
        # Standard deviation computed manually from the mean.
        stddev = tf.sqrt(tf.reduce_mean(tf.square(var-mean)))
        tf.summary.scalar('stddev/'+name, stddev)

def get_train_data(time_step, train_begin, train_end):
    """Slice the module-level `data` array into LSTM training windows.

    Args:
        time_step: length of each input window (number of past samples).
        train_begin, train_end: row range of `data` used for training.

    Returns:
        (train_x, train_y) float32 arrays of shape
        (num_windows, TIMESTEPS, INPUT_SIZE) and (num_windows, 1).
    """
    data_time = data[train_begin:train_end, 0]
    data_temp = data[train_begin:train_end, 1]
    data_power = data[train_begin:train_end, 2]

    # Original note (translated): to avoid distorting the temperature's
    # distribution, time is only mean-centred while temperature and power
    # are standardised; batch normalisation would be an alternative.
    mdt = np.mean(data_time, axis=0)
    mdtp = np.mean(data_temp, axis=0)
    stddtp = np.std(data_temp, axis=0)
    mdtpower = np.mean(data_power, axis=0)
    stddtpower = np.std(data_power, axis=0)
    # These statistics are hard-coded in PP_eval.get_test_data, so print
    # them here for reference.
    print(mdt)
    print(mdtp)
    print(stddtp)
    print(mdtpower)
    print(stddtpower)

    normalized_data_time = np.asarray(data_time - mdt).reshape(-1, 1)
    print(normalized_data_time.shape)
    normalized_data_temp = np.asarray((data_temp - mdtp) / stddtp).reshape(-1, 1)
    normalized_data_power = np.asarray((data_power - mdtpower) / stddtpower).reshape(-1, 1)
    normalized_train_data = np.concatenate(
        (normalized_data_time, normalized_data_temp, normalized_data_power), axis=1)
    print(normalized_train_data.shape)

    # Targets stay in the raw (un-normalised) scale: `data_labels` holds
    # columns 1:3 (temperature, power), so index 1 below is raw power.
    data_labels = data[train_begin:train_end, 1:3]

    train_x = []
    train_y = []
    for i in range(len(normalized_train_data) - time_step):
        # A window of `time_step` consecutive normalised feature rows ...
        train_x.append(normalized_train_data[i:i + time_step, :3])
        # ... predicts the raw power value one step after the window.
        train_y.append(data_labels[i + time_step, 1])
    # BUG FIX: in the original flat paste this print sat inside the loop,
    # firing once per window; it belongs after the loop.
    print("get_train_data_finished")

    train_x = np.array(train_x, dtype=np.float32).reshape(-1, TIMESTEPS, INPUT_SIZE)
    train_y = np.array(train_y, dtype=np.float32).reshape(-1, 1)
    print(train_x.shape)
    print(train_y.shape)
    return train_x, train_y

def train(train_x, train_y):
    """Train the LSTM model on (train_x, train_y) and checkpoint it.

    Args:
        train_x: float32 array (num_windows, TIMESTEPS, INPUT_SIZE).
        train_y: float32 array (num_windows, 1), raw power targets.

    Side effects: writes TensorBoard logs to "log/log" and checkpoints to
    MODEL_SAVE_PATH + MODEL_NAME every 100 steps.
    """
    global_step = tf.Variable(0, trainable=False)

    # Endless shuffled mini-batch pipeline over the training windows.
    ds = tf.data.Dataset.from_tensor_slices((train_x, train_y))
    ds = ds.repeat().shuffle(100000).batch(BATCH_SIZE)
    X, y = ds.make_one_shot_iterator().get_next()

    predictions = PP_inference.lstm_model(X, LSTM_KEEP_PROB)
    loss = tf.losses.mean_squared_error(labels=y, predictions=predictions)
    tf.summary.scalar('loss', loss)

    # Decay the learning rate once per pass over the data.
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        NUM_EXAMPLES / BATCH_SIZE,
        LEARNING_RATE_DECAY
    )
    # BUG FIX: the original never defined `train_op` (no optimizer), yet
    # ran it in sess.run — that raises NameError. Minimizing with the
    # decayed learning rate also advances `global_step`.
    train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)
    print("All paras are setted")

    saver = tf.train.Saver()
    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter("log/log", tf.get_default_graph())

    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        for i in range(TRAINING_STEPS):
            # BUG FIX: the original only ran the graph in the `else`
            # branch, so on every 100th step `l` and `step` were read
            # before assignment; the merged summary was also never written.
            summary, _, l, step = sess.run([merged, train_op, loss, global_step])
            train_writer.add_summary(summary, step)
            if i % 100 == 0:
                print("train step is  %s loss is  %s " % (str(step), str(l)))
                saver.save(sess, MODEL_SAVE_PATH + MODEL_NAME)
                print("model has been saved")

    train_writer.close()

def main(argv=None):
    """Entry point: build the training set, then train the model."""
    features, targets = get_train_data(TIMESTEPS, 0, NUM_EXAMPLES)
    train(features, targets)


if __name__ == '__main__':
    tf.app.run()

#### 3.PP_eval:

import tensorflow as tf
import matplotlib.pyplot as plt
import PP_inference
import PP_train
import pandas as pd
import numpy as np

TIMESTEPS = 20   # must match PP_train.TIMESTEPS
INPUT_SIZE = 3   # features per time step: time, temperature, power

# No dropout at evaluation time.
LSTM_KEEP_PROB = 1

# BUG FIX: the original did `f = open('BSE.csv')` and then read from the
# undefined name `df`, which raises NameError (and leaked the file handle).
# Load the CSV through pandas instead.
df = pd.read_csv('BSE.csv')
data = df.iloc[:, 0:3].values  # columns: time, temperature, power

def get_test_data(time_step,test_begin,test_end):
    """Slice the module-level `data` array into LSTM evaluation windows.

    Args:
        time_step: length of each input window.
        test_begin, test_end: row range of `data` used for evaluation.

    Returns:
        (test_x, test_y): test_x float32 of shape
        (num_windows, TIMESTEPS, INPUT_SIZE); test_y float32 with the raw
        (temperature, power) pair one step after each window.
    """
    data_time = data[test_begin:test_end, 0]
    data_temp = data[test_begin:test_end, 1]
    data_power = data[test_begin:test_end, 2]
    # Hard-coded normalization statistics. NOTE(review): these must equal
    # the means/stddevs printed by PP_train.get_train_data for the same
    # training range, otherwise the inputs are mis-scaled — verify after
    # retraining on new data.
    mdt=[12.49888]
    mdt = np.array(mdt,dtype=np.float32)
    mdtp = [50.94878]
    mdtp = np.array(mdtp,dtype=np.float32)
    stddtp = [18.223719612406246]
    stddtp = np.array(stddtp,dtype=np.float32)
    mdtpower = [14916.97936]
    mdtpower = np.array(mdtpower,dtype=np.float32)
    stddtpower = [2938.1257186400294]
    stddtpower = np.array(stddtpower,dtype=np.float32)
    # Same scheme as training: time is mean-centred only; temperature and
    # power are standardised.
    normalized_data_time = data_time - mdt
    normalized_data_time = np.array(normalized_data_time).reshape(-1, 1)
    print(normalized_data_time.shape)
    normalized_data_temp = (data_temp - mdtp) / stddtp
    normalized_data_temp = np.array(normalized_data_temp).reshape(-1, 1)
    normalized_data_power = (data_power - mdtpower) / stddtpower
    normalized_data_power = np.array(normalized_data_power).reshape(-1, 1)
    normalized_test_data = np.concatenate((normalized_data_time, normalized_data_temp, normalized_data_power), axis=1)
    test_x = []
    test_y = []

    # Raw (un-normalised) rows for the labels.
    data_test=data[test_begin:test_end]

    for i in range(len(normalized_test_data)-time_step):
        test_x.append([normalized_test_data[i:i + time_step, :3]])
        # Label is the raw (temperature, power) pair after the window.
        test_y.append([data_test[i + time_step, 1:3]])
    print("get_test_data_finished")
    test_x = np.array(test_x).reshape(-1,TIMESTEPS,INPUT_SIZE)
    print(len(test_y))
    test_y = np.array(test_y,dtype=np.float32).squeeze()
    return np.array(test_x,dtype=np.float32),np.array(test_y,dtype=np.float32)

def run_eval(test_x, test_y):
    """Restore the latest checkpoint, predict power over the test windows,
    and plot predictions vs. real power plus the real temperature series.

    Args:
        test_x: float32 windows (num_windows, TIMESTEPS, INPUT_SIZE).
        test_y: float32 (num_windows, 2) raw (temperature, power) labels.
    """
    # Batch size 1: evaluate one window at a time.
    ds = tf.data.Dataset.from_tensor_slices((test_x, test_y))
    ds = ds.batch(1)
    X,y = ds.make_one_shot_iterator().get_next()
    pred = PP_inference.lstm_model(X,LSTM_KEEP_PROB)
    predictions = []
    label = []
    realtemp=[]
    saver = tf.train.Saver(tf.global_variables())
    with tf.Session() as sess:
        # Restore the most recent checkpoint written by PP_train.
        ckpt = tf.train.get_checkpoint_state(
            PP_train.MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            print("checkpoint finded")
        else:
            print("No checkpoint finded")
        for i in range(len(test_y)):
            predget,yy =sess.run([pred,y])
            yy=np.array(yy).squeeze()
            print(i)
            print(yy)
            predget=np.array(predget).squeeze()
            predictions.append(predget)
            # yy = (temperature, power): index 1 is the real power label,
            # index 0 the real temperature.
            label.append(yy[1])
            realtemp.append(yy[0])
    predictions = np.array(predictions).squeeze()
    # NOTE(review): despite its name, `predictionstwo` holds the real
    # power labels (plotted as 'real' below).
    predictionstwo = np.array(label).squeeze()
    realtemp = np.array(realtemp).squeeze()
    print(predictions)
    plt.figure()
    plt.plot(predictions, label='predictions')
    plt.plot(predictionstwo, label='real')
    plt.legend()
    plt.show()

    plt.figure()
    plt.plot(realtemp,label='realtemp')
    plt.legend()
    plt.show()

def main(argv=None):
    """Entry point: build the evaluation windows and run the evaluation."""
    windows, labels = get_test_data(TIMESTEPS, 50492, 51192)
    run_eval(windows, labels)


if __name__ == '__main__':
    tf.app.run()

GitHub 链接：（原文链接在转载时丢失，“点击打开链接”为无效的占位文本）