Week R2: LSTM for Fire Temperature Prediction (Training Check-in)

import tensorflow as tf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

df_1 = pd.read_csv("F://ZSQ/R2.csv")

plt.rcParams['savefig.dpi'] = 500  # resolution of saved figures
plt.rcParams['figure.dpi']  = 500  # display resolution

fig, ax = plt.subplots(1, 3, constrained_layout=True, figsize=(14, 3))

sns.lineplot(data=df_1["Tem1"], ax=ax[0])
sns.lineplot(data=df_1["CO 1"], ax=ax[1])
sns.lineplot(data=df_1["Soot 1"], ax=ax[2])
plt.show()

[Figure: line plots of Tem1, CO 1, and Soot 1 over time]

dataFrame = df_1.iloc[:, 1:]  # keep only the three feature columns (drop the first, index-like column)
dataFrame
       Tem1      CO 1    Soot 1
0      25.0  0.000000  0.000000
1      25.0  0.000000  0.000000
2      25.0  0.000000  0.000000
3      25.0  0.000000  0.000000
4      25.0  0.000000  0.000000
...     ...       ...       ...
5943  295.0  0.000077  0.000496
5944  294.0  0.000077  0.000494
5945  292.0  0.000077  0.000491
5946  291.0  0.000076  0.000489
5947  290.0  0.000076  0.000487

5948 rows × 3 columns

width_X = 8   # input window: 8 consecutive time steps
width_y = 1   # prediction horizon: the next single step

X = []
y = []

# Slide a window over the series: each sample flattens 8 steps of all
# 3 features into 24 values and targets the next step's Tem1.
for in_start in range(len(dataFrame)):
    in_end  = in_start + width_X
    out_end = in_end + width_y

    if out_end < len(dataFrame):
        X_ = np.array(dataFrame.iloc[in_start:in_end, :])
        X_ = X_.reshape(len(X_) * 3)
        y_ = np.array(dataFrame.iloc[in_end:out_end, 0])

        X.append(X_)
        y.append(y_)

X = np.array(X)
y = np.array(y)

X.shape, y.shape
((5939, 24), (5939, 1))
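As an aside, the same windows can be built without an explicit Python loop. The following is a minimal sketch, assuming numpy >= 1.20 for sliding_window_view; X_alt and y_alt are illustrative names used only for the cross-check.

from numpy.lib.stride_tricks import sliding_window_view

data = dataFrame.to_numpy()                            # (5948, 3)
win  = sliding_window_view(data, width_X, axis=0)      # (5941, 3, 8)
win  = win.transpose(0, 2, 1)                          # (5941, 8, 3)

n     = len(data) - width_X - width_y                  # 5939 samples, as above
X_alt = win[:n].reshape(n, width_X * 3)                # flattened windows
y_alt = data[width_X : width_X + n, 0].reshape(-1, 1)  # next-step Tem1

assert np.allclose(X_alt, X) and np.allclose(y_alt, y)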
from sklearn.preprocessing import MinMaxScaler

# Normalize the data to the range [0, 1]
sc       = MinMaxScaler(feature_range=(0, 1))
X_scaled = sc.fit_transform(X)
X_scaled.shape
(5939, 24)
X_scaled = X_scaled.reshape(len(X_scaled), width_X, 3)  # (samples, timesteps, features) for the LSTM
X_scaled.shape
(5939, 8, 3)
# First 5000 windows for training, the remaining 939 for testing
X_train = np.array(X_scaled[:5000]).astype('float64')
y_train = np.array(y[:5000]).astype('float64')

X_test  = np.array(X_scaled[5000:]).astype('float64')
y_test  = np.array(y[5000:]).astype('float64')
X_train.shape
(5000, 8, 3)
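One caveat: the scaler above was fit on all 5939 windows before the split, so the test set's minima and maxima leak into the normalization. A minimal leakage-free sketch (sc_tr, X_train_alt, X_test_alt are illustrative names) fits the scaler on the training portion only:

split = 5000
sc_tr = MinMaxScaler(feature_range=(0, 1))
X_train_alt = sc_tr.fit_transform(X[:split]).reshape(-1, width_X, 3)
X_test_alt  = sc_tr.transform(X[split:]).reshape(-1, width_X, 3)  # reuse training min/max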
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM

# Stacked two-layer LSTM
model_lstm = Sequential()
model_lstm.add(LSTM(units=64, activation='relu', return_sequences=True,
                    input_shape=(X_train.shape[1], 3)))
model_lstm.add(LSTM(units=64, activation='relu'))
model_lstm.add(Dense(width_y))

# Only the loss is tracked, so no metrics are specified
model_lstm.compile(optimizer=tf.keras.optimizers.Adam(1e-3),
                   loss='mean_squared_error')  # mean squared error as the loss
X_train.shape, y_train.shape
((5000, 8, 3), (5000, 1))
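Before fitting, it can help to confirm the architecture; Keras prints each layer's output shape and parameter count via:

model_lstm.summary()  # two 64-unit LSTM layers followed by a 1-unit Dense head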
history_lstm = model_lstm.fit(X_train, y_train, 
                         batch_size=64, 
                         epochs=40, 
                         validation_data=(X_test, y_test),
                         validation_freq=1)
Epoch 1/40
79/79 [==============================] - 3s 15ms/step - loss: 11661.5508 - val_loss: 3187.5928
Epoch 2/40
79/79 [==============================] - 1s 9ms/step - loss: 198.4161 - val_loss: 878.1593
Epoch 3/40
79/79 [==============================] - 1s 9ms/step - loss: 26.0834 - val_loss: 358.6115
Epoch 4/40
79/79 [==============================] - 1s 9ms/step - loss: 10.6357 - val_loss: 318.4326
Epoch 5/40
79/79 [==============================] - 1s 9ms/step - loss: 10.1192 - val_loss: 338.7864
Epoch 6/40
79/79 [==============================] - 1s 10ms/step - loss: 9.7892 - val_loss: 322.2624
Epoch 7/40
79/79 [==============================] - 1s 9ms/step - loss: 8.6253 - val_loss: 290.0067
Epoch 8/40
79/79 [==============================] - 1s 9ms/step - loss: 8.7918 - val_loss: 281.0205
Epoch 9/40
79/79 [==============================] - 1s 9ms/step - loss: 8.5656 - val_loss: 290.8271
Epoch 10/40
79/79 [==============================] - 1s 9ms/step - loss: 8.6940 - val_loss: 183.6400
Epoch 11/40
79/79 [==============================] - 1s 9ms/step - loss: 9.4659 - val_loss: 264.0771
Epoch 12/40
79/79 [==============================] - 1s 9ms/step - loss: 8.1948 - val_loss: 203.8069
Epoch 13/40
79/79 [==============================] - 1s 9ms/step - loss: 8.9887 - val_loss: 245.1120
Epoch 14/40
79/79 [==============================] - 1s 9ms/step - loss: 8.0976 - val_loss: 248.8491
Epoch 15/40
79/79 [==============================] - 1s 9ms/step - loss: 8.1635 - val_loss: 242.8655
Epoch 16/40
79/79 [==============================] - 1s 9ms/step - loss: 8.5177 - val_loss: 259.7970
Epoch 17/40
79/79 [==============================] - 1s 9ms/step - loss: 7.8439 - val_loss: 191.9199
Epoch 18/40
79/79 [==============================] - 1s 10ms/step - loss: 8.0736 - val_loss: 193.8541
Epoch 19/40
79/79 [==============================] - 1s 10ms/step - loss: 8.5479 - val_loss: 232.2395
Epoch 20/40
79/79 [==============================] - 1s 10ms/step - loss: 7.9623 - val_loss: 177.6173
Epoch 21/40
79/79 [==============================] - 1s 10ms/step - loss: 7.6488 - val_loss: 170.0088
Epoch 22/40
79/79 [==============================] - 1s 10ms/step - loss: 9.5648 - val_loss: 197.5257
Epoch 23/40
79/79 [==============================] - 1s 10ms/step - loss: 8.6869 - val_loss: 259.8324
Epoch 24/40
79/79 [==============================] - 1s 10ms/step - loss: 7.9198 - val_loss: 395.1705
Epoch 25/40
79/79 [==============================] - 1s 10ms/step - loss: 11.2300 - val_loss: 205.4825
Epoch 26/40
79/79 [==============================] - 1s 9ms/step - loss: 7.9246 - val_loss: 123.5014
Epoch 27/40
79/79 [==============================] - 1s 9ms/step - loss: 7.8618 - val_loss: 264.1580
Epoch 28/40
79/79 [==============================] - 1s 9ms/step - loss: 8.4603 - val_loss: 173.8278
Epoch 29/40
79/79 [==============================] - 1s 9ms/step - loss: 7.1544 - val_loss: 200.6295
Epoch 30/40
79/79 [==============================] - 1s 9ms/step - loss: 7.0219 - val_loss: 104.8911
Epoch 31/40
79/79 [==============================] - 1s 10ms/step - loss: 8.2098 - val_loss: 104.6629
Epoch 32/40
79/79 [==============================] - 1s 9ms/step - loss: 7.5574 - val_loss: 202.5753
Epoch 33/40
79/79 [==============================] - 1s 9ms/step - loss: 7.3496 - val_loss: 103.5376
Epoch 34/40
79/79 [==============================] - 1s 9ms/step - loss: 7.0397 - val_loss: 150.2891
Epoch 35/40
79/79 [==============================] - 1s 9ms/step - loss: 6.6856 - val_loss: 111.4385
Epoch 36/40
79/79 [==============================] - 1s 9ms/step - loss: 6.7749 - val_loss: 102.8437
Epoch 37/40
79/79 [==============================] - 1s 10ms/step - loss: 7.7543 - val_loss: 117.2417
Epoch 38/40
79/79 [==============================] - 1s 9ms/step - loss: 7.3038 - val_loss: 162.2436
Epoch 39/40
79/79 [==============================] - 1s 9ms/step - loss: 6.8557 - val_loss: 113.8060
Epoch 40/40
79/79 [==============================] - 1s 10ms/step - loss: 6.5005 - val_loss: 198.0052
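Note that val_loss fluctuates considerably from epoch to epoch (e.g. 103.5 at epoch 33 vs. 198.0 at epoch 40), so the final weights are not necessarily the best ones seen. A minimal sketch using standard Keras callbacks to keep the best epoch (the checkpoint filename is illustrative):

callbacks = [
    tf.keras.callbacks.ModelCheckpoint('best_lstm.h5', monitor='val_loss',
                                       save_best_only=True),
    tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10,
                                     restore_best_weights=True),
]
history_lstm = model_lstm.fit(X_train, y_train,
                              batch_size=64, epochs=40,
                              validation_data=(X_test, y_test),
                              callbacks=callbacks)

Strictly speaking, early stopping should be driven by a validation split separate from the test set; here the test set doubles as validation data, as in the original fit call.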
# Make Matplotlib render minus signs (and any CJK labels) correctly
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

plt.figure(figsize=(5, 3), dpi=120)

plt.plot(history_lstm.history['loss']    , label='LSTM Training Loss')
plt.plot(history_lstm.history['val_loss'], label='LSTM Validation Loss')

plt.title('Training and Validation Loss')
plt.legend()
plt.show()

[Figure: LSTM training and validation loss curves]
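Because the first-epoch loss (about 11661) dwarfs everything that follows, the linear-scale curve flattens out after epoch 2; a log-scale y-axis makes the later epochs easier to read:

plt.figure(figsize=(5, 3), dpi=120)
plt.plot(history_lstm.history['loss'],     label='LSTM Training Loss')
plt.plot(history_lstm.history['val_loss'], label='LSTM Validation Loss')
plt.yscale('log')  # spread out the small late-epoch losses
plt.title('Training and Validation Loss (log scale)')
plt.legend()
plt.show()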

predicted_y_lstm = model_lstm.predict(X_test)  # run the model on the test set

y_test_one           = [i[0] for i in y_test]
predicted_y_lstm_one = [i[0] for i in predicted_y_lstm]

plt.figure(figsize=(5, 3), dpi=120)
# Plot the true and predicted curves for the first 1000 test points
plt.plot(y_test_one[:1000], color='red', label='Ground truth')
plt.plot(predicted_y_lstm_one[:1000], color='blue', label='Prediction')

plt.title('Predicted vs. true Tem1 on the test set')
plt.xlabel('Time step')
plt.ylabel('Tem1')
plt.legend()
plt.show()
30/30 [==============================] - 1s 3ms/step

[Figure: predicted vs. true Tem1 curves on the test set]

from sklearn import metrics
"""
RMSE :均方根误差  ----->  对均方误差开方
R2   :决定系数,可以简单理解为反映模型拟合优度的重要的统计量
"""
RMSE_lstm  = metrics.mean_squared_error(predicted_y_lstm, y_test)**0.5
R2_lstm    = metrics.r2_score(predicted_y_lstm, y_test)

print('均方根误差: %.5f' % RMSE_lstm)
print('R2: %.5f' % R2_lstm)
RMSE: 14.07143
R2: 0.59414
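For context, these numbers are worth comparing against a trivial persistence baseline that predicts Tem1(t+1) = Tem1(t); the LSTM should beat it clearly. A minimal sketch, where column (width_X - 1) * 3 of the unscaled X picks Tem1 at the last input step of each window:

naive_pred = X[5000:, (width_X - 1) * 3]  # last observed Tem1 in each window
RMSE_naive = metrics.mean_squared_error(y_test.ravel(), naive_pred) ** 0.5
R2_naive   = metrics.r2_score(y_test.ravel(), naive_pred)
print('Persistence RMSE: %.5f' % RMSE_naive)
print('Persistence R2:   %.5f' % R2_naive)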
