PyTorch Implementation: LSTM for Fire Temperature Prediction

I. Preliminaries

  • Language environment: Python 3.9.18
  • Editor: Jupyter Lab
  • Deep-learning environment: PyTorch 1.12.1

1. Set up the GPU

import torch
import torch.nn as nn
import numpy as np
import pandas as pd

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

device

2. Import the data

df = pd.read_csv("F:/365data/R2/woodpine2.csv")
df.head()
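Since later cells reference the columns Tem1, CO 1, and Soot 1, a quick structural check right after loading can catch a wrong path or header early. A minimal sketch; the exact column list (including a leading time column) is an assumption inferred from the code below:

print(df.shape)
print(df.columns.tolist())  # e.g. ['Time', 'Tem1', 'CO 1', 'Soot 1'] -- the first column is dropped later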

3. Visualize the data

import matplotlib.pyplot as plt
import seaborn as sns

plt.rcParams['savefig.dpi'] = 500
plt.rcParams['figure.dpi'] = 500

fig, ax = plt.subplots(1, 3, constrained_layout=True, figsize=(14, 3))

sns.lineplot(data=df['Tem1'],ax=ax[0])
sns.lineplot(data=df['CO 1'],ax=ax[1])
sns.lineplot(data=df['Soot 1'],ax=ax[2])
plt.show()

data = df.iloc[:, 1:]  # drop the first column; keep the three feature columns
data.head()

II. Building the Dataset

1. Preprocess the data

width_X = 8  # window length: 8 time steps, each step carrying 3 features
width_y = 1  # predict 1 step ahead

X = []
y = []

in_start = 0

for _, _ in data.iterrows():  # iterate once per row of data (the index and row values themselves are unused)
    in_end = in_start + width_X
    out_end = in_end + width_y
    
    if out_end <= len(data):
        X_ = np.array(data.iloc[in_start:in_end])
        X_ = X_.reshape(len(X_) * 3)  # flatten the 8x3 window into a 24-vector
        y_ = np.array(data.iloc[in_end:out_end, 0])  # the next Tem1 value

        X.append(X_)
        y.append(y_)
    
    in_start += 1

X = np.array(X)
y = np.array(y)

X.shape,y.shape
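To see why the loop yields these shapes, here is a tiny self-contained sketch of the same sliding window on toy data (the numbers are illustrative, not from woodpine2.csv):

import numpy as np

toy = np.arange(30).reshape(10, 3)            # 10 rows x 3 features, a stand-in for `data`
w_x, w_y = 8, 1                               # same window sizes as above

n_samples = len(toy) - w_x - w_y + 1          # 10 - 8 - 1 + 1 = 2 windows fit
X_toy = np.array([toy[i:i + w_x].reshape(-1) for i in range(n_samples)])
y_toy = np.array([toy[i + w_x:i + w_x + w_y, 0] for i in range(n_samples)])
print(X_toy.shape, y_toy.shape)               # (2, 24) (2, 1)

With the real data, the same arithmetic gives len(data) - 8 samples, each a flattened 8×3 window paired with the next Tem1 reading.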
2. Normalize the data

from sklearn.preprocessing import MinMaxScaler

# Scale every feature into [0, 1]
sc = MinMaxScaler(feature_range=(0, 1))
X_scaled = sc.fit_transform(X)

# Reshape back into [samples, time steps, features] for the LSTM
X_scaled = X_scaled.reshape((X_scaled.shape[0], width_X, 3))
X_scaled.shape

3. Split the dataset

# np.array(...) makes an explicit copy, so modifying the splits later cannot affect X_scaled
X_train = np.array(X_scaled[:5000]).astype('float64')
y_train = np.array(y[:5000]).astype('float64')

X_test = np.array(X_scaled[5000:]).astype('float64')
y_test = np.array(y[5000:]).astype('float64')

X_train.shape,y_train.shape,X_test.shape,y_test.shape

# Convert the numpy arrays to tensors, then create datasets and data loaders
from torch.utils.data import TensorDataset, DataLoader

X_train_tensor = torch.tensor(X_train, dtype=torch.float32)
y_train_tensor = torch.tensor(y_train, dtype=torch.float32)
X_test_tensor = torch.tensor(X_test, dtype=torch.float32)
y_test_tensor = torch.tensor(y_test, dtype=torch.float32)

train_dataset = TensorDataset(X_train_tensor, y_train_tensor)
test_dataset = TensorDataset(X_test_tensor, y_test_tensor)

train_loader = DataLoader(dataset=train_dataset, batch_size=64, shuffle=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=64, shuffle=False)
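As a quick sanity check, you can pull one batch from the loader and confirm it has the [batch, seq_len, features] layout the LSTM below expects (a minimal sketch using the loaders just built):

X_batch, y_batch = next(iter(train_loader))
print(X_batch.shape)  # torch.Size([64, 8, 3]) -> [batch, seq_len, features]
print(y_batch.shape)  # torch.Size([64, 1])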

III. Model Training

1. Build the model

# A simple two-layer LSTM built with PyTorch.
# With batch_first=True the expected input is [batch_size, seq_len, input_size],
# which matches the tensors built above; batching sequences this way lets the
# LSTM process a whole batch in parallel.
class LSTM(nn.Module):
    def __init__(self, input_size, hidden_size):  # hidden_size: number of units in each LSTM layer
        super(LSTM, self).__init__()
        self.LSTM1 = nn.LSTM(input_size, hidden_size, batch_first=True)   # returns outputs for every time step
        self.LSTM2 = nn.LSTM(hidden_size, hidden_size, batch_first=True)
        self.fc = nn.Linear(hidden_size, 1)

    def forward(self, x):
        out, _ = self.LSTM1(x)
        out, _ = self.LSTM2(out)
        out = self.fc(out[:, -1, :])  # keep only the last time step's output
        return out

model = LSTM(input_size=3, hidden_size=64).to(device)
print(model)
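Before training, a cheap smoke test: push a random dummy batch through the model and confirm it returns one prediction per sequence (a sketch; the batch size of 4 is arbitrary):

dummy = torch.randn(4, 8, 3).to(device)  # [batch, seq_len, features]
with torch.no_grad():
    print(model(dummy).shape)            # torch.Size([4, 1])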

2. Define the training function

def train(dataloader, model, optimizer, loss_fn):
    num_batches = len(dataloader)

    train_loss = 0

    for X, y in dataloader:
        X, y = X.to(device), y.to(device)  # X: [batch_size, seq_len, input_size]
        # Note: batch_first only changes the expected tensor layout;
        # it does not change what the LSTM computes or how well it trains.

        pred = model(X)
        loss = loss_fn(pred, y)

        optimizer.zero_grad()
        loss.backward()
        # Clip the gradient norm to stabilize LSTM training
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.68)
        optimizer.step()

        train_loss += loss.item()

    train_loss /= num_batches

    return train_loss

3. Define the test function

def test(dataloader, model, loss_fn):
    num_batches = len(dataloader)  # number of batches = ceil(dataset size / batch_size)
    test_loss = 0

    # No gradients are needed during evaluation, which saves memory and compute
    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)

            pred = model(X)
            loss = loss_fn(pred, y)

            test_loss += loss.item()

    test_loss /= num_batches

    return test_loss

4. Train the model

# Define the loss function and optimizer
loss_fn = nn.MSELoss()
learn_rate = 1e-3
optimizer = torch.optim.Adam(model.parameters(),lr=learn_rate)

epochs = 200

train_loss=[]
test_loss=[]

for epoch in range(epochs):

    model.train()
    epoch_train_loss = train(train_loader,model,optimizer,loss_fn)

    model.eval()
    epoch_test_loss = test(test_loader,model,loss_fn)

    train_loss.append(epoch_train_loss)
    test_loss.append(epoch_test_loss)

    lr = optimizer.state_dict()['param_groups'][0]['lr']

    template = ('Epoch:{:2d}, Train_loss:{:.3f}, Test_loss:{:.3f}, Lr:{:.2E}')
    print(template.format(epoch+1, epoch_train_loss, epoch_test_loss, lr))


print('Done')
Epoch: 1, Train_loss:20635.831, Test_loss:66297.108, Lr:1.00E-03
Epoch: 2, Train_loss:18829.208, Test_loss:63532.933, Lr:1.00E-03
Epoch: 3, Train_loss:17670.595, Test_loss:60912.115, Lr:1.00E-03
Epoch: 4, Train_loss:16463.469, Test_loss:58382.553, Lr:1.00E-03
Epoch: 5, Train_loss:15398.968, Test_loss:55925.933, Lr:1.00E-03
Epoch: 6, Train_loss:14261.022, Test_loss:53533.631, Lr:1.00E-03
Epoch: 7, Train_loss:13231.828, Test_loss:51205.521, Lr:1.00E-03
Epoch: 8, Train_loss:12314.483, Test_loss:48937.172, Lr:1.00E-03
Epoch: 9, Train_loss:11434.403, Test_loss:46726.929, Lr:1.00E-03
Epoch:10, Train_loss:10620.793, Test_loss:44571.675, Lr:1.00E-03
Epoch:11, Train_loss:9726.007, Test_loss:42470.488, Lr:1.00E-03
Epoch:12, Train_loss:9044.311, Test_loss:40422.014, Lr:1.00E-03
Epoch:13, Train_loss:8366.348, Test_loss:38425.752, Lr:1.00E-03
Epoch:14, Train_loss:7727.216, Test_loss:36481.379, Lr:1.00E-03
Epoch:15, Train_loss:7245.731, Test_loss:34473.627, Lr:1.00E-03
Epoch:16, Train_loss:6608.261, Test_loss:32571.641, Lr:1.00E-03
Epoch:17, Train_loss:6160.780, Test_loss:30739.927, Lr:1.00E-03
Epoch:18, Train_loss:5720.138, Test_loss:28968.774, Lr:1.00E-03
Epoch:19, Train_loss:5338.054, Test_loss:27254.684, Lr:1.00E-03
Epoch:20, Train_loss:5041.359, Test_loss:25596.053, Lr:1.00E-03
Epoch:21, Train_loss:4758.801, Test_loss:23995.714, Lr:1.00E-03
Epoch:22, Train_loss:4538.454, Test_loss:22509.405, Lr:1.00E-03
Epoch:23, Train_loss:4429.614, Test_loss:21245.318, Lr:1.00E-03
Epoch:24, Train_loss:4300.475, Test_loss:20142.770, Lr:1.00E-03
Epoch:25, Train_loss:4248.188, Test_loss:19199.139, Lr:1.00E-03
Epoch:26, Train_loss:4179.910, Test_loss:19461.267, Lr:1.00E-03
Epoch:27, Train_loss:2634.756, Test_loss:17759.148, Lr:1.00E-03
Epoch:28, Train_loss:1996.072, Test_loss:16587.582, Lr:1.00E-03
Epoch:29, Train_loss:1746.003, Test_loss:15413.238, Lr:1.00E-03
Epoch:30, Train_loss:1531.197, Test_loss:14248.107, Lr:1.00E-03
Epoch:31, Train_loss:1332.269, Test_loss:13128.375, Lr:1.00E-03
Epoch:32, Train_loss:1143.767, Test_loss:12075.063, Lr:1.00E-03
Epoch:33, Train_loss:1002.286, Test_loss:11076.224, Lr:1.00E-03
Epoch:34, Train_loss:850.997, Test_loss:10158.073, Lr:1.00E-03
Epoch:35, Train_loss:718.919, Test_loss:9335.331, Lr:1.00E-03
Epoch:36, Train_loss:601.860, Test_loss:8519.326, Lr:1.00E-03
Epoch:37, Train_loss:511.991, Test_loss:7720.966, Lr:1.00E-03
Epoch:38, Train_loss:428.137, Test_loss:7051.985, Lr:1.00E-03
Epoch:39, Train_loss:356.029, Test_loss:6445.943, Lr:1.00E-03
Epoch:40, Train_loss:304.774, Test_loss:5913.033, Lr:1.00E-03
Epoch:41, Train_loss:253.031, Test_loss:5382.602, Lr:1.00E-03
Epoch:42, Train_loss:205.955, Test_loss:4926.450, Lr:1.00E-03
Epoch:43, Train_loss:179.406, Test_loss:4532.605, Lr:1.00E-03
Epoch:44, Train_loss:149.479, Test_loss:4186.195, Lr:1.00E-03
Epoch:45, Train_loss:130.584, Test_loss:3811.634, Lr:1.00E-03
Epoch:46, Train_loss:106.846, Test_loss:3508.918, Lr:1.00E-03
Epoch:47, Train_loss:85.672, Test_loss:3221.941, Lr:1.00E-03
Epoch:48, Train_loss:70.628, Test_loss:2974.503, Lr:1.00E-03
Epoch:49, Train_loss:62.967, Test_loss:2765.389, Lr:1.00E-03
Epoch:50, Train_loss:54.679, Test_loss:2614.472, Lr:1.00E-03
Epoch:51, Train_loss:46.520, Test_loss:2457.512, Lr:1.00E-03
Epoch:52, Train_loss:46.700, Test_loss:2330.749, Lr:1.00E-03
Epoch:53, Train_loss:42.089, Test_loss:2187.785, Lr:1.00E-03
Epoch:54, Train_loss:39.379, Test_loss:2060.647, Lr:1.00E-03
Epoch:55, Train_loss:32.344, Test_loss:1976.932, Lr:1.00E-03
Epoch:56, Train_loss:28.527, Test_loss:1879.812, Lr:1.00E-03
Epoch:57, Train_loss:30.393, Test_loss:1791.903, Lr:1.00E-03
Epoch:58, Train_loss:19.974, Test_loss:1697.742, Lr:1.00E-03
Epoch:59, Train_loss:19.505, Test_loss:1627.917, Lr:1.00E-03
Epoch:60, Train_loss:24.052, Test_loss:1549.823, Lr:1.00E-03
Epoch:61, Train_loss:17.172, Test_loss:1471.113, Lr:1.00E-03
Epoch:62, Train_loss:22.835, Test_loss:1411.666, Lr:1.00E-03
Epoch:63, Train_loss:17.834, Test_loss:1358.213, Lr:1.00E-03
Epoch:64, Train_loss:16.190, Test_loss:1306.348, Lr:1.00E-03
Epoch:65, Train_loss:10.257, Test_loss:1267.215, Lr:1.00E-03
Epoch:66, Train_loss:11.787, Test_loss:1220.384, Lr:1.00E-03
Epoch:67, Train_loss:11.928, Test_loss:1173.339, Lr:1.00E-03
Epoch:68, Train_loss:10.096, Test_loss:1140.230, Lr:1.00E-03
Epoch:69, Train_loss:12.373, Test_loss:1109.372, Lr:1.00E-03
Epoch:70, Train_loss:8.956, Test_loss:1082.618, Lr:1.00E-03
Epoch:71, Train_loss:10.484, Test_loss:1053.763, Lr:1.00E-03
Epoch:72, Train_loss:9.285, Test_loss:1028.577, Lr:1.00E-03
Epoch:73, Train_loss:10.079, Test_loss:1005.900, Lr:1.00E-03
Epoch:74, Train_loss:8.466, Test_loss:984.480, Lr:1.00E-03
Epoch:75, Train_loss:11.057, Test_loss:958.131, Lr:1.00E-03
Epoch:76, Train_loss:9.518, Test_loss:935.754, Lr:1.00E-03
Epoch:77, Train_loss:7.988, Test_loss:917.656, Lr:1.00E-03
Epoch:78, Train_loss:12.262, Test_loss:893.840, Lr:1.00E-03
Epoch:79, Train_loss:10.581, Test_loss:876.436, Lr:1.00E-03
Epoch:80, Train_loss:8.653, Test_loss:857.843, Lr:1.00E-03
Epoch:81, Train_loss:7.925, Test_loss:841.759, Lr:1.00E-03
Epoch:82, Train_loss:8.069, Test_loss:829.125, Lr:1.00E-03
Epoch:83, Train_loss:11.105, Test_loss:811.239, Lr:1.00E-03
Epoch:84, Train_loss:6.694, Test_loss:796.871, Lr:1.00E-03
Epoch:85, Train_loss:9.938, Test_loss:782.340, Lr:1.00E-03
Epoch:86, Train_loss:6.365, Test_loss:767.827, Lr:1.00E-03
Epoch:87, Train_loss:8.319, Test_loss:752.654, Lr:1.00E-03
Epoch:88, Train_loss:7.610, Test_loss:732.174, Lr:1.00E-03
Epoch:89, Train_loss:9.572, Test_loss:720.828, Lr:1.00E-03
Epoch:90, Train_loss:8.744, Test_loss:701.906, Lr:1.00E-03
Epoch:91, Train_loss:8.735, Test_loss:679.722, Lr:1.00E-03
Epoch:92, Train_loss:6.629, Test_loss:674.427, Lr:1.00E-03
Epoch:93, Train_loss:7.194, Test_loss:656.849, Lr:1.00E-03
Epoch:94, Train_loss:6.989, Test_loss:636.975, Lr:1.00E-03
Epoch:95, Train_loss:7.029, Test_loss:620.471, Lr:1.00E-03
Epoch:96, Train_loss:8.883, Test_loss:607.685, Lr:1.00E-03
Epoch:97, Train_loss:9.923, Test_loss:581.866, Lr:1.00E-03
Epoch:98, Train_loss:6.538, Test_loss:572.428, Lr:1.00E-03
Epoch:99, Train_loss:8.349, Test_loss:554.513, Lr:1.00E-03
Epoch:100, Train_loss:7.702, Test_loss:544.101, Lr:1.00E-03
Epoch:101, Train_loss:5.517, Test_loss:519.929, Lr:1.00E-03
Epoch:102, Train_loss:5.773, Test_loss:512.505, Lr:1.00E-03
Epoch:103, Train_loss:6.067, Test_loss:495.514, Lr:1.00E-03
Epoch:104, Train_loss:4.833, Test_loss:491.224, Lr:1.00E-03
Epoch:105, Train_loss:7.961, Test_loss:473.603, Lr:1.00E-03
Epoch:106, Train_loss:5.927, Test_loss:452.194, Lr:1.00E-03
Epoch:107, Train_loss:5.463, Test_loss:442.340, Lr:1.00E-03
Epoch:108, Train_loss:5.609, Test_loss:417.340, Lr:1.00E-03
Epoch:109, Train_loss:9.296, Test_loss:395.328, Lr:1.00E-03
Epoch:110, Train_loss:5.264, Test_loss:382.694, Lr:1.00E-03
Epoch:111, Train_loss:4.928, Test_loss:377.233, Lr:1.00E-03
Epoch:112, Train_loss:4.505, Test_loss:376.018, Lr:1.00E-03
Epoch:113, Train_loss:4.843, Test_loss:365.014, Lr:1.00E-03
Epoch:114, Train_loss:4.353, Test_loss:355.474, Lr:1.00E-03
Epoch:115, Train_loss:4.379, Test_loss:331.388, Lr:1.00E-03
Epoch:116, Train_loss:4.028, Test_loss:326.193, Lr:1.00E-03
Epoch:117, Train_loss:3.886, Test_loss:320.433, Lr:1.00E-03
Epoch:118, Train_loss:4.402, Test_loss:317.491, Lr:1.00E-03
Epoch:119, Train_loss:5.047, Test_loss:311.838, Lr:1.00E-03
Epoch:120, Train_loss:5.796, Test_loss:288.153, Lr:1.00E-03
Epoch:121, Train_loss:4.249, Test_loss:294.147, Lr:1.00E-03
Epoch:122, Train_loss:4.291, Test_loss:271.253, Lr:1.00E-03
Epoch:123, Train_loss:3.870, Test_loss:259.648, Lr:1.00E-03
Epoch:124, Train_loss:5.571, Test_loss:255.828, Lr:1.00E-03
Epoch:125, Train_loss:7.542, Test_loss:249.424, Lr:1.00E-03
Epoch:126, Train_loss:4.281, Test_loss:228.701, Lr:1.00E-03
Epoch:127, Train_loss:3.593, Test_loss:228.517, Lr:1.00E-03
Epoch:128, Train_loss:4.849, Test_loss:233.302, Lr:1.00E-03
Epoch:129, Train_loss:2.678, Test_loss:214.361, Lr:1.00E-03
Epoch:130, Train_loss:3.843, Test_loss:229.420, Lr:1.00E-03
Epoch:131, Train_loss:5.235, Test_loss:227.116, Lr:1.00E-03
Epoch:132, Train_loss:3.353, Test_loss:210.958, Lr:1.00E-03
Epoch:133, Train_loss:3.567, Test_loss:201.080, Lr:1.00E-03
Epoch:134, Train_loss:3.009, Test_loss:200.930, Lr:1.00E-03
Epoch:135, Train_loss:2.630, Test_loss:216.823, Lr:1.00E-03
Epoch:136, Train_loss:4.214, Test_loss:221.735, Lr:1.00E-03
Epoch:137, Train_loss:3.353, Test_loss:186.374, Lr:1.00E-03
Epoch:138, Train_loss:2.607, Test_loss:214.251, Lr:1.00E-03
Epoch:139, Train_loss:2.212, Test_loss:196.602, Lr:1.00E-03
Epoch:140, Train_loss:3.035, Test_loss:186.465, Lr:1.00E-03
Epoch:141, Train_loss:2.845, Test_loss:210.534, Lr:1.00E-03
Epoch:142, Train_loss:3.348, Test_loss:199.425, Lr:1.00E-03
Epoch:143, Train_loss:2.547, Test_loss:201.113, Lr:1.00E-03
Epoch:144, Train_loss:1.970, Test_loss:193.439, Lr:1.00E-03
Epoch:145, Train_loss:3.030, Test_loss:191.147, Lr:1.00E-03
Epoch:146, Train_loss:2.216, Test_loss:192.023, Lr:1.00E-03
Epoch:147, Train_loss:2.169, Test_loss:192.685, Lr:1.00E-03
Epoch:148, Train_loss:1.994, Test_loss:172.968, Lr:1.00E-03
Epoch:149, Train_loss:2.770, Test_loss:167.034, Lr:1.00E-03
Epoch:150, Train_loss:2.130, Test_loss:176.286, Lr:1.00E-03
Epoch:151, Train_loss:2.506, Test_loss:172.341, Lr:1.00E-03
Epoch:152, Train_loss:1.665, Test_loss:180.329, Lr:1.00E-03
Epoch:153, Train_loss:1.732, Test_loss:163.748, Lr:1.00E-03
Epoch:154, Train_loss:2.366, Test_loss:176.554, Lr:1.00E-03
Epoch:155, Train_loss:1.583, Test_loss:187.248, Lr:1.00E-03
Epoch:156, Train_loss:2.278, Test_loss:172.116, Lr:1.00E-03
Epoch:157, Train_loss:2.383, Test_loss:173.419, Lr:1.00E-03
Epoch:158, Train_loss:1.524, Test_loss:152.646, Lr:1.00E-03
Epoch:159, Train_loss:1.687, Test_loss:169.870, Lr:1.00E-03
Epoch:160, Train_loss:1.767, Test_loss:165.409, Lr:1.00E-03
Epoch:161, Train_loss:1.922, Test_loss:179.328, Lr:1.00E-03
Epoch:162, Train_loss:1.458, Test_loss:167.239, Lr:1.00E-03
Epoch:163, Train_loss:1.961, Test_loss:141.942, Lr:1.00E-03
Epoch:164, Train_loss:1.508, Test_loss:153.061, Lr:1.00E-03
Epoch:165, Train_loss:1.582, Test_loss:146.338, Lr:1.00E-03
Epoch:166, Train_loss:1.552, Test_loss:135.282, Lr:1.00E-03
Epoch:167, Train_loss:1.197, Test_loss:142.602, Lr:1.00E-03
Epoch:168, Train_loss:1.311, Test_loss:156.572, Lr:1.00E-03
Epoch:169, Train_loss:1.642, Test_loss:144.998, Lr:1.00E-03
Epoch:170, Train_loss:1.879, Test_loss:134.414, Lr:1.00E-03
Epoch:171, Train_loss:2.269, Test_loss:138.463, Lr:1.00E-03
Epoch:172, Train_loss:1.781, Test_loss:136.314, Lr:1.00E-03
Epoch:173, Train_loss:1.795, Test_loss:135.326, Lr:1.00E-03
Epoch:174, Train_loss:1.487, Test_loss:122.718, Lr:1.00E-03
Epoch:175, Train_loss:1.544, Test_loss:155.864, Lr:1.00E-03
Epoch:176, Train_loss:1.739, Test_loss:130.433, Lr:1.00E-03
Epoch:177, Train_loss:1.075, Test_loss:144.390, Lr:1.00E-03
Epoch:178, Train_loss:1.438, Test_loss:147.833, Lr:1.00E-03
Epoch:179, Train_loss:1.276, Test_loss:116.244, Lr:1.00E-03
Epoch:180, Train_loss:1.161, Test_loss:114.795, Lr:1.00E-03
Epoch:181, Train_loss:1.565, Test_loss:125.655, Lr:1.00E-03
Epoch:182, Train_loss:1.339, Test_loss:111.128, Lr:1.00E-03
Epoch:183, Train_loss:1.753, Test_loss:113.491, Lr:1.00E-03
Epoch:184, Train_loss:1.373, Test_loss:120.987, Lr:1.00E-03
Epoch:185, Train_loss:1.343, Test_loss:113.305, Lr:1.00E-03
Epoch:186, Train_loss:0.947, Test_loss:105.335, Lr:1.00E-03
Epoch:187, Train_loss:1.363, Test_loss:92.125, Lr:1.00E-03
Epoch:188, Train_loss:1.680, Test_loss:103.017, Lr:1.00E-03
Epoch:189, Train_loss:0.978, Test_loss:96.335, Lr:1.00E-03
Epoch:190, Train_loss:0.987, Test_loss:94.008, Lr:1.00E-03
Epoch:191, Train_loss:0.949, Test_loss:119.769, Lr:1.00E-03
Epoch:192, Train_loss:1.196, Test_loss:82.924, Lr:1.00E-03
Epoch:193, Train_loss:0.978, Test_loss:90.593, Lr:1.00E-03
Epoch:194, Train_loss:1.298, Test_loss:92.257, Lr:1.00E-03
Epoch:195, Train_loss:0.925, Test_loss:99.024, Lr:1.00E-03
Epoch:196, Train_loss:1.341, Test_loss:94.766, Lr:1.00E-03
Epoch:197, Train_loss:1.194, Test_loss:88.215, Lr:1.00E-03
Epoch:198, Train_loss:1.547, Test_loss:98.698, Lr:1.00E-03
Epoch:199, Train_loss:0.989, Test_loss:93.121, Lr:1.00E-03
Epoch:200, Train_loss:0.902, Test_loss:80.574, Lr:1.00E-03
Done

IV. Model Evaluation

import matplotlib.pyplot as plt

plt.rcParams['figure.dpi'] = 100

epochs_range = range(epochs)
plt.plot(epochs_range,train_loss,label = 'Training Loss')
plt.plot(epochs_range,test_loss,label = 'Test Loss')
plt.legend(loc = 'upper right')
plt.title('Training and Test Loss')
plt.show()


Making predictions with the model

Xtest = X_test_tensor.to(device)
model.eval()
with torch.no_grad():
    predicted_y_lstm = model(Xtest)
# Move the predictions to the CPU and convert to a numpy array
predicted_y_lstm = predicted_y_lstm.cpu().numpy()

y_test_one = [i[0] for i in y_test_tensor]
predicted_y_lstm_one = [i[0] for i in predicted_y_lstm]

plt.figure(figsize=(5, 3), dpi=120)
plt.plot(y_test_one, color='red', label='Ground truth')
plt.plot(predicted_y_lstm_one, color='blue', label='Prediction')
plt.title('LSTM predictions')
plt.xlabel('X')
plt.ylabel('Y')
plt.legend()
plt.show()

R² evaluation

from sklearn import metrics

# sklearn convention: ground truth first, predictions second
RMSE_lstm = metrics.mean_squared_error(y_test_tensor, predicted_y_lstm_one) ** 0.5
R2_lstm = metrics.r2_score(y_test_tensor, predicted_y_lstm_one)

print('RMSE: %.5f' % RMSE_lstm)
print('R2: %.5f' % R2_lstm)
RMSE: 7.19652
R2: 0.63034
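For reference, both metrics can also be computed by hand with numpy, which makes the formulas explicit. A minimal sketch that reuses y_test_one and predicted_y_lstm_one from above:

import numpy as np

y_true = np.array([float(v) for v in y_test_one])
y_pred = np.array([float(v) for v in predicted_y_lstm_one])

rmse = np.sqrt(np.mean((y_true - y_pred) ** 2))   # root-mean-square error
ss_res = np.sum((y_true - y_pred) ** 2)           # residual sum of squares
ss_tot = np.sum((y_true - y_true.mean()) ** 2)    # total sum of squares
r2 = 1 - ss_res / ss_tot                          # coefficient of determination
print('RMSE: %.5f  R2: %.5f' % (rmse, r2))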

Summary

  • Compared with the RNN version, the PyTorch code simply replaces nn.RNN() with nn.LSTM(); the constructor arguments are essentially unchanged
  • Conceptually, an LSTM is an upgraded RNN that adds a gated cell state; see the equations after this list
    • The input gate decides how much of the new candidate information is written into the cell state
    • The forget gate discards information that is no longer needed
    • The output gate decides what is passed on to the next step as the hidden state
  • Compared with a plain RNN, an LSTM can retain information over much longer spans and effectively mitigates the vanishing-gradient problem (exploding gradients are handled here by gradient clipping)
  • My model's predictions are fairly accurate at the start but drift noticeably later on; after comparing with K同学's code, the likely cause is that the hidden layer size was set too small
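
For completeness, the standard LSTM gate equations behind the bullets above ($x_t$ is the input, $h_t$ the hidden state, $c_t$ the cell state, $\sigma$ the sigmoid, $\odot$ element-wise multiplication):

$$
\begin{aligned}
f_t &= \sigma(W_f [h_{t-1}, x_t] + b_f) && \text{forget gate} \\
i_t &= \sigma(W_i [h_{t-1}, x_t] + b_i) && \text{input gate} \\
\tilde{c}_t &= \tanh(W_c [h_{t-1}, x_t] + b_c) && \text{candidate cell state} \\
c_t &= f_t \odot c_{t-1} + i_t \odot \tilde{c}_t && \text{cell-state update} \\
o_t &= \sigma(W_o [h_{t-1}, x_t] + b_o) && \text{output gate} \\
h_t &= o_t \odot \tanh(c_t) && \text{hidden state}
\end{aligned}
$$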