LSTM-based bearing remaining useful life (RUL) prediction

1. Load the dataset

import numpy as np
import torch
from torch import nn
import matplotlib.pyplot as plt
import pandas as pd

all_date = pd.read_csv(u"D:/故障诊断数据集/西交轴承数据/XJTU-SY_Bearing_Datasets/Data/XJTU-SY_Bearing_Datasets/35Hz12kN/Bearing1_1/all.csv")
all_date['RUL'] = np.arange(len(all_date)) / len(all_date)   # linear degradation label rising from 0 towards 1 over the whole run
all_dateNd = all_date.values
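
The code above reads a single all.csv, but the raw XJTU-SY data is distributed as one CSV file per minute of recording. A plausible preprocessing sketch that concatenates those files into all.csv is shown below (reusing the pandas import above); it is an assumption, not the author's script, and the 1.csv, 2.csv, ... file naming and two-channel column layout of the raw files are assumed:

# Hypothetical preprocessing: concatenate the per-minute recordings of Bearing1_1 into one all.csv
import glob
import os

raw_dir = u"D:/故障诊断数据集/西交轴承数据/XJTU-SY_Bearing_Datasets/Data/XJTU-SY_Bearing_Datasets/35Hz12kN/Bearing1_1"
files = sorted(glob.glob(os.path.join(raw_dir, "[0-9]*.csv")),
               key=lambda p: int(os.path.splitext(os.path.basename(p))[0]))   # 1.csv, 2.csv, ... in time order
frames = [pd.read_csv(f) for f in files]                                      # each file holds the two vibration channels
pd.concat(frames, ignore_index=True).to_csv(os.path.join(raw_dir, "all.csv"), index=False)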

2. Build the LSTM network

class LstmRNN(nn.Module):
    """
        Parameters:
        - input_size: feature size
        - hidden_size: number of hidden units
        - output_size: number of output
        - num_layers: layers of LSTM to stack
    """

    def __init__(self, input_size, hidden_size=1, output_size=4, num_layers=1):
        super().__init__()

        self.lstm = nn.LSTM(input_size, hidden_size, num_layers)  # nn.LSTM with the default batch_first=False, so inputs are (seq_len, batch, input_size)
        self.forwardCalculation = nn.Linear(hidden_size, output_size)

    def forward(self, _x):
        x, _ = self.lstm(_x)  # _x is input, size (seq_len, batch, input_size)
        s, b, h = x.shape  # x is output, size (seq_len, batch, hidden_size)
        x = x.view(s * b, h)
        x = self.forwardCalculation(x)
        x = x.view(s, b, -1)
        return x
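
As a quick sanity check (not part of the original post), the module can be fed a random tensor to confirm the (seq_len, batch, input_size) layout the forward pass expects:

# Minimal shape check with made-up sizes: 30 time steps, a batch of 14 sequences, 2 features
model = LstmRNN(input_size=2, hidden_size=16, output_size=1, num_layers=1)
dummy = torch.randn(30, 14, 2)
out = model(dummy)
print(out.shape)   # expected: torch.Size([30, 14, 1])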

3. Data processing and network training

if __name__ == '__main__':
    # build the combined dataset
    data_len = len(all_dateNd)
    data_x = all_dateNd[:, 0:2]    # 2-D input: horizontal and vertical vibration
    data_y = all_dateNd[:, 2]      # output: RUL label

    dataset = np.zeros((data_len, 3))    # one array holding both inputs and output
    dataset[:, 0] = data_x[:, 0]
    dataset[:, 1] = data_x[:, 1]
    dataset[:, 2] = data_y
    dataset = dataset.astype('float32')

    # choose dataset for training and testing
    train_data_ratio = 0.1  # the record is very long, so only 10% of it is used for training
    train_data_len = int(data_len * train_data_ratio)
    train_data_len -= train_data_len % 14   # keep a multiple of 14 so the reshape below works
    train_x = dataset[:train_data_len, 0:2]
    train_y = dataset[:train_data_len, 2]
    INPUT_FEATURES_NUM = 2             # 2 input features: horizontal and vertical vibration signals
    OUTPUT_FEATURES_NUM = 1            # 1 output: remaining useful life

    # test_x = dataset[train_data_len:, 0:2]
    # test_y = dataset[train_data_len:, 2]

    # ----------------- train -------------------
    train_x_tensor = train_x.reshape(-1, 14, INPUT_FEATURES_NUM)   # reshape to (seq_len, batch, features); every 14 samples form one batch
    train_y_tensor = train_y.reshape(-1, 14, OUTPUT_FEATURES_NUM)  # set batch size to 14

    # transfer data to pytorch tensors
    train_x_tensor = torch.from_numpy(train_x_tensor)        # numpy array -> tensor
    train_y_tensor = torch.from_numpy(train_y_tensor)

    DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")    # train on the GPU if one is available
    train_x_tensor_GPU = train_x_tensor.to(DEVICE)            # move the training data to the same device as the model
    train_y_tensor_GPU = train_y_tensor.to(DEVICE)

    lstm_model = LstmRNN(INPUT_FEATURES_NUM, 16, output_size=OUTPUT_FEATURES_NUM, num_layers=1).to(DEVICE)  # 16 hidden units
    print('LSTM model:', lstm_model)
    print('model.parameters:', lstm_model.parameters)

    loss_function = nn.MSELoss()
    optimizer = torch.optim.Adam(lstm_model.parameters(), lr=0.01)

    max_epochs = 100000
    for epoch in range(max_epochs):
        output = lstm_model(train_x_tensor_GPU)
        loss = loss_function(output, train_y_tensor_GPU)

        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        if loss.item() < 1e-4:
            print('Epoch [{}/{}], Loss: {:.5f}'.format(epoch + 1, max_epochs, loss.item()))
            print("The loss value is reached")
            output = output.view(-1, OUTPUT_FEATURES_NUM).data.cpu().numpy()
            plt.plot(output, 'b')
            break
        elif (epoch + 1) % 100 == 0:
            print('Epoch: [{}/{}], Loss:{:.5f}'.format(epoch + 1, max_epochs, loss.item()))

    # prediction on training dataset
    predictive_y_for_training = lstm_model(train_x_tensor_GPU)
    predictive_y_for_training = predictive_y_for_training.view(-1, OUTPUT_FEATURES_NUM).data.cpu().numpy()
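
The test split is only sketched in the comments above. A minimal evaluation on the held-out portion might look as follows; this is a sketch, not part of the original post, and it assumes the variables from the training step (dataset, train_data_len, DEVICE, lstm_model) are still in scope and carries over the 14-wide reshape:

# Hypothetical evaluation on the held-out 90% of the record
test_x = dataset[train_data_len:, 0:2]
test_y = dataset[train_data_len:, 2]
test_len = (len(test_x) // 14) * 14                         # trim so the 14-wide reshape works
test_x_tensor = torch.from_numpy(test_x[:test_len].reshape(-1, 14, INPUT_FEATURES_NUM)).to(DEVICE)
with torch.no_grad():                                       # no gradients needed for inference
    predictive_y_for_testing = lstm_model(test_x_tensor)
predictive_y_for_testing = predictive_y_for_testing.view(-1, OUTPUT_FEATURES_NUM).cpu().numpy()
test_mse = np.mean((predictive_y_for_testing[:, 0] - test_y[:test_len]) ** 2)
print('test MSE: {:.5f}'.format(test_mse))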

4. Plotting

plt.figure()
plt.plot(train_y, 'b', label='actual RUL')
plt.plot(predictive_y_for_training, 'y--', label='predicted RUL')
plt.legend()

plt.show()
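
The trained weights are not saved in the original post. A minimal save/restore sketch using torch.save and load_state_dict (the file name is an assumption):

# Save the learned weights (the file name is an assumption)
torch.save(lstm_model.state_dict(), 'lstm_rul_bearing1_1.pt')

# Later: rebuild the model with the same hyperparameters and reload the weights
restored_model = LstmRNN(INPUT_FEATURES_NUM, 16, output_size=OUTPUT_FEATURES_NUM, num_layers=1)
restored_model.load_state_dict(torch.load('lstm_rul_bearing1_1.pt', map_location='cpu'))
restored_model.eval()   # switch to inference mode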
