LSTM Time-Series Prediction of a Composite Function

import math
import random

import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable


#X_data = np.array([x for x in np.linspace(-2, 5, num=200)]).reshape(-1, 1)

#Y_data = np.array([(math.e ** (-x)) * math.sin(x) + 0.1 *random.uniform(1, 6) for x in np.linspace(-2, 5, num=200)]).reshape(-1, 1)


# 100 sample points of the target curve y = e^(-x) * sin(x) on [-2, 5]
X = torch.unsqueeze(torch.linspace(-2, 5, 100), dim=1)
print(X)
y = torch.tensor(np.array([(math.e ** (-x)) * math.sin(x) for x in np.linspace(-2, 5, num=100)])).float()

plt.scatter(X.numpy(), y.numpy())
plt.show()
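# A vectorized alternative to the list comprehension above (a stylistic
# sketch, not the original author's code) builds the same curve directly
# in torch, without the numpy round-trip:
# y = (torch.exp(-X) * torch.sin(X)).squeeze(1)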


# Wrap the tensors in Variables (note: Variable has been deprecated since
# PyTorch 0.4; plain tensors carry autograd information now)
x = Variable(X, requires_grad=True)
y = Variable(y)
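# A sketch of the modern, Variable-free equivalent (assuming gradients are
# only needed on x; y can stay a plain tensor):
# x = X.clone().requires_grad_(True)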

# plt.scatter(x.data.numpy(), y.data.numpy())
# plt.show()

# Define the network class
class RNN(nn.Module):
    def __init__(self):
        super(RNN, self).__init__()

        self.rnn = nn.RNN(
            input_size=1,
            hidden_size=32,  # rnn hidden unit
            num_layers=1,  # number of rnn layer
            batch_first=True,  # input & output will have batch size as the first dimension, e.g. (batch, time_step, input_size)
        )
        self.out = nn.Linear(32, 1)

    def forward(self, x, h_state):
        # x (batch, time_step, input_size)
        # h_state (n_layers, batch, hidden_size)
        # r_out (batch, time_step, hidden_size)
        r_out, h_state = self.rnn(x, h_state)

        outs = []  # save all predictions
        for time_step in range(r_out.size(1)):  # calculate output for each time step
            outs.append(self.out(r_out[:, time_step, :]))
        return torch.stack(outs, dim=1), h_state

        # instead, for simplicity, you can replace the code above with the following
        # r_out = r_out.view(-1, 32)
        # outs = self.out(r_out)
        # outs = outs.view(-1, TIME_STEP, 1)
        # return outs, h_state

        # or even simpler, since nn.Linear accepts inputs of any number of
        # dimensions and only changes the size of the last one
        # outs = self.out(r_out)
        # return outs, h_state
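
# The post's title says LSTM, but the class above uses a vanilla nn.RNN.
# A minimal LSTM drop-in sketch (an illustration, not the author's code):
# the only real differences are the module name and that the hidden state
# becomes an (h, c) tuple, so detaching it in the training loop means
# detaching both tensors, e.g. h_state = tuple(s.detach() for s in h_state).
class LSTMNet(nn.Module):
    def __init__(self):
        super(LSTMNet, self).__init__()
        self.rnn = nn.LSTM(
            input_size=1,
            hidden_size=32,
            num_layers=1,
            batch_first=True,
        )
        self.out = nn.Linear(32, 1)

    def forward(self, x, h_state):
        # h_state is None on the first call, afterwards an (h, c) tuple
        r_out, h_state = self.rnn(x, h_state)
        # nn.Linear is applied to the last dimension at every time step
        return self.out(r_out), h_state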


rnn = RNN()
print(rnn)

optimizer = torch.optim.Adam(rnn.parameters(), lr=0.01)  # optimize all rnn parameters
# optimizer = torch.optim.SGD(rnn.parameters(), lr=0.1, momentum=0.9)
loss_func = nn.MSELoss()  # mean squared error for this regression task

Mats = None
def Mat(x):
    # stash the final prediction for later use; without the global
    # declaration this assignment would only bind a local variable
    global Mats
    Mats = x
    return Mats

h_state = None  # initial hidden state; None makes PyTorch use zeros
for epoch in range(0, 1):
    for step in range(0, 500):  # gives batch data

        b_y = y.view(-1, 10, 1)  # reshape y to (batch, time_step, input_size)
        b_x = X.view(-1, 10, 1)  # reshape x to (batch, time_step, input_size)

        prediction, h_state = rnn(b_x, h_state)  # the rnn's prediction for every step, plus the h_state of the last step
        # !! The next step is crucial !!
        # Detach the hidden state from the graph, otherwise backward() would
        # try to propagate through all previous iterations.
        h_state = h_state.detach()

        loss = loss_func(prediction, b_y)  # mean squared error

        optimizer.zero_grad()  # clear gradients for this training step
        loss.backward()  # backpropagation, compute gradients
        optimizer.step()  # apply gradients
        print('step={}, loss={}'.format(step, loss.item()))
        if step == 499:
            Mat(prediction.data.numpy().flatten())
            plt.plot(prediction.data.numpy().flatten().tolist())
            plt.show()
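
# A minimal post-training check (a sketch, assuming the loop above has run):
# feed the whole series through the network with a fresh hidden state and
# compare the fit against the target curve.
with torch.no_grad():
    full_pred, _ = rnn(X.view(1, -1, 1), None)
plt.plot(X.squeeze(1).numpy(), y.numpy(), label='target')
plt.plot(X.squeeze(1).numpy(), full_pred.view(-1).numpy(), label='prediction')
plt.legend()
plt.show()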
