Homework 8: LSTM and GRU

A simple application of LSTM and GRU.

Task objective: Seq2Seq

Train a model to learn: “hello” -> “ohlol”
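With the four-character vocabulary ['e', 'h', 'l', 'o'] used below, this is a per-position classification task: each input character should be mapped to the corresponding output character. A minimal sketch of the encoding (the char2idx helper is illustrative, not part of the homework code):

idx2char = ['e', 'h', 'l', 'o']
char2idx = {c: i for i, c in enumerate(idx2char)}   # hypothetical inverse lookup
x_data = [char2idx[c] for c in 'hello']   # -> [1, 0, 2, 2, 3]
y_data = [char2idx[c] for c in 'ohlol']   # -> [3, 1, 2, 3, 2]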

LSTM

Structure:
[Figure: LSTM cell structure (image not included)]

The LSTM equations (in the same notation as PyTorch's nn.LSTM documentation):

$$
\begin{aligned}
i_t &= \sigma(W_{ii} x_t + b_{ii} + W_{hi} h_{t-1} + b_{hi}) \\
f_t &= \sigma(W_{if} x_t + b_{if} + W_{hf} h_{t-1} + b_{hf}) \\
g_t &= \tanh(W_{ig} x_t + b_{ig} + W_{hg} h_{t-1} + b_{hg}) \\
o_t &= \sigma(W_{io} x_t + b_{io} + W_{ho} h_{t-1} + b_{ho}) \\
c_t &= f_t \odot c_{t-1} + i_t \odot g_t \\
h_t &= o_t \odot \tanh(c_t)
\end{aligned}
$$
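To make the gate equations concrete, here is a minimal sketch of a single LSTM step with explicit tensors (the function name, weight layout, and shapes are assumptions for illustration, not PyTorch internals):

import torch

def lstm_step(x_t, h_prev, c_prev, W_ih, W_hh, b):
    """One LSTM time step; the four gates i, f, g, o are stacked in the weights."""
    gates = W_ih @ x_t + W_hh @ h_prev + b   # (4*hidden,)
    i, f, g, o = gates.chunk(4)              # split into the four gates
    i, f, o = torch.sigmoid(i), torch.sigmoid(f), torch.sigmoid(o)
    g = torch.tanh(g)                        # candidate cell state
    c_t = f * c_prev + i * g                 # forget old memory, write new memory
    h_t = o * torch.tanh(c_t)                # expose a gated view of the cell
    return h_t, c_t

# tiny smoke test with random weights (hidden=8, input=10 as in the homework)
hid, inp = 8, 10
h, c = lstm_step(torch.randn(inp), torch.zeros(hid), torch.zeros(hid),
                 torch.randn(4 * hid, inp), torch.randn(4 * hid, hid), torch.zeros(4 * hid))
print(h.shape, c.shape)   # torch.Size([8]) torch.Size([8])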

import torch
import torch.nn as nn

# parameters
num_class = 4
input_size = 4
batch_size = 1
hidden_size = 8
num_layers = 2
embedding_size = 10
seq_len = 5

# data
idx2char = ['e', 'h', 'l', 'o']
x_data = [1, 0, 2, 2, 3]
y_data = [3, 1, 2, 3, 2]

# input should be (batchSize, seqLen); target should be (batchSize * seqLen)
inputs = torch.LongTensor(x_data).view(batch_size, seq_len)  # batch dimension first
labels = torch.LongTensor(y_data)

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.emd = nn.Embedding(input_size, embedding_size)
        self.lstm = nn.LSTM(num_layers=num_layers, input_size=embedding_size, hidden_size=hidden_size, batch_first=True)   # batch_first=True: input is (batch, seqLen, embeddingSize)
        self.fc = nn.Linear(hidden_size, num_class)

    def forward(self, x):
        h0 = torch.zeros(num_layers, batch_size, hidden_size)   # initial hidden state
        c0 = torch.zeros(num_layers, batch_size, hidden_size)   # initial cell state (an LSTM keeps both h and c)
        x = self.emd(x)                 # (batch, seqLen, embeddingSize)
        x, _ = self.lstm(x, (h0, c0))   # (batch, seqLen, hiddenSize)
        x = self.fc(x)                  # (batch, seqLen, numClass)
        return x.view(-1, num_class)    # flatten to (batchSize*seqLen, numClass) for nn.CrossEntropyLoss()

model = Net()

# loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.05)


for epoch in range(15):
    optimizer.zero_grad()
    outputs = model(inputs)
    loss = criterion(outputs, labels)
    loss.backward()
    optimizer.step()

    _, predicted = outputs.max(dim=1)   # index of the max logit at each position
    predicted = predicted.data.numpy()
    print('Predicted string is', ''.join([idx2char[idx] for idx in predicted]), end='')
    print(', Epoch [%d/15] loss=%.4f' % (epoch+1, loss.item()))
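After the loop finishes, a quick sanity check (a sketch; whether it prints exactly 'ohlol' depends on the run converging within 15 epochs):

with torch.no_grad():
    outputs = model(inputs)                          # (seqLen, numClass) after the view
    predicted = outputs.argmax(dim=1)                # best class per position
    print(''.join(idx2char[i] for i in predicted))   # ideally 'ohlol'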

GRU

Structure and equations:
[Figure: GRU cell structure (image not included)]

The GRU equations (in the same notation as PyTorch's nn.GRU documentation):

$$
\begin{aligned}
r_t &= \sigma(W_{ir} x_t + b_{ir} + W_{hr} h_{t-1} + b_{hr}) \\
z_t &= \sigma(W_{iz} x_t + b_{iz} + W_{hz} h_{t-1} + b_{hz}) \\
n_t &= \tanh(W_{in} x_t + b_{in} + r_t \odot (W_{hn} h_{t-1} + b_{hn})) \\
h_t &= (1 - z_t) \odot n_t + z_t \odot h_{t-1}
\end{aligned}
$$
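One practical consequence of the formulas: a GRU has three gates' worth of weights where an LSTM has four, so at equal sizes it carries roughly 3/4 of the recurrent parameters. A quick check, as a sketch using the hyperparameters from this homework:

import torch.nn as nn

lstm = nn.LSTM(input_size=10, hidden_size=8, num_layers=2)
gru = nn.GRU(input_size=10, hidden_size=8, num_layers=2)
print(sum(p.numel() for p in lstm.parameters()))   # 1216
print(sum(p.numel() for p in gru.parameters()))    # 912, i.e. 3/4 of the LSTM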

import torch
import torch.nn as nn

# parameters
num_class = 4
input_size = 4
batch_size = 1
hidden_size = 8
embedding_size = 10
num_layers = 2
seq_len = 5

# data
idx2char = ['e', 'h', 'l', 'o']
x_data = [1, 0, 2, 2, 3]
y_data = [3, 1, 2, 3, 2]

# input should be (seqLen, batchSize); target should be (batchSize * seqLen)
inputs = torch.LongTensor(x_data).view(seq_len, batch_size)   # seqLen dimension first (batch_first=False)
labels = torch.LongTensor(y_data)

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.emd = nn.Embedding(input_size, embedding_size)
        self.gru = nn.GRU(num_layers=num_layers, input_size=embedding_size, hidden_size=hidden_size)   # batch_first defaults to False: input is (seqLen, batch, embeddingSize)
        self.fc = nn.Linear(hidden_size, num_class)

    def forward(self, x):
        hidden = torch.zeros(num_layers, batch_size, hidden_size)   # h0: (numLayers, batch, hiddenSize)
        x = self.emd(x)                   # (seqLen, batch, embeddingSize)
        x, hidden = self.gru(x, hidden)   # (seqLen, batch, hiddenSize)
        x = self.fc(x)                    # (seqLen, batch, numClass)
        return x.view(-1, num_class)      # flatten to (seqLen*batchSize, numClass) for nn.CrossEntropyLoss()

model = Net()

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.05)

for epoch in range(15):
    optimizer.zero_grad()
    outputs = model(inputs)
    loss = criterion(outputs, labels)
    loss.backward()
    optimizer.step()

    _, predicted = outputs.max(dim=1)
    print('Predicted string is', ''.join([idx2char[idx] for idx in predicted]), end='')
    print(', Epoch [%d/15] loss=%.4f' % (epoch+1, loss.item()))
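The only real difference from the LSTM version is the layout: with batch_first left at its default of False, the network consumes (seqLen, batch) input and produces (seqLen, batch, hiddenSize) output. A standalone shape check, as a sketch mirroring the sizes above:

import torch
import torch.nn as nn

emb = nn.Embedding(4, 10)
gru = nn.GRU(input_size=10, hidden_size=8, num_layers=2)   # batch_first=False by default
x = torch.LongTensor([1, 0, 2, 2, 3]).view(5, 1)           # (seqLen, batch)
out, h = gru(emb(x), torch.zeros(2, 1, 8))
print(out.shape)   # torch.Size([5, 1, 8]) -> (seqLen, batch, hiddenSize)
print(h.shape)     # torch.Size([2, 1, 8]) -> (numLayers, batch, hiddenSize)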