PyTorch Lecture 12: RNN 1 - Basics

The code provided by the original author raises an error when run:

TypeError: list indices must be integers or slices, not torch.LongTensor

The error occurs at this line:

sys.stdout.write(idx2char[idx.data[0]])

Change it to:

sys.stdout.write(idx2char[int(idx.data[0].numpy())])

and the error is resolved.
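
For context, a minimal sketch of why the original line fails (the logit values here are made up): output.max(1) returns a (values, indices) pair of tensors, and a LongTensor cannot be used directly as a Python list index, so it must first be converted to a plain Python int:

import torch

idx2char = ['h', 'i', 'e', 'l', 'o']
output = torch.Tensor([[0.1, 2.0, 0.3, 0.2, 0.1]])  # made-up logits, shape (1, 5)
val, idx = output.max(1)                            # idx is a LongTensor of shape (1,)

# idx2char[idx[0]] raised the TypeError above on the PyTorch version used in this post
print(idx2char[int(idx[0].numpy())])  # the fix from this post: detour through numpy -> 'i'
print(idx2char[idx.item()])           # equivalent on PyTorch >= 0.4: .item() returns a Python int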

The full code follows:

# Lab 12 RNN
import sys
import torch
import torch.nn as nn
from torch.autograd import Variable

torch.manual_seed(777)  # reproducibility
#            0    1    2    3    4
idx2char = ['h', 'i', 'e', 'l', 'o']

# Teach hihell -> ihello
x_data = [0, 1, 0, 2, 3, 3]  # hihell

one_hot_lookup = [[1, 0, 0, 0, 0],  # 0
                  [0, 1, 0, 0, 0],  # 1
                  [0, 0, 1, 0, 0],  # 2
                  [0, 0, 0, 1, 0],  # 3
                  [0, 0, 0, 0, 1]]  # 4

y_data = [1, 0, 2, 3, 3, 4]  # ihello
x_one_hot = [one_hot_lookup[x] for x in x_data]

# As we have one batch of examples, we wrap them in Variables only once

inputs = Variable(torch.Tensor(x_one_hot))
labels = Variable(torch.LongTensor(y_data))

num_classes = 5
input_size = 5  # one-hot size
hidden_size = 5  # output from the RNN. 5 to directly predict one-hot
batch_size = 1  # one sentence
sequence_length = 1  # One by one
num_layers = 1  # one-layer rnn


class Model(nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.rnn = nn.RNN(input_size=input_size,
                          hidden_size=hidden_size, batch_first=True)

    def forward(self, hidden, x):
        # Reshape input (batch first)
        x = x.view(batch_size, sequence_length, input_size)

        # Propagate input through RNN
        # Input:(batch,seq_len,input_size)
        # hidden:(num_layers*num_directions,batch,hidden_size)
        out, hidden = self.rnn(x, hidden)
        return hidden, out.view(-1, num_classes)

    def init_hidden(self):
        # Initialize hidden and cell states
        # (num_layers*num_directions,batch,hidden_size)
        return Variable(torch.zeros(num_layers, batch_size, hidden_size))


# Instantiate RNN model
model = Model()

print(model)

# Set Loss and optimizer function
# CrossEntropyLoss = LogSoftmax+NLLLoss
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)

# Train the model
for epoch in range(100):
    optimizer.zero_grad()
    loss = 0
    hidden = model.init_hidden()

    sys.stdout.write("predicted string:")
    for input, label in zip(inputs, labels):
        # print (input.size())
        hidden, output = model(hidden, input)
        val, idx = output.max(1)
        sys.stdout.write(idx2char[int(idx.data[0].numpy())])
        loss += criterion(output, label)
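    # note: loss.data[0] on the next line works on the older PyTorch used here;
    # on PyTorch >= 0.4 use loss.item() instead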
    print(",epoch:%d,loss:%1.3f" % (epoch + 1, loss.data[0]))
    loss.backward()
    optimizer.step()
print("Learning finished!")
