刘二 Basic RNN: classroom code and homework (to be completed)

Grad school feels like it's all just calling library functions. So boring; I miss my undergrad ACM contest days~

Without embedding:

import torch

num_layers = 1
seq_len = 5
batch_size = 1
input_size = 4   # vocabulary size (one-hot width)
hidden_size = 4  # chosen equal to the number of classes, so hidden states double as logits

idx2char = ['e', 'h', 'l', 'o']
x_data = [1, 0, 2, 2, 3]    # "hello"
# y_data = [3, 1, 2, 3, 2]  # "ohlol", the target used in class
y_data = [1, 0, 2, 2, 3]    # "hello": here the target is just the input itself

one_hot_lookup = [[1, 0, 0, 0],
                  [0, 1, 0, 0],
                  [0, 0, 1, 0],
                  [0, 0, 0, 1]]
x_one_hot = [one_hot_lookup[x] for x in x_data]  # (seq_len, input_size)

# nn.RNN expects input of shape (seq_len, batch_size, input_size);
# CrossEntropyLoss takes raw class indices as targets
inputs = torch.Tensor(x_one_hot).view(seq_len, batch_size, input_size)
labels = torch.LongTensor(y_data)

class Model(torch.nn.Module):
    def __init__(self, input_size, hidden_size, batch_size, num_layers=1):
        super(Model, self).__init__()
        self.num_layers = num_layers
        self.batch_size = batch_size
        self.input_size = input_size
        self.hidden_size = hidden_size
        # RNNCell variant shown in class (processes one step at a time):
        # self.rnncell = torch.nn.RNNCell(input_size=self.input_size,
        #                                 hidden_size=self.hidden_size)
        self.rnn = torch.nn.RNN(input_size=self.input_size,
                                hidden_size=self.hidden_size,
                                num_layers=self.num_layers)

    def forward(self, input):
        # initial hidden state: (num_layers, batch_size, hidden_size)
        hidden = torch.zeros(self.num_layers,
                             self.batch_size, self.hidden_size)
        out, _ = self.rnn(input, hidden)  # out: (seq_len, batch_size, hidden_size)
        # flatten to (seq_len * batch_size, hidden_size) for CrossEntropyLoss
        return out.view(-1, self.hidden_size)

    def init_hidden(self):
        # only needed by the RNNCell variant: (batch_size, hidden_size)
        return torch.zeros(self.batch_size, self.hidden_size)

net = Model(input_size, hidden_size, batch_size)

criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=0.1)

for epoch in range(10):
    optimizer.zero_grad()
    outputs = net(inputs)        # (seq_len, num_classes)
    _, idx = outputs.max(dim=1)  # greedy decode: most likely char at each step
    print('Predicted string: ', end='')
    for tmp in idx:
        print(idx2char[tmp.item()], end='')

    loss = criterion(outputs, labels)
    print(', Epoch [%d/10] loss = %.4f' % (epoch + 1, loss.item()))

    loss.backward()
    optimizer.step()

    # In-class RNNCell variant: step through the sequence manually and
    # accumulate the loss over all time steps. For this variant, labels
    # needs shape (seq_len, 1) so each per-step label is (1,).
    # A runnable version follows the script.
    # loss = 0
    # optimizer.zero_grad()
    # hidden = net.init_hidden()
    # print('Predicted string: ', end='')
    # for input, label in zip(inputs, labels):
    #     hidden = net(input, hidden)
    #     loss += criterion(hidden, label)
    #     _, idx = hidden.max(dim=1)
    #     print(idx2char[idx.item()], end='')
    # loss.backward()
    # optimizer.step()
    # print(', Epoch [%d/100] loss = %.4f' % (epoch+1, loss.item()))
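
For reference, here is the commented-out RNNCell variant as a standalone, runnable sketch (it builds its own cell and optimizer rather than reusing the Model class above; note the per-step labels are reshaped to (seq_len, 1) so each label_t has shape (1,)):

import torch

seq_len, batch_size, input_size, hidden_size = 5, 1, 4, 4
idx2char = ['e', 'h', 'l', 'o']

cell = torch.nn.RNNCell(input_size=input_size, hidden_size=hidden_size)
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(cell.parameters(), lr=0.1)

one_hot = torch.eye(input_size)
inputs = one_hot[[1, 0, 2, 2, 3]].view(seq_len, batch_size, input_size)  # "hello"
labels = torch.LongTensor([1, 0, 2, 2, 3]).view(-1, 1)  # one (1,)-shaped label per step

for epoch in range(10):
    loss = 0
    optimizer.zero_grad()
    hidden = torch.zeros(batch_size, hidden_size)  # RNNCell hidden: (batch, hidden_size)
    print('Predicted string: ', end='')
    for input_t, label_t in zip(inputs, labels):
        hidden = cell(input_t, hidden)            # one time step: (batch, hidden_size)
        loss = loss + criterion(hidden, label_t)  # keep the graph; sum over steps
        _, idx = hidden.max(dim=1)
        print(idx2char[idx.item()], end='')
    loss.backward()
    optimizer.step()
    print(', Epoch [%d/10] loss = %.4f' % (epoch + 1, loss.item()))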

With embedding:
The code this teacher gives in class is riddled with errors; he probably never ran it himself, just walked through it in his head.

import torch

num_layers = 1
seq_len = 5
batch_size = 1
input_size = 4      # vocabulary size
hidden_size = 4
embedding_size = 3  # free hyperparameter; it need not match input_size
num_class = 4

idx2char = ['e', 'h', 'l', 'o']
x_data = [3, 1, 0, 2, 2]  # "ohell"
y_data = [1, 0, 2, 2, 3]  # "hello"

# No one-hot encoding here: nn.Embedding takes the raw indices directly.
# Embedding indices should be integer (Long) tensors, not float.
inputs = torch.LongTensor(x_data)
labels = torch.LongTensor(y_data)

class Model(torch.nn.Module):
    def __init__(self, input_size, hidden_size, batch_size, embedding_size, seq_len, num_layers=1):
        super(Model, self).__init__()
        self.num_layers = num_layers
        self.batch_size = batch_size
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.seq_len = seq_len
        self.embedding_size = embedding_size
        # lookup table: vocabulary size x embedding dimension
        self.emb = torch.nn.Embedding(num_embeddings=self.input_size,
                                      embedding_dim=self.embedding_size)
        self.rnn = torch.nn.RNN(input_size=self.embedding_size,
                                hidden_size=self.hidden_size,
                                num_layers=self.num_layers)
        # project hidden states to per-class scores
        self.fc = torch.nn.Linear(hidden_size, num_class)

    def forward(self, x):
        hidden = torch.zeros(self.num_layers,
                             self.batch_size, self.hidden_size)
        x = self.emb(x)  # (seq_len,) -> (seq_len, embedding_size)
        x = x.view(self.seq_len, self.batch_size, self.embedding_size)
        x, _ = self.rnn(x, hidden)  # (seq_len, batch_size, hidden_size)
        x = self.fc(x)              # (seq_len, batch_size, num_class)
        return x.view(-1, num_class)


net = Model(input_size, hidden_size, batch_size, embedding_size, seq_len)

criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=0.1)



for epoch in range(10):
    optimizer.zero_grad()
    outputs = net(inputs)        # (seq_len, num_class)
    _, idx = outputs.max(dim=1)  # greedy decode
    print('Predicted string: ', ''.join(idx2char[x.item()] for x in idx), end='')

    loss = criterion(outputs, labels)
    print(', Epoch [%d/10] loss = %.4f' % (epoch + 1, loss.item()))

    loss.backward()
    optimizer.step()
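
To see why embedding_size can be 3 (or anything else), here is a quick shape check, as a minimal sketch independent of the script above:

import torch

emb = torch.nn.Embedding(num_embeddings=4, embedding_dim=3)  # 4-char vocab, 3-dim vectors
idx = torch.LongTensor([3, 1, 0, 2, 2])  # (seq_len,)
print(emb(idx).shape)  # torch.Size([5, 3]): one 3-dim vector per index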



Output:

(training-log screenshot omitted)
