language_model

# Import necessary packages.
import torch
import torch.nn as nn
import numpy as np
from torch.nn.utils import clip_grad_norm_
from data_utils import Dictionary, Corpus
# Device configuration.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)
cuda
# Hyper-parameters.
embed_size = 128
hidden_size = 1024
num_layers = 1
num_epochs = 5
num_samples = 1000  # number of words to be sampled.
batch_size = 20
seq_length = 30
learning_rate = 0.002

# Load "penn Treebank" dataset.
corpus = Corpus()
ids = corpus.get_data('./data/train.txt', batch_size)
vocab_size = len(corpus.dictionary)
num_batches = ids.size(1) // seq_length
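
data_utils.py is not listed in this post. The rest of the script only relies on a small interface from it: a Dictionary that maps words to ids (and back) and a Corpus whose get_data reads a tokenized text file and returns the word ids as a LongTensor of shape (batch_size, -1). Below is a minimal sketch of that assumed interface; it is a reconstruction, not the original file.

# Sketch of the assumed data_utils module (reconstruction, may differ from the
# actual data_utils.py used in this post).
import torch

class Dictionary(object):
    def __init__(self):
        self.word2idx = {}
        self.idx2word = {}
        self.idx = 0

    def add_word(self, word):
        # Assign the next free id to any unseen word.
        if word not in self.word2idx:
            self.word2idx[word] = self.idx
            self.idx2word[self.idx] = word
            self.idx += 1

    def __len__(self):
        return len(self.word2idx)

class Corpus(object):
    def __init__(self):
        self.dictionary = Dictionary()

    def get_data(self, path, batch_size=20):
        # First pass: build the vocabulary, marking each line end with '<eos>'.
        with open(path, 'r') as f:
            tokens = 0
            for line in f:
                words = line.split() + ['<eos>']
                tokens += len(words)
                for word in words:
                    self.dictionary.add_word(word)
        # Second pass: map every word to its id.
        ids = torch.LongTensor(tokens)
        token = 0
        with open(path, 'r') as f:
            for line in f:
                for word in line.split() + ['<eos>']:
                    ids[token] = self.dictionary.word2idx[word]
                    token += 1
        # Trim the tail and lay the word stream out as (batch_size, -1).
        num_batches = ids.size(0) // batch_size
        return ids[:num_batches * batch_size].view(batch_size, -1)
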
# RNN based language model.
class RNNLM(nn.Module):
    def __init__(self, vocab_size, embed_size, hidden_size, num_layers):
        super(RNNLM, self).__init__()
        self.embed = nn.Embedding(vocab_size, embed_size)
        self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first=True)
        self.linear = nn.Linear(hidden_size, vocab_size)

    def forward(self, x, h):
        # Embed word ids to vectors.
        x = self.embed(x)

        # Forward propagate LSTM.
        out, (h, c) = self.lstm(x, h)

        # Reshape output to (batch_size*sequence_length, hidden_size).
        out = out.reshape(out.size(0)*out.size(1), out.size(2))

        # Decode hidden states of all time steps.
        out = self.linear(out)
        return out, (h, c)
    
model = RNNLM(vocab_size, embed_size, hidden_size, num_layers).to(device)
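
The forward pass flattens time into the batch dimension, so every time step contributes one row of logits. A quick shape check on a dummy batch (illustrative only, not part of the original script):

with torch.no_grad():
    dummy_h = (torch.zeros(num_layers, batch_size, hidden_size).to(device),
               torch.zeros(num_layers, batch_size, hidden_size).to(device))
    dummy_x = torch.randint(0, vocab_size, (batch_size, seq_length)).to(device)
    dummy_out, _ = model(dummy_x, dummy_h)
    print(dummy_out.shape)  # expected: (batch_size*seq_length, vocab_size)
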
# Loss and optimizer.
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Truncated backpropagation through time: detach the hidden and cell states
# so gradients do not flow across chunk boundaries.
def detach(states):
    return tuple(state.detach() for state in states)
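
Detaching keeps the values of the states but drops their autograd history, so each backward pass only reaches back over one seq_length window. A tiny illustration with toy tensors (not part of the training script):

h = torch.zeros(2, 3, requires_grad=True)
h2 = h * 2
print(h2.grad_fn is None)           # False: gradients would flow back into h
print(h2.detach().grad_fn is None)  # True: the detached copy has no history
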
# Train the model.
for epoch in range(num_epochs):
    # Set initial hidden and cell states.
    states = (torch.zeros(num_layers, batch_size, hidden_size).to(device),
              torch.zeros(num_layers, batch_size, hidden_size).to(device))
    
    for i in range(0, ids.size(1) - seq_length, seq_length):
        # Get mini-batch inputs and targets (the targets are the inputs shifted one word ahead).
        inputs = ids[:, i:i+seq_length].to(device)
        targets = ids[:, (i+1):(i+1)+seq_length].to(device)

        # Forward pass.
        states = detach(states)
        outputs, states = model(inputs, states)
        loss = criterion(outputs, targets.reshape(-1))

        # Backward and optimize.
        optimizer.zero_grad()
        loss.backward()
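        # Clip the global gradient norm at 0.5 to guard against exploding gradients.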
        clip_grad_norm_(model.parameters(), 0.5)
        optimizer.step()

        step = (i+1) // seq_length
        if step % 100 == 0:
            print ('Epoch [{}/{}], Step[{}/{}], Loss: {:.4f}, Perplexity: {:5.2f}'
                   .format(epoch+1, num_epochs, step, num_batches, loss.item(), np.exp(loss.item())))
Epoch [1/5], Step[0/1549], Loss: 9.0020, Perplexity: 8119.07
Epoch [1/5], Step[100/1549], Loss: 6.0340, Perplexity: 417.37
Epoch [1/5], Step[200/1549], Loss: 5.9026, Perplexity: 365.99
Epoch [1/5], Step[300/1549], Loss: 5.7789, Perplexity: 323.41
Epoch [1/5], Step[400/1549], Loss: 8.7098, Perplexity: 6062.29
Epoch [1/5], Step[500/1549], Loss: 5.1095, Perplexity: 165.58
Epoch [1/5], Step[600/1549], Loss: 5.1577, Perplexity: 173.77
Epoch [1/5], Step[700/1549], Loss: 5.3403, Perplexity: 208.59
Epoch [1/5], Step[800/1549], Loss: 5.1845, Perplexity: 178.48
Epoch [1/5], Step[900/1549], Loss: 5.0772, Perplexity: 160.32
Epoch [1/5], Step[1000/1549], Loss: 5.0740, Perplexity: 159.81
Epoch [1/5], Step[1100/1549], Loss: 5.3616, Perplexity: 213.06
Epoch [1/5], Step[1200/1549], Loss: 5.1875, Perplexity: 179.02
Epoch [1/5], Step[1300/1549], Loss: 5.1224, Perplexity: 167.74
Epoch [1/5], Step[1400/1549], Loss: 4.8400, Perplexity: 126.47
Epoch [1/5], Step[1500/1549], Loss: 5.0945, Perplexity: 163.13
Epoch [2/5], Step[0/1549], Loss: 5.3525, Perplexity: 211.13
Epoch [2/5], Step[100/1549], Loss: 4.5566, Perplexity: 95.26
Epoch [2/5], Step[200/1549], Loss: 4.7101, Perplexity: 111.07
Epoch [2/5], Step[300/1549], Loss: 4.6686, Perplexity: 106.55
Epoch [2/5], Step[400/1549], Loss: 4.6247, Perplexity: 101.98
Epoch [2/5], Step[500/1549], Loss: 4.1258, Perplexity: 61.92
Epoch [2/5], Step[600/1549], Loss: 4.4210, Perplexity: 83.18
Epoch [2/5], Step[700/1549], Loss: 4.3747, Perplexity: 79.42
Epoch [2/5], Step[800/1549], Loss: 4.3727, Perplexity: 79.26
Epoch [2/5], Step[900/1549], Loss: 4.2016, Perplexity: 66.79
Epoch [2/5], Step[1000/1549], Loss: 4.2826, Perplexity: 72.43
Epoch [2/5], Step[1100/1549], Loss: 4.5315, Perplexity: 92.90
Epoch [2/5], Step[1200/1549], Loss: 4.4749, Perplexity: 87.78
Epoch [2/5], Step[1300/1549], Loss: 4.2947, Perplexity: 73.31
Epoch [2/5], Step[1400/1549], Loss: 3.9544, Perplexity: 52.16
Epoch [2/5], Step[1500/1549], Loss: 4.2394, Perplexity: 69.37
Epoch [3/5], Step[0/1549], Loss: 4.3837, Perplexity: 80.13
Epoch [3/5], Step[100/1549], Loss: 3.8497, Perplexity: 46.98
Epoch [3/5], Step[200/1549], Loss: 4.0854, Perplexity: 59.47
Epoch [3/5], Step[300/1549], Loss: 4.0053, Perplexity: 54.89
Epoch [3/5], Step[400/1549], Loss: 3.8960, Perplexity: 49.21
Epoch [3/5], Step[500/1549], Loss: 3.3992, Perplexity: 29.94
Epoch [3/5], Step[600/1549], Loss: 3.7645, Perplexity: 43.14
Epoch [3/5], Step[700/1549], Loss: 3.7200, Perplexity: 41.26
Epoch [3/5], Step[800/1549], Loss: 3.7403, Perplexity: 42.11
Epoch [3/5], Step[900/1549], Loss: 3.4453, Perplexity: 31.35
Epoch [3/5], Step[1000/1549], Loss: 3.5493, Perplexity: 34.79
Epoch [3/5], Step[1100/1549], Loss: 3.7659, Perplexity: 43.20
Epoch [3/5], Step[1200/1549], Loss: 3.7596, Perplexity: 42.93
Epoch [3/5], Step[1300/1549], Loss: 3.5639, Perplexity: 35.30
Epoch [3/5], Step[1400/1549], Loss: 3.1878, Perplexity: 24.23
Epoch [3/5], Step[1500/1549], Loss: 3.5535, Perplexity: 34.94
Epoch [4/5], Step[0/1549], Loss: 3.5054, Perplexity: 33.30
Epoch [4/5], Step[100/1549], Loss: 3.3004, Perplexity: 27.12
Epoch [4/5], Step[200/1549], Loss: 3.4906, Perplexity: 32.81
Epoch [4/5], Step[300/1549], Loss: 3.4690, Perplexity: 32.10
Epoch [4/5], Step[400/1549], Loss: 3.3647, Perplexity: 28.93
Epoch [4/5], Step[500/1549], Loss: 2.8826, Perplexity: 17.86
Epoch [4/5], Step[600/1549], Loss: 3.2744, Perplexity: 26.43
Epoch [4/5], Step[700/1549], Loss: 3.2425, Perplexity: 25.60
Epoch [4/5], Step[800/1549], Loss: 3.3238, Perplexity: 27.77
Epoch [4/5], Step[900/1549], Loss: 2.9141, Perplexity: 18.43
Epoch [4/5], Step[1000/1549], Loss: 3.1372, Perplexity: 23.04
Epoch [4/5], Step[1100/1549], Loss: 3.2814, Perplexity: 26.61
Epoch [4/5], Step[1200/1549], Loss: 3.3722, Perplexity: 29.14
Epoch [4/5], Step[1300/1549], Loss: 3.0188, Perplexity: 20.47
Epoch [4/5], Step[1400/1549], Loss: 2.7177, Perplexity: 15.15
Epoch [4/5], Step[1500/1549], Loss: 3.0099, Perplexity: 20.29
Epoch [5/5], Step[0/1549], Loss: 2.9856, Perplexity: 19.80
Epoch [5/5], Step[100/1549], Loss: 2.9412, Perplexity: 18.94
Epoch [5/5], Step[200/1549], Loss: 3.2269, Perplexity: 25.20
Epoch [5/5], Step[300/1549], Loss: 3.0336, Perplexity: 20.77
Epoch [5/5], Step[400/1549], Loss: 2.9810, Perplexity: 19.71
Epoch [5/5], Step[500/1549], Loss: 2.5540, Perplexity: 12.86
Epoch [5/5], Step[600/1549], Loss: 3.0070, Perplexity: 20.23
Epoch [5/5], Step[700/1549], Loss: 2.9668, Perplexity: 19.43
Epoch [5/5], Step[800/1549], Loss: 3.0303, Perplexity: 20.70
Epoch [5/5], Step[900/1549], Loss: 2.6511, Perplexity: 14.17
Epoch [5/5], Step[1000/1549], Loss: 2.7972, Perplexity: 16.40
Epoch [5/5], Step[1100/1549], Loss: 2.9061, Perplexity: 18.28
Epoch [5/5], Step[1200/1549], Loss: 3.0783, Perplexity: 21.72
Epoch [5/5], Step[1300/1549], Loss: 2.6582, Perplexity: 14.27
Epoch [5/5], Step[1400/1549], Loss: 2.3963, Perplexity: 10.98
Epoch [5/5], Step[1500/1549], Loss: 2.7160, Perplexity: 15.12
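
The reported perplexity is simply the exponential of the cross-entropy loss; the last logged step above can be checked directly (a one-line verification, not part of the original script):

print(np.exp(2.7160))  # ≈ 15.12, matching "Loss: 2.7160, Perplexity: 15.12" above
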
# Sample text from the trained model.
with torch.no_grad():
    with open('sample.txt', 'w') as f:
        # Set initial hidden and cell states.
        state = (torch.zeros(num_layers, 1, hidden_size).to(device),
                 torch.zeros(num_layers, 1, hidden_size).to(device))

        # Select one word id randomly.
        prob = torch.ones(vocab_size)
        input = torch.multinomial(prob, num_samples=1).unsqueeze(1).to(device)

        for i in range(num_samples):
            # Forward propagate RNN.
            output, state = model(input, state)

            # Sample a word id.
            prob = output.exp()
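            # (torch.multinomial normalizes its weights internally, so sampling
            # from exp(logits) is equivalent to sampling from the softmax.)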
            word_id = torch.multinomial(prob, num_samples=1).item()

            # Fill input with sampled word id for the next time step.
            input.fill_(word_id)

            # File write.
            word = corpus.dictionary.idx2word[word_id]
            word = '\n' if word == '<eos>' else word + ' '
            f.write(word)

            if (i+1) % 100 == 0:
                print('Sampled [{}/{}] words and saved to {}'.format(i+1, num_samples, 'sample.txt'))

# Save the model checkpoints.
torch.save(model.state_dict(), 'model.ckpt')
Sampled [100/1000] words and saved to sample.txt
Sampled [200/1000] words and saved to sample.txt
Sampled [300/1000] words and saved to sample.txt
Sampled [400/1000] words and saved to sample.txt
Sampled [500/1000] words and saved to sample.txt
Sampled [600/1000] words and saved to sample.txt
Sampled [700/1000] words and saved to sample.txt
Sampled [800/1000] words and saved to sample.txt
Sampled [900/1000] words and saved to sample.txt
Sampled [1000/1000] words and saved to sample.txt
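
The checkpoint stores only the parameters, so generating more text later means rebuilding the module first. A minimal reload sketch (assuming the same hyper-parameters, the RNNLM class, and the corpus are available):

model = RNNLM(vocab_size, embed_size, hidden_size, num_layers).to(device)
model.load_state_dict(torch.load('model.ckpt', map_location=device))
model.eval()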