# Some part of the code was referenced from below.
# https://github.com/pytorch/examples/tree/master/word_language_model
import torch
import torch.nn as nn
import numpy as np
from torch.nn.utils import clip_grad_norm_
from data_utils import Dictionary, Corpus

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Hyperparameters
embed_size = 128      # dimensionality of the word embedding vectors
hidden_size = 1024
num_layers = 1
num_epochs = 5
num_samples = 1000    # number of words to be sampled
batch_size = 20
seq_length = 30
learning_rate = 0.002

# Load "Penn Treebank" dataset
corpus = Corpus()
ids = corpus.get_data('data/train.txt', batch_size)
vocab_size = len(corpus.dictionary)  # number of distinct words in the corpus
num_batches = ids.size(1) // seq_length
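# Note on the batching layout assumed here: get_data (from the companion
# data_utils module) is assumed to return the flat stream of token ids
# reshaped to (batch_size, -1), so each row is one long slice of the corpus
# and num_batches counts the seq_length-sized windows along dimension 1.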
# RNN based language model
class RNNLM(nn.Module):
    def __init__(self, vocab_size, embed_size, hidden_size, num_layers):
        super(RNNLM, self).__init__()
        self.embed = nn.Embedding(vocab_size, embed_size)
        self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first=True)
        self.linear = nn.Linear(hidden_size, vocab_size)

    def forward(self, x, h):
        # Embed word ids to vectors
        x = self.embed(x)
        # Forward propagate LSTM
        out, (h, c) = self.lstm(x, h)
        # Reshape output to (batch_size*sequence_length, hidden_size)
        out = out.reshape(out.size(0)*out.size(1), out.size(2))
        # Decode hidden states of all time steps
        out = self.linear(out)
        return out, (h, c)
model = RNNLM(vocab_size, embed_size, hidden_size, num_layers).to(device)

# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Truncated backpropagation: detach hidden states from their graph history
def detach(states):
    return [state.detach() for state in states]
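# Without this detach, backward() at a later chunk would try to propagate
# through states created in earlier chunks, whose graph buffers have already
# been freed, and autograd would raise an error; detaching caps the gradient
# horizon at seq_length steps.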
# Train the model
for epoch in range(num_epochs):
    # Set initial hidden and cell states
    states = (torch.zeros(num_layers, batch_size, hidden_size).to(device),
              torch.zeros(num_layers, batch_size, hidden_size).to(device))

    for i in range(0, ids.size(1) - seq_length, seq_length):
        # Get mini-batch inputs and targets; targets are the inputs shifted
        # by one position, because in language modeling the next word is the
        # prediction target
        inputs = ids[:, i:i+seq_length].to(device)
        targets = ids[:, (i+1):(i+1)+seq_length].to(device)
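        # Illustration with hypothetical token ids (not from the corpus): if a
        # row of ids were [12, 7, 43, 9] and seq_length were 3, inputs would be
        # [12, 7, 43] and targets [7, 43, 9].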
        # Forward pass (detach truncates the backpropagation graph)
        states = detach(states)
        outputs, states = model(inputs, states)
        loss = criterion(outputs, targets.reshape(-1))
        # Backward and optimize, clipping gradients to stabilize training
        optimizer.zero_grad()
        loss.backward()
        clip_grad_norm_(model.parameters(), 0.5)
        optimizer.step()

        step = (i+1) // seq_length
        if step % 100 == 0:
            print('Epoch [{}/{}], Step[{}/{}], Loss: {:.4f}, Perplexity: {:5.2f}'
                  .format(epoch+1, num_epochs, step, num_batches, loss.item(), np.exp(loss.item())))
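        # Note: CrossEntropyLoss returns the mean negative log-likelihood per
        # token, so np.exp(loss) is the standard per-word perplexity.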
# Test the model
with torch.no_grad():
    with open('sample.txt', 'w') as f:
        # Set initial hidden and cell states
        state = (torch.zeros(num_layers, 1, hidden_size).to(device),
                 torch.zeros(num_layers, 1, hidden_size).to(device))

        # Select the first word id at random: an all-ones weight vector makes
        # multinomial draw an index uniformly over the vocabulary
        prob = torch.ones(vocab_size)
        input = torch.multinomial(prob, num_samples=1).unsqueeze(1).to(device)

        for i in range(num_samples):
            # Forward propagate RNN
            output, state = model(input, state)
            # Sample a word id from the exponentiated output scores
            # (multinomial normalizes the weights itself)
            prob = output.exp()
            word_id = torch.multinomial(prob, num_samples=1).item()
            # Fill input with sampled word id for the next time step
            input.fill_(word_id)
            # Write the sampled word to the file
            word = corpus.dictionary.idx2word[word_id]
            word = '\n' if word == '<eos>' else word + ' '
            f.write(word)
            if (i+1) % 100 == 0:
                print('Sampled [{}/{}] words and saved to {}'.format(i+1, num_samples, 'sample.txt'))
# Save the model checkpoints
torch.save(model.state_dict(), 'model.ckpt')
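# A minimal sketch (not part of the original script) of restoring the saved
# checkpoint: the state dict holds only weights, so the model is rebuilt with
# the same hyperparameters before loading.
restored = RNNLM(vocab_size, embed_size, hidden_size, num_layers).to(device)
restored.load_state_dict(torch.load('model.ckpt'))
restored.eval()  # switch to inference mode before sampling again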