Training a Language Model
We train a language model with an RNN, LSTM, or GRU to predict the next word in a sequence. Along the way this covers:
basic usage of torchtext
building the vocabulary (the word-to-index and index-to-word mappings)
some basic models from torch.nn
training tricks for RNNs
saving and loading models
import torch
import torch.nn as nn
import torchtext
from torchtext.vocab import Vectors
import numpy as np
import random

USE_CUDA = torch.cuda.is_available()

# Fix the random seeds so results are reproducible.
random.seed(53113)
np.random.seed(53113)
torch.manual_seed(53113)
if USE_CUDA:
    torch.cuda.manual_seed(53113)

BATCH_SIZE = 32
EMBEDDING_SIZE = 500
MAX_VOCAB_SIZE = 50000
TEXT = torchtext.data.Field(lower=True)
train, val, test = torchtext.datasets.LanguageModelingDataset.splits(
    path=".",
    train="text8.train.txt",
    validation="text8.dev.txt",
    test="text8.test.txt",
    text_field=TEXT)
TEXT.build_vocab(train, max_size=MAX_VOCAB_SIZE)
print("vocabulary size: {}".format(len(TEXT.vocab)))
print(TEXT.vocab.itos[0:50])
print("------" * 10)
print(list(TEXT.vocab.stoi.items())[0:50])
VOCAB_SIZE = len(TEXT.vocab)
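The two mappings printed above can also be queried directly. A quick illustration (assuming "the" appears in the text8 vocabulary, which it does for English text):

# Illustrative lookups: stoi maps word -> index, itos maps index -> word.
idx = TEXT.vocab.stoi["the"]
print(idx, TEXT.vocab.itos[idx])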
train_iter, val_iter, test_iter = torchtext.data.BPTTIterator.splits(
    (train, val, test),
    batch_size=BATCH_SIZE,
    device=-1,
    bptt_len=50,
    repeat=False,
    shuffle=True)
'''
Iterator: the standard iterator.
BucketIterator: unlike the standard iterator, it groups samples of similar
length into the same batch. Since text batches are usually padded to the
length of the longest sequence in the batch, BucketIterator greatly reduces
wasted padding when sample lengths vary a lot. Alternatively, the fix_length
argument of Field can be used to truncate or pad every sample to a fixed length.
BPTTIterator: an iterator based on BPTT (backpropagation through time),
typically used for language models.
'''
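The fix_length alternative mentioned above is not used in this tutorial; a minimal sketch of it would look like this:

# Sketch only (not used below): a Field that truncates or pads every
# example to exactly 100 tokens, instead of relying on per-batch padding.
FIXED_TEXT = torchtext.data.Field(lower=True, fix_length=100)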
print(next(iter(train_iter)))
print(next(iter(val_iter)))
print(next(iter(test_iter)))

# Inspect one batch: target is the input text shifted forward by one word.
it = iter(train_iter)
batch = next(it)
print(" ".join([TEXT.vocab.itos[i] for i in batch.text[:, 1]]))
print(" ".join([TEXT.vocab.itos[i] for i in batch.target[:, 1]]))

# Print the first five sequences (columns) in this batch.
for j in range(5):
    print(j)
    print(" ".join([TEXT.vocab.itos[i] for i in batch.text[:, j].data]))
    print(j)
    print(" ".join([TEXT.vocab.itos[i] for i in batch.target[:, j].data]))

# Follow one sequence (column 2) across five consecutive batches.
for k in range(5):
    batch = next(it)
    print(k)
    print(" ".join([TEXT.vocab.itos[i] for i in batch.text[:, 2].data]))
    print(k)
    print(" ".join([TEXT.vocab.itos[i] for i in batch.target[:, 2].data]))
class RNNModel(nn.Module):
    def __init__(self, rnn_type, vocab_size, embedding_size, hidden_size, nlayers, dropout=0.5):
        ''' The model consists of the following layers:
        - a word embedding layer
        - a recurrent layer (RNN, LSTM, or GRU)
        - a linear layer mapping the hidden state to the vocabulary
        - a dropout layer for regularization
        '''
        super(RNNModel, self).__init__()
        self.drop = nn.Dropout(dropout)
        self.embedding = nn.Embedding(vocab_size, embedding_size)
        if rnn_type in ['LSTM', 'GRU']:
            self.rnn = getattr(nn, rnn_type)(embedding_size, hidden_size, nlayers, dropout=dropout)
        else:
            try:
                nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[rnn_type]
            except KeyError:
                raise ValueError("""An invalid option for `--model` was supplied,
                options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']""")
            self.rnn = nn.RNN(embedding_size, hidden_size, nlayers,
                              nonlinearity=nonlinearity, dropout=dropout)
        self.linear = nn.Linear(hidden_size, vocab_size)
        self.init_weights()

        self.rnn_type = rnn_type
        self.hidden_size = hidden_size
        self.nlayers = nlayers

    def init_weights(self):
        initrange = 0.1
        self.embedding.weight.data.uniform_(-initrange, initrange)
        self.linear.bias.data.zero_()
        self.linear.weight.data.uniform_(-initrange, initrange)

    def forward(self, input, hidden):
        ''' Forward pass:
        - look up the word embeddings
        - feed them through the recurrent layer
        - map each hidden state to the vocabulary with the linear layer
        '''
        embed = self.drop(self.embedding(input))
        output, hidden = self.rnn(embed, hidden)
        output = self.drop(output)
        # Flatten (seq_len, batch, hidden) to (seq_len * batch, hidden) for
        # the linear layer, then restore the sequence and batch dimensions.
        linear = self.linear(output.view(-1, output.size(2)))
        return linear.view(output.size(0), output.size(1), linear.size(1)), hidden

    def init_hidden(self, batch_size, requires_grad=True):
        # new_zeros inherits dtype and device from an existing parameter.
        weight = next(self.parameters())
        if self.rnn_type == 'LSTM':
            # An LSTM needs both a hidden state and a cell state.
            return (weight.new_zeros((self.nlayers, batch_size, self.hidden_size),
                                     requires_grad=requires_grad),
                    weight.new_zeros((self.nlayers, batch_size, self.hidden_size),
                                     requires_grad=requires_grad))
        else:
            return weight.new_zeros((self.nlayers, batch_size, self.hidden_size),
                                    requires_grad=requires_grad)
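Before building the full-size model, a quick sanity check of the shapes with a tiny hypothetical configuration (illustrative only): the model maps a (seq_len, batch) tensor of word indices to (seq_len, batch, vocab_size) logits.

# Illustrative shape check with a tiny, made-up configuration.
tiny = RNNModel("LSTM", vocab_size=100, embedding_size=16, hidden_size=32, nlayers=2, dropout=0.5)
h0 = tiny.init_hidden(batch_size=4)
x = torch.randint(100, (10, 4), dtype=torch.long)  # (seq_len=10, batch=4)
logits, h1 = tiny(x, h0)
print(logits.shape)  # torch.Size([10, 4, 100])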
hidden_size = 1000
model = RNNModel("LSTM", VOCAB_SIZE, EMBEDDING_SIZE, hidden_size, 2, dropout=0.5)
if USE_CUDA:
    model = model.cuda()
def evaluate(model, dev_iter):
    model.eval()
    total_loss = 0.
    total_count = 0.
    with torch.no_grad():
        hidden = model.init_hidden(BATCH_SIZE, requires_grad=False)
        for i, batch in enumerate(dev_iter):
            data, target = batch.text, batch.target
            if USE_CUDA:
                data, target = data.cuda(), target.cuda()
            hidden = repackage_hidden(hidden)
            output, hidden = model(data, hidden)
            loss = loss_fn(output.view(-1, VOCAB_SIZE), target.view(-1))
            # Accumulate the loss weighted by the number of tokens in the batch.
            total_count += np.multiply(*data.size())
            total_loss += loss.item() * np.multiply(*data.size())
    loss = total_loss / total_count
    model.train()
    return loss
def repackage_hidden(hidden):
    ''' Detach the hidden state from its history so that backpropagation
    is truncated at batch boundaries (truncated BPTT). '''
    if isinstance(hidden, torch.Tensor):
        return hidden.detach()
    else:
        # An LSTM hidden state is a tuple (h, c); detach each recursively.
        return tuple(repackage_hidden(v) for v in hidden)
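A small illustration of what repackage_hidden does: detach returns a tensor with the same values but no grad_fn, so backpropagation cannot reach anything computed before the detach.

# Illustration: `b` carries history back to `a`; its repackaged copy does not.
a = torch.ones(1, requires_grad=True)
b = a * 2
c = repackage_hidden(b)
print(b.grad_fn is not None, c.grad_fn is None)  # True True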
loss_fn = nn.CrossEntropyLoss()
learning_rate = 0.001
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Each call to scheduler.step() multiplies the learning rate by 0.5.
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, 0.5)
GRAD_CLIP = 1.
NUM_EPOCHS = 2

val_losses = []
for epoch in range(NUM_EPOCHS):
    model.train()
    hidden = model.init_hidden(BATCH_SIZE)
    for i, batch in enumerate(train_iter):
        data, target = batch.text, batch.target
        if USE_CUDA:
            data, target = data.cuda(), target.cuda()
        # Detach the hidden state so gradients are truncated at batch boundaries.
        hidden = repackage_hidden(hidden)
        optimizer.zero_grad()
        output, hidden = model(data, hidden)
        loss = loss_fn(output.view(-1, VOCAB_SIZE), target.view(-1))
        loss.backward()
        # Clip the gradient norm to mitigate exploding gradients in RNNs.
        torch.nn.utils.clip_grad_norm_(model.parameters(), GRAD_CLIP)
        optimizer.step()
        if i % 1000 == 0:
            print("epoch", epoch, "iter", i, "loss", loss.item())
        if i % 10000 == 0:
            val_loss = evaluate(model, val_iter)
            if len(val_losses) == 0 or val_loss < min(val_losses):
                # New best model on the validation set: save it.
                print("best model, val_loss: ", val_loss)
                torch.save(model.state_dict(), "lm-best.th")
            else:
                # No improvement: halve the learning rate.
                scheduler.step()
            val_losses.append(val_loss)
best_model = RNNModel("LSTM", VOCAB_SIZE, EMBEDDING_SIZE, hidden_size, 2, dropout=0.5)
if USE_CUDA:
    best_model = best_model.cuda()
best_model.load_state_dict(torch.load("lm-best.th"))

val_loss = evaluate(best_model, val_iter)
print("validation perplexity: ", np.exp(val_loss))
test_loss = evaluate(best_model, test_iter)
print("test perplexity: ", np.exp(test_loss))
# Sample 100 words from the trained model, feeding each sampled word back in.
hidden = best_model.init_hidden(1)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
input = torch.randint(VOCAB_SIZE, (1, 1), dtype=torch.long).to(device)
words = []
for i in range(100):
    output, hidden = best_model(input, hidden)
    # Exponentiate the logits into positive weights and sample the next word.
    word_weights = output.squeeze().exp().cpu()
    word_idx = torch.multinomial(word_weights, 1)[0]
    input.fill_(word_idx)
    word = TEXT.vocab.itos[word_idx]
    words.append(word)
print(" ".join(words))