# Defined in Section 4.6.7
#encoding:utf-8
import torch
from torch import nn, optim
from torch.nn import functional as F
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence
from collections import defaultdict
from vocab import Vocab
from utils import load_pretrained, load_sentence_polarity
# tqdm is a Python module that displays the progress of an iteration as a progress bar
from tqdm.auto import tqdm
class LstmDataset(Dataset):
    def __init__(self, data):
        self.data = data
    def __len__(self):
        return len(self.data)
    def __getitem__(self, i):
        return self.data[i]
def collate_fn(examples):
    lengths = torch.tensor([len(ex[0]) for ex in examples])
    inputs = [torch.tensor(ex[0]) for ex in examples]
    targets = torch.tensor([ex[1] for ex in examples], dtype=torch.long)
    # Pad the samples in the batch so they all have the same length
    inputs = pad_sequence(inputs, batch_first=True)
    return inputs, lengths, targets
# Build the network
class LSTM(nn.Module):
    def __init__(self, sk_vocab, sk_embeddings, hidden_dim, num_class, num_layers):
        super(LSTM, self).__init__()
        self.num_layers = num_layers
        self.hidden_dim = hidden_dim
        # Keep the embedding dimension consistent with the pretrained word vectors
        embedding_dim = sk_embeddings.shape[1]
        # Embedding layer
        vocab_size = len(sk_vocab)
        self.embeddings = nn.Embedding(vocab_size, embedding_dim)
        self.embeddings.weight.data.uniform_(-0.1, 0.1)
        # Initialize the embedding layer with the pretrained word vectors
        for idx, token in enumerate(sk_vocab.idx_to_token):
            sk_idx = sk_vocab[token]
            self.embeddings.weight[idx].data.copy_(sk_embeddings[sk_idx])
        # Embedding layer -> hidden layer
        self.lstm = nn.LSTM(embedding_dim, self.hidden_dim, batch_first=True,
                            num_layers=self.num_layers, bidirectional=True)
        # Hidden layer -> output layer
        self.output = nn.Linear(self.hidden_dim * 2, num_class)
    def forward(self, inputs, lengths):
        embeddings = self.embeddings(inputs)
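        # Note: since PyTorch 1.7, pack_padded_sequence requires the lengths
        # argument to live on the CPU; .cpu() below moves only this small
        # tensor, while the packed embeddings and the LSTM stay on the GPU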
        x_pack = pack_padded_sequence(embeddings, lengths.cpu(), batch_first=True, enforce_sorted=False)
        # Tensors filled with random numbers drawn from the standard normal
        # distribution (mean 0, variance 1)
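        # NB: these states are created on the CPU and use the global
        # batch_size; this device mismatch is what the traceback below
        # points at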
        h_0 = torch.randn(self.num_layers * 2, batch_size, self.hidden_dim)
        c_0 = torch.randn(self.num_layers * 2, batch_size, self.hidden_dim)
        hidden, (hn, cn) = self.lstm(x_pack, (h_0, c_0))
        output_fw = hn[-2, :, :]  # Final hidden state of the forward direction
        output_bw = hn[-1, :, :]  # Final hidden state of the backward direction
        output = torch.cat([output_fw, output_bw], dim=-1)  # [batch_size, hidden_dim*2]
        outputs = self.output(output)
        log_probs = F.log_softmax(outputs, dim=-1)
        return log_probs
hidden_dim = 256  # Dimension of the LSTM hidden state
num_class = 2
batch_size = 32
num_epoch = 15
num_layers = 1
# Load the pretrained skip-gram word vectors: vocabulary size 109644, dimension 64
# Load the data
train_data, test_data, sk_vocab, sk_embeddings = load_sentence_polarity()
train_dataset = LstmDataset(train_data)
test_dataset = LstmDataset(test_data)
train_data_loader = DataLoader(train_dataset, batch_size=batch_size, collate_fn=collate_fn, shuffle=True)
test_data_loader = DataLoader(test_dataset, batch_size=1, collate_fn=collate_fn, shuffle=False)
# Load the model
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = LSTM(sk_vocab, sk_embeddings, hidden_dim, num_class, num_layers)
model.to(device)  # Move the model to the GPU (if CUDA is properly installed)
# Training
nll_loss = nn.NLLLoss()  # Negative log-likelihood loss; applied to log_softmax outputs this is equivalent to cross-entropy
optimizer = optim.Adam(model.parameters(), lr=0.001)  # Use the Adam optimizer
model.train()
for epoch in range(num_epoch):
    total_loss = 0
    for batch in tqdm(train_data_loader, desc=f"Training Epoch {epoch}"):
        inputs, lengths, targets = [x.to(device) for x in batch]
        log_probs = model(inputs, lengths)
        loss = nll_loss(log_probs, targets)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    print(f"Loss: {total_loss:.2f}")
# Evaluation
acc = 0
for batch in tqdm(test_data_loader, desc="Testing"):
    inputs, lengths, targets = [x.to(device) for x in batch]
    with torch.no_grad():
        output = model(inputs, lengths)
        acc += (output.argmax(dim=1) == targets).sum().item()
# Report accuracy on the test set
print(f"Acc: {acc / len(test_data_loader):.2f}")
Because of the PyTorch version, this code would not run on 1.7 or above; after changing the call to lengths.cpu(), running on the CPU was far too slow. I then downgraded PyTorch to 1.5.0, and it raised an error instead.
The traceback is as follows:
Traceback (most recent call last):
File "bilstm_sent_polarity.py", line 101, in <module>
log_probs = model(inputs, lengths)
File "/root/miniconda3/envs/myconda/lib/python3.8/site-packages/torch/nn/modules/module.py", line 550, in __call__
result = self.forward(*input, **kwargs)
File "bilstm_sent_polarity.py", line 61, in forward
hidden, (hn, cn) = self.lstm(x_pack,(h_0,c_0))
File "/root/miniconda3/envs/myconda/lib/python3.8/site-packages/torch/nn/modules/module.py", line 550, in __call__
result = self.forward(*input, **kwargs)
File "/root/miniconda3/envs/myconda/lib/python3.8/site-packages/torch/nn/modules/rnn.py", line 565, in forward
hx = self.permute_hidden(hx, sorted_indices)
File "/root/miniconda3/envs/myconda/lib/python3.8/site-packages/torch/nn/modules/rnn.py", line 531, in permute_hidden
return apply_permutation(hx[0], permutation), apply_permutation(hx[1], permutation)
File "/root/miniconda3/envs/myconda/lib/python3.8/site-packages/torch/nn/modules/rnn.py", line 20, in apply_permutation
return tensor.index_select(dim, permutation)
RuntimeError: Expected object of device type cuda but got device type cpu for argument #1 'self' in call to _th_index_select
I then moved h_0 and c_0 in the forward function onto the GPU, but I still got the same error as above. Has anyone who hit the same problem managed to solve it? I need this to run on the GPU.
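From the traceback, the failure is inside apply_permutation(hx[0], permutation): with enforce_sorted=False, pack_padded_sequence produces CUDA permutation indices, and those are used to index_select h_0, which torch.randn created on the CPU. If moving h_0 and c_0 to the GPU seemed to change nothing, the usual culprit is that tensor.to(device) is not in-place; the result must be reassigned, as in h_0 = h_0.to(device). Below is a minimal sketch of a device-safe forward, assuming the rest of the model above is unchanged; it also reads the batch size from the input, since the hard-coded global batch_size fails for any batch smaller than 32 (the last training batch, and every test batch of size 1).

    def forward(self, inputs, lengths):
        embeddings = self.embeddings(inputs)
        # lengths must be a CPU tensor since PyTorch 1.7; only this small
        # tensor moves, the packed data and the LSTM stay on the GPU
        x_pack = pack_padded_sequence(embeddings, lengths.cpu(), batch_first=True, enforce_sorted=False)
        batch = inputs.shape[0]
        # Create the initial states on the same device as the input
        h_0 = torch.randn(self.num_layers * 2, batch, self.hidden_dim, device=inputs.device)
        c_0 = torch.randn(self.num_layers * 2, batch, self.hidden_dim, device=inputs.device)
        hidden, (hn, cn) = self.lstm(x_pack, (h_0, c_0))
        output = torch.cat([hn[-2, :, :], hn[-1, :, :]], dim=-1)
        return F.log_softmax(self.output(output), dim=-1)

Simpler still: drop the (h_0, c_0) argument and call self.lstm(x_pack); PyTorch then initializes both states to zeros on the same device as the input. Zero initialization is also the conventional choice here, since fresh random states inject noise into every forward pass, including at test time. With either change there is no need to downgrade below 1.7, and note that lengths.cpu() does not move the model itself to the CPU.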