Kaggle-Sentiment Analysis on Movie Reviews
'''Sentiment Analysis on Movie Reviews'''
import math
import time
from itertools import chain

import matplotlib.pyplot as plt
import pandas as pd
import torch
from torch.nn.utils.rnn import pack_padded_sequence
from torch.utils.data import Dataset, DataLoader
class SAData(Dataset):
    def __init__(self, train):
        # Build the data samples.
        self.train = train
        self.data = pd.read_csv('../dataset/sentiment-analysis-on-movie-reviews/train.tsv', sep='\t')
        # Randomly take 80% as the training set; slicing by index order would
        # give an unrepresentative split, so sample with a fixed random_state.
        train_part = self.data.sample(frac=0.8, replace=False, random_state=1, axis=0)
        if self.train:
            ### For the final run, train on all of the data ###
            self.data = train_part.reset_index(drop=True)  # rebuild the index
        else:
            # The remaining 20% is the validation set. Note: sampling
            # frac=0.2 with the same random_state would overlap the training
            # rows, so take the complement of the training sample instead.
            self.data = self.data.drop(train_part.index).reset_index(drop=True)
        self.len = self.data.shape[0]
        self.x_data, self.y_data = self.data['Phrase'], self.data['Sentiment']

    def __getitem__(self, index):
        # Return one sample by index.
        return self.x_data[index], self.y_data[index]

    def __len__(self):
        # Return the number of samples.
        return self.len
# Dataset objects for the training and validation sets
train_set = SAData(train=True)
validation_set = SAData(train=False)
# Hyper Parameters
N_CHARS = 128  # number of ASCII codes (the character-level vocabulary size)
HIDDEN_SIZE = 128
N_LAYER = 2
BATCH_SIZE = 128
N_EPOCHS = 100
USE_GPU = True
N_CLASS = len(set(train_set.y_data))  # 5 sentiment classes in this competition
# DataLoader objects for the training and validation sets
train_loader = DataLoader(
    dataset=train_set,
    batch_size=BATCH_SIZE,
    shuffle=True,
    # num_workers=2
)
validation_loader = DataLoader(
    dataset=validation_set,
    batch_size=BATCH_SIZE,
    shuffle=False,  # not shuffling the validation set makes results easier to inspect
    # num_workers=2
)
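# Note on batch layout (our own comment, not in the original): with the
# default collate function, each batch from these loaders is a list of up to
# BATCH_SIZE raw phrase strings plus a LongTensor of the matching sentiment
# labels, e.g.
#   phrase, sentiment = next(iter(train_loader))
#   type(phrase[0])  -> str
#   sentiment.shape  -> torch.Size([BATCH_SIZE])
# which is why make_tensor() below has to encode the strings itself.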
def time_since(since):
    s = time.time() - since
    m = math.floor(s / 60)
    s -= m * 60
    return '%dm %ds' % (m, s)
def phrase2list(phrase):
    arr = [ord(c) for c in phrase]  # ord() returns the character's ASCII code
    return arr, len(arr)
def create_tensor(tensor):
    if USE_GPU:
        device = torch.device('cuda:0')
        tensor = tensor.to(device)
    return tensor
def make_tensor(phrase, sentiment):
    # phrase string -> character list -> corresponding ASCII codes
    sequences_and_lengths = [phrase2list(p) for p in phrase]
    phrase_sequences = [sl[0] for sl in sequences_and_lengths]
    seq_lengths = torch.LongTensor([sl[1] for sl in sequences_and_lengths])
    sentiment = sentiment.long()
    # Make a padded tensor of shape batchSize x seqLen.
    seq_tensor = torch.zeros(len(phrase_sequences), seq_lengths.max()).long()
    for idx, (seq, seq_len) in enumerate(zip(phrase_sequences, seq_lengths)):
        seq_tensor[idx, :seq_len] = torch.LongTensor(seq)  # positions past seq_len stay zero
    # Sort by length (descending) so pack_padded_sequence can be used;
    # perm_idx holds each sorted element's original index.
    seq_lengths, perm_idx = seq_lengths.sort(dim=0, descending=True)
    seq_tensor = seq_tensor[perm_idx]  # reorder the padded sequences by length
    sentiment = sentiment[perm_idx]
    return create_tensor(seq_tensor), create_tensor(seq_lengths), create_tensor(sentiment)
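
# Optional sanity check for make_tensor (the helper name is ours; it is not
# called anywhere in the pipeline, and with USE_GPU = True it needs a CUDA
# device because create_tensor() moves the tensors to the GPU):
def _demo_make_tensor():
    phrases = ('a fine film', 'dull', 'watchable')
    sentiments = torch.tensor([3, 1, 2])
    seq_tensor, seq_lengths, target = make_tensor(phrases, sentiments)
    # seq_tensor: 3 x 11 LongTensor of ASCII codes, zero-padded and sorted
    # longest-first; seq_lengths: tensor([11, 9, 4]); target follows the
    # same reordering: tensor([3, 2, 1]).
    print(seq_tensor.shape, seq_lengths, target)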
class RNNClassifier(torch.nn.Module):
    def __init__(self, input_size, hidden_size, output_size, n_layers=1, bidirection=True):
        super(RNNClassifier, self).__init__()
        self.hidden_size = hidden_size
        self.n_layers = n_layers
        self.n_directions = 2 if bidirection else 1
        self.embedding = torch.nn.Embedding(input_size, hidden_size)
        self.gru = torch.nn.GRU(hidden_size, hidden_size, n_layers, bidirectional=bidirection)
        self.fc = torch.nn.Linear(hidden_size * self.n_directions, output_size)

    def _init_hidden(self, batch_size):
        hidden = torch.zeros(self.n_layers * self.n_directions, batch_size, self.hidden_size)
        return create_tensor(hidden)

    def forward(self, input, seq_lengths):
        input = input.t()  # transpose: B x S -> S x B
        batch_size = input.size(1)
        hidden = self._init_hidden(batch_size)
        embedding = self.embedding(input)
        # Think of "pack" as compressing: it compacts a zero-padded batch of
        # variable-length sequences, since the padded positions are redundant.
        # (Recent PyTorch versions require the lengths tensor on the CPU.)
        gru_input = pack_padded_sequence(embedding, seq_lengths.cpu())
        output, hidden = self.gru(gru_input, hidden)
        if self.n_directions == 2:
            # Bidirectional GRU: concatenate the last layer's forward and
            # backward final hidden states.
            hidden_cat = torch.cat([hidden[-1], hidden[-2]], dim=1)
        else:
            hidden_cat = hidden[-1]
        fc_output = self.fc(hidden_cat)
        return fc_output
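
# A minimal illustration of what pack_padded_sequence does (our own addition,
# not called by the pipeline). For a padded 2 x 3 batch with lengths [3, 1],
# the packed object keeps only the 4 real time steps plus a per-step batch
# size, so the GRU never processes padding:
def _demo_pack():
    padded = torch.tensor([[1, 2, 3],
                           [4, 0, 0]])  # batch of 2, zero-padded to length 3
    emb = torch.nn.Embedding(10, 2)
    packed = pack_padded_sequence(emb(padded.t()), torch.tensor([3, 1]))
    print(packed.batch_sizes)  # tensor([2, 1, 1]): 2 sequences alive at step 0, then 1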
def trainModel():
    total_loss = 0
    for i, (phrase, sentiment) in enumerate(train_loader, 1):
        inputs, seq_lengths, target = make_tensor(phrase, sentiment)
        output = classifier(inputs, seq_lengths)
        loss = criterion(output, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
        if i % 10 == 0:
            print(f'[{time_since(start)}] Epoch {epoch} ', end='')
            print(f'[{i * len(inputs)}/{len(train_set)}] ', end='')
            print(f'loss={total_loss / (i * len(inputs))}')  # running average loss per sample
def evalModel():
    correct = 0
    total = len(validation_set)
    print("Evaluating trained model...")
    with torch.no_grad():
        for phrase, sentiment in validation_loader:
            inputs, seq_lengths, target = make_tensor(phrase, sentiment)
            output = classifier(inputs, seq_lengths)
            pred = output.max(dim=1, keepdim=True)[1]
            correct += pred.eq(target.view_as(pred)).sum().item()
        percent = '%.2f' % (100 * correct / total)
        print(f'Validation set: Accuracy {correct}/{total} {percent}%')
    return correct / total
# Load the test set
def get_test_set():
    test_set = pd.read_csv('../dataset/sentiment-analysis-on-movie-reviews/test.tsv', sep='\t')
    PhraseId = test_set['PhraseId']
    Phrase = test_set['Phrase']
    return PhraseId, Phrase
# Text-processing helper for the test set (no labels)
def make_tensor_test(phrase):
    # phrase string -> character list -> corresponding ASCII codes
    sequences_and_lengths = [phrase2list(p) for p in phrase]
    phrase_sequences = [sl[0] for sl in sequences_and_lengths]
    seq_lengths = torch.LongTensor([sl[1] for sl in sequences_and_lengths])
    # Make a padded tensor of shape batchSize x seqLen.
    seq_tensor = torch.zeros(len(phrase_sequences), seq_lengths.max()).long()
    for idx, (seq, seq_len) in enumerate(zip(phrase_sequences, seq_lengths)):
        seq_tensor[idx, :seq_len] = torch.LongTensor(seq)  # positions past seq_len stay zero
    # Sort by length (descending) so pack_padded_sequence can be used.
    seq_lengths, perm_idx = seq_lengths.sort(dim=0, descending=True)
    seq_tensor = seq_tensor[perm_idx]
    # Sorting shuffles each batch's order, so record org_idx (the inverse
    # permutation) to restore the predictions to their original order.
    _, org_idx = perm_idx.sort(descending=False)
    return create_tensor(seq_tensor), create_tensor(seq_lengths), org_idx
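
# The inverse-permutation trick in a toy example (our own addition, not called
# by the pipeline): sorting perm_idx itself yields the indices that undo the sort.
def _demo_restore_order():
    lengths = torch.tensor([2, 5, 3])
    sorted_lengths, perm_idx = lengths.sort(descending=True)  # perm_idx = [1, 2, 0]
    _, org_idx = perm_idx.sort()                              # org_idx = [2, 0, 1]
    assert torch.equal(sorted_lengths[org_idx], lengths)      # original order restored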
def predict():
    # Run the saved model on the test set
    PhraseId, Phrase = get_test_set()
    sentiment_list = []  # list of predictions
    batchNum = math.ceil(PhraseId.shape[0] / BATCH_SIZE)  # total number of batches
    classifier = torch.load('./results/sentimentAnalyst.pkl')
    if USE_GPU:
        device = torch.device("cuda:0")
        classifier.to(device)
    with torch.no_grad():
        for i in range(batchNum):
            print(i)  # progress
            if i == batchNum - 1:
                # The final batch may be smaller than BATCH_SIZE
                phraseBatch = Phrase[BATCH_SIZE * i:]
            else:
                phraseBatch = Phrase[BATCH_SIZE * i:BATCH_SIZE * (i + 1)]
            inputs, seq_lengths, org_idx = make_tensor_test(phraseBatch)
            output = classifier(inputs, seq_lengths)
            sentiment = output.max(dim=1, keepdim=True)[1]
            sentiment = sentiment[org_idx].squeeze(1)  # restore the original batch order
            sentiment_list.append(sentiment.cpu().numpy().tolist())
    sentiment_list = list(chain.from_iterable(sentiment_list))  # flatten into one list
    result = pd.DataFrame({'PhraseId': PhraseId, 'Sentiment': sentiment_list})
    result.to_csv('./results/SA_predict.csv', index=False)  # save the submission
# Main Cycle
if __name__ == '__main__':
    classifier = RNNClassifier(N_CHARS, HIDDEN_SIZE, N_CLASS, N_LAYER)
    if USE_GPU:
        device = torch.device("cuda:0")
        classifier.to(device)
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(classifier.parameters(), lr=0.001)
    start = time.time()
    print("Training for %d epochs..." % N_EPOCHS)
    acc_list = []
    for epoch in range(1, N_EPOCHS + 1):
        trainModel()
        acc = evalModel()
        acc_list.append(acc)
        # Save the model whenever it matches the best accuracy so far
        if acc >= max(acc_list):
            torch.save(classifier, './results/sentimentAnalyst.pkl')
            print('Save Model!')
    predict()  # predict on the test set
    # Plot Accuracy
    epochs = [e + 1 for e in range(len(acc_list))]
    plt.plot(epochs, acc_list)
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.grid()
    plt.show()
# A problem that can appear after the model has trained for a while:
# RuntimeError: cuDNN error: CUDNN_STATUS_INTERNAL_ERROR
# most likely caused by running out of GPU memory
This is also the homework for Lecture 13 of 刘二大人's PyTorch course on Bilibili.
On my own machine the run crashed after only 4 epochs of training; predicting with the saved model gave a Kaggle score of 0.60755.
Training for more epochs should give better results.
Feedback and discussion are welcome!
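If you hit the cuDNN error above, one workaround sketch (assuming the cause really is GPU memory, which I have not verified): lower BATCH_SIZE, or run the prediction stage on the CPU with the saved model:

# hypothetical recovery snippet, not part of the original script
USE_GPU = False  # so create_tensor() keeps the input tensors on the CPU
classifier = torch.load('./results/sentimentAnalyst.pkl', map_location='cpu')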