For the model folder, please see the uploaded resources.
demo0415001.py
"""
word embedding
文本情感分类
数据下载地址:https://ai.stanford.edu/~amaas/data/sentiment/
思路分析:准备数据 构建模型 模型训练 模型评估
"""
from torch.utils.data import DataLoader, Dataset
from demo0421002 import ws, max_len, batch_size
import os
import re
import torch
def tokenlize(content):
    """Turn a raw review string into a list of lowercase tokens."""
    filters = ['!', '"', '#', '$', '%', '&', '(', ')', '*', '+', ',', '-', '.', '/', ':', ';', '<', '=', '>', '?',
               '@', '[', '\\', ']', '^', '_', '`', '{', '|', '}', '~', '\t', '\n', '\x97', '\x96', '”', '“']
    content = re.sub("<.*?>", " ", content)  # strip HTML tags such as <br />
    content = re.sub("|".join(re.escape(f) for f in filters), '', content)  # drop punctuation and control chars
    tokens = [i.strip().lower() for i in content.split()]
    return tokens
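# For example (a rough sketch of the expected behavior):
#   tokenlize("I <br /> loved it!")  ->  ['i', 'loved', 'it']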
class ImdbDataset(Dataset):
def __init__(self, train=True):
self.train_data_path = r"D:\pythonProject\aclImdb\train"
self.test_data_path = r"D:\pythonProject\aclImdb\test"
data_path = self.train_data_path if train else self.test_data_path
        # 1. collect the paths of all review files into one list
        temp_data_path = [os.path.join(data_path, "pos"), os.path.join(data_path, "neg")]
        self.total_file_path = []  # the paths of every .txt file under pos/ and neg/
        for path in temp_data_path:
            file_name_list = os.listdir(path)  # os.listdir returns the names of the entries in the given directory
            file_path_list = [os.path.join(path, i) for i in file_name_list if i.endswith(".txt")]
            # join each file name onto its directory path
            self.total_file_path.extend(file_path_list)
def __getitem__(self, index):
file_path = self.total_file_path[index]
        # the label is the parent folder name: "neg" -> 0, "pos" -> 1
        label_str = file_path.split("\\")[-2]
        label = 0 if label_str == "neg" else 1
        # read and tokenize the review text
        content = open(file_path, errors='ignore').read()
tokens = tokenlize(content)
return tokens, label
def __len__(self):
return len(self.total_file_path)
def collate_fn(batch):
    """
    Custom collate function: the default collate cannot stack variable-length
    token lists into a tensor, so each review is first converted to a
    fixed-length id sequence with ws.transform.
    :param batch: [(tokens, label), (tokens, label), ...]
    :return: (content, label) LongTensors of shape [batch_size, max_len] and [batch_size]
    """
content, label = list(zip(*batch))
content = [ws.transform(i, max_len=max_len) for i in content]
content = torch.LongTensor(content)
label = torch.LongTensor(label)
return content, label
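# A sketch of the transformation, assuming a toy vocab where "good" -> 2 and
# unknown words -> UNK (0), with max_len = 3 and PAD id 1:
#   batch = [(["good", "movie"], 1), (["bad"], 0)]
#   -> content = LongTensor([[2, 0, 1], [0, 1, 1]]), label = LongTensor([1, 0])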
def get_dataloader(train=True, batch_size=batch_size):
imdb_dataset = ImdbDataset(train)
data_loader = DataLoader(imdb_dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
return data_loader
if __name__ == "__main__":
for idx, (input, target) in enumerate(get_dataloader()):
print(idx)
print(input.size())
print(target.size())
break
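Running this file directly (after ./model/ws.pkl has been generated by demo0421001.py below) should print batch index 0 followed by torch.Size([512, 200]) for the inputs and torch.Size([512]) for the targets, given batch_size = 512 and max_len = 200 from demo0421002.py.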
demo0415002.py
"""
实现的是:构建词典,实现方法把句子转化为数字序列和其反转
"""
class Word2Sequence:
UNK_TAG = "UNK"
PAD_TAG = "PAD"
UNK = 0
PAD = 1
def __init__(self):
self.dict = {
self.UNK_TAG: self.UNK,
self.PAD_TAG: self.PAD
}
self.inverse_dict = {}
        self.count = {}  # word frequency counts
    def fit(self, sentence):
        """Accumulate word counts from a single sentence.
        :param sentence: [word1, word2, word3, ...]
        """
for word in sentence:
self.count[word] = self.count.get(word, 0) + 1
    def build_vocab(self, min=5, max=None, max_feature=None):
        """
        Build the vocabulary.
        :param min: minimum count (a word must occur more than min times to be kept)
        :param max: maximum count
        :param max_feature: cap on the number of words to keep
        :return:
        """
        # drop words whose count is not above min
        if min is not None:
            self.count = {word: value for word, value in self.count.items() if value > min}
        # drop words whose count is not below max
        if max is not None:
            self.count = {word: value for word, value in self.count.items() if value < max}
        # keep only the max_feature most frequent words
        if max_feature is not None:
            temp = sorted(self.count.items(), key=lambda x: x[-1], reverse=True)[:max_feature]
            self.count = dict(temp)
        for word in self.count:
            self.dict[word] = len(self.dict)
        # build the reverse mapping: id -> word
self.inverse_dict = dict(zip(self.dict.values(), self.dict.keys()))
    def transform(self, sentence, max_len=None):
        """
        Convert a sentence to a sequence of ids.
        :param sentence: [word1, word2, word3, ...]
        :param max_len: int, pad or truncate the sentence to this length
        :return:
        """
        if max_len is not None:
            if max_len > len(sentence):
                sentence = sentence + [self.PAD_TAG] * (max_len - len(sentence))  # pad
            if max_len < len(sentence):
                sentence = sentence[:max_len]  # truncate
return [self.dict.get(word, self.UNK) for word in sentence]
    def inverse_transform(self, indices):
        """
        Convert a sequence of ids back to a sentence.
        :param indices: [1, 2, 3, 4, ...]
        :return:
        """
return [self.inverse_dict.get(idx) for idx in indices]
def __len__(self):
return len(self.dict)
# if __name__ == "__main__":
#     ws = Word2Sequence()
#     ws.fit(["I", "am", "who"])
#     ws.fit(["I", "am", "I"])
#     ws.build_vocab(min=0)
#     print(ws.dict)
#
#     ret = ws.transform(["I", "love", "Beijing"], max_len=10)
#     print(ret)
#     ret = ws.inverse_transform(ret)
#     print(ret)
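If the commented-out demo above is enabled, it should print the vocabulary {'UNK': 0, 'PAD': 1, 'I': 2, 'am': 3, 'who': 4}, then [2, 0, 0, 1, 1, 1, 1, 1, 1, 1] ('love' and 'Beijing' were never fitted, so they map to UNK, and the tail is PAD), and finally ['I', 'UNK', 'UNK', 'PAD', 'PAD', 'PAD', 'PAD', 'PAD', 'PAD', 'PAD'].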
demo0421002.py
import pickle
import torch
# vocabulary built and pickled by demo0421001.py; run that script first
ws = pickle.load(open("./model/ws.pkl", "rb"))
max_len = 200
batch_size = 512
test_batch_size = 1000
hidden_size = 128
num_layers = 2
bidirectional = True
dropout = 0.4
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
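Note that ws.pkl is unpickled at import time, so demo0421001.py below must have been run once before anything imports this module. Because demo0421001.py itself imports tokenlize from demo0415001.py, which in turn imports this module, the very first run can hit a FileNotFoundError. A minimal sketch of a guard that avoids this (not in the original code):

import os
import pickle

ws = None
if os.path.exists("./model/ws.pkl"):
    with open("./model/ws.pkl", "rb") as f:
        ws = pickle.load(f)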
demo0421001.py
from demo0415002 import Word2Sequence
from demo0415001 import tokenlize
import os
import pickle
from tqdm import tqdm
if __name__ == "__main__":
ws = Word2Sequence()
path = r"D:\pythonProject\aclImdb\train"
temp_data_path = [os.path.join(path, "pos"), os.path.join(path, "neg")]
for data_path in temp_data_path:
        file_paths = [os.path.join(data_path, file_name) for file_name in os.listdir(data_path) if file_name.endswith(".txt")]
for file_path in tqdm(file_paths):
sentence = tokenlize(open(file_path, errors='ignore').read())
ws.fit(sentence)
ws.build_vocab(min=10, max_feature=10000)
pickle.dump(ws, open("./model/ws.pkl", "wb"))
print(len(ws))
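With min=10 (the comparison in build_vocab is strict, so only words seen more than 10 times survive) and max_feature=10000, the printed vocabulary size is at most 10002, counting the UNK and PAD entries.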
demo0421003.py
"""
定义模型
模型优化方法:
# 添加一个新的全连接层作为输出层,激活函数处理
# 把双向的lstm的output传给一个单向的lstm再进行处理
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adam
from demo0421002 import ws, max_len, hidden_size, num_layers, bidirectional, dropout, device, test_batch_size
from demo0415001 import get_dataloader
import os
import numpy as np
from tqdm import tqdm
class MyModel(nn.Module):
def __init__(self):
super(MyModel, self).__init__()
self.embedding = nn.Embedding(len(ws), 100)
        # bidirectional LSTM over the embedded sequence
self.lstm = nn.LSTM(input_size=100, hidden_size=hidden_size, num_layers=num_layers,
bidirectional=bidirectional, batch_first=True, dropout=dropout)
self.fc = nn.Linear(hidden_size * 2, 2)
def forward(self, input):
"""
:param input:[batch_size,max_len]
:return:
"""
        x = self.embedding(input)  # embedding lookup, shape: [batch_size, max_len, 100]
        # x:   [batch_size, max_len, (bidirectional) 2 * hidden_size]
        # h_n: [(bidirectional) 2 * (num_layers) 2, batch_size, hidden_size]
        # c_n: [(bidirectional) 2 * (num_layers) 2, batch_size, hidden_size]
        x, (h_n, c_n) = self.lstm(x)
        # take the final hidden state of each direction and concatenate them
        output_fw = h_n[-2, :, :]  # final forward hidden state
        output_bw = h_n[-1, :, :]  # final backward hidden state
        output = torch.cat([output_fw, output_bw], dim=-1)  # [batch_size, hidden_size*2]
        out = self.fc(output)  # fully-connected output layer
return F.log_softmax(out, dim=-1)
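# A sketch of the second improvement idea from the module docstring (not used
# elsewhere in this file): feed the bidirectional LSTM's output into a
# unidirectional LSTM and classify from its final hidden state. MyModel2 is a
# hypothetical class, assuming the same hyperparameters as MyModel.
class MyModel2(nn.Module):
    def __init__(self):
        super(MyModel2, self).__init__()
        self.embedding = nn.Embedding(len(ws), 100)
        self.lstm1 = nn.LSTM(input_size=100, hidden_size=hidden_size, num_layers=num_layers,
                             bidirectional=True, batch_first=True, dropout=dropout)
        # the second LSTM consumes the concatenated forward/backward features
        self.lstm2 = nn.LSTM(input_size=hidden_size * 2, hidden_size=hidden_size,
                             num_layers=1, batch_first=True)
        self.fc = nn.Linear(hidden_size, 2)

    def forward(self, input):
        x = self.embedding(input)      # [batch_size, max_len, 100]
        x, _ = self.lstm1(x)           # [batch_size, max_len, hidden_size * 2]
        _, (h_n, c_n) = self.lstm2(x)  # h_n: [1, batch_size, hidden_size]
        out = self.fc(h_n[-1])         # [batch_size, 2]
        return F.log_softmax(out, dim=-1)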
model = MyModel().to(device)
optimizer = Adam(model.parameters(), 0.001)
# resume from a saved checkpoint if one exists
if os.path.exists("./model/model.pkl"):
model.load_state_dict(torch.load("./model/model.pkl"))
optimizer.load_state_dict(torch.load("./model/optimizer.pkl"))
def train(epoch):
for idx, (input, target) in enumerate(get_dataloader(train=True)):
input = input.to(device)
target = target.to(device)
optimizer.zero_grad()
output = model(input)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
print(epoch, idx, loss.item())
if idx % 100 == 0:
torch.save(model.state_dict(), "./model/model.pkl")
torch.save(optimizer.state_dict(), "./model/optimizer.pkl")
def eval():
loss_list = []
acc_list = []
data_loader = get_dataloader(train=False, batch_size=test_batch_size)
    for idx, (input, target) in tqdm(enumerate(data_loader), total=len(data_loader), desc="testing:"):
input = input.to(device)
target = target.to(device)
with torch.no_grad():
output = model(input)
cur_loss = F.nll_loss(output, target)
loss_list.append(cur_loss.cpu().item())
            # accuracy: argmax prediction vs. target
            pred = output.max(dim=-1)[-1]
cur_acc = pred.eq(target).float().mean()
acc_list.append(cur_acc.cpu().item())
print("total loss,acc:", np.mean(loss_list), np.mean(acc_list))
if __name__ == "__main__":
    # uncomment to train for 10 epochs before evaluating
    # for i in range(10):
    #     train(i)
eval()
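For completeness, a minimal inference sketch built on the modules above; predict and the sample call are hypothetical additions, not part of the original files, and assume a trained ./model/model.pkl was loaded when demo0421003.py started:

from demo0415001 import tokenlize

def predict(text):
    """Predict the sentiment of one review string: 1 = positive, 0 = negative."""
    tokens = tokenlize(text)
    # map tokens to ids, pad/truncate to max_len, and add a batch dimension
    input = torch.LongTensor([ws.transform(tokens, max_len=max_len)]).to(device)
    model.eval()
    with torch.no_grad():
        output = model(input)  # [1, 2] log-probabilities
    return output.max(dim=-1)[-1].item()

print(predict("This movie was absolutely wonderful!"))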