Experiment 2: Text Sentiment Classification

import torch
from torch.utils.data import DataLoader,Dataset
from tqdm import tqdm 
import re
import os
import pickle
import torch.nn as nn
import time
"""数据集搭建"""
def tokenize(content):
    content = re.sub("<.*?>", " ", content)  # strip HTML tags such as <br />
    filters = [':', '\.', '\t', '\n', '\x93', '\x97', '\x96', '#', '$', '%', '&']
    content = re.sub("|".join(filters), " ", content)  # replace the listed punctuation/control characters with spaces
    tokens = [i.strip().lower() for i in content.split()]  # lower-case and split on whitespace
    return tokens
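As a quick sanity check, the tokenizer can be run on a short made-up review (the sample text below is illustrative, not taken from the dataset):

# Illustrative only: the HTML tag is stripped, listed punctuation becomes spaces,
# and the remaining tokens are lower-cased.
sample = "Great movie!<br /> 100% worth it:\tloved the ending."
print(tokenize(sample))
# ['great', 'movie!', '100', 'worth', 'it', 'loved', 'the', 'ending']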
class word_to_sequence():  # text serialization: map words to integer ids
    def __init__(self):
        self.dict = {"UNK": 0, "PAD": 1}  # unknown-word and padding tokens
        self.count = {}
    def fit(self, sentence):  # accumulate word frequencies
        for word in sentence:
            self.count[word] = self.count.get(word, 0) + 1
    def build_vocab(self):  # build the vocabulary
        self.count = {word: value for word, value in self.count.items() if 5 < value < 200}
#         temp = sorted(self.count.items(), key=lambda x: x[-1])[0:10000]
#         self.count = dict(temp)
        for word in self.count:  # word -> id mapping
            self.dict[word] = len(self.dict)
        self.inverse_dict = dict(zip(self.dict.values(), self.dict.keys()))  # id -> word mapping
    
    def transform(self, sentence):  # convert a tokenized sentence to a fixed-length id sequence
        if len(sentence) > 100:
            sentence = sentence[0:100]
        if len(sentence) < 100:
            sentence += ["PAD"] * (100 - len(sentence))
        return [self.dict.get(word, 0) for word in sentence]  # unknown words fall back to UNK (0)
    def inverse_transform(self, indices):
        return [self.inverse_dict.get(index) for index in indices]
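A minimal sketch of how the class is used, on toy sentences rather than the real corpus (note that build_vocab() only keeps words seen more than 5 and fewer than 200 times, so a word needs enough fit() calls to get its own id):

toy = word_to_sequence()
for _ in range(6):                      # repeat so the counts pass the > 5 threshold
    toy.fit(["good", "movie"])
toy.fit(["bad"])                        # seen only once, so it is filtered out and maps to UNK
toy.build_vocab()
print(toy.dict)                         # {'UNK': 0, 'PAD': 1, 'good': 2, 'movie': 3}
seq = toy.transform(["good", "bad", "movie"])
print(seq[:5])                          # [2, 0, 3, 1, 1] -- padded with PAD (1) up to length 100
print(toy.inverse_transform(seq[:3]))   # ['good', 'UNK', 'movie']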
"""生成词典并保存"""
ws = word_to_sequence()
for file in tqdm([("DataSet/aclImdb/train/neg/" + i) for i in os.listdir("DataSet/aclImdb/train/neg")]
                 + [("DataSet/aclImdb/train/pos/" + i) for i in os.listdir("DataSet/aclImdb/train/pos")]):
    ws.fit(tokenize(open(file, errors='ignore').read().strip()))
ws.build_vocab()
print(len(ws.dict))
os.makedirs("runs/model", exist_ok=True)  # make sure the output directory exists before saving
pickle.dump(ws, open("runs/model/ws.pkl", 'wb'))
100%|██████████████████████████████████████████████████████████████████████████| 25000/25000 [00:09<00:00, 2766.20it/s]

32645
class my_dataset(Dataset):
    def __init__(self,train=True):
        super(my_dataset,self).__init__()
        if  train:
            self.total_file_path_list = [("DataSet/aclImdb/train/neg/"+ i) for i in os.listdir("DataSet/aclImdb/train/neg")]\
                +[("DataSet/aclImdb/train/pos/"+ i) for i in os.listdir("DataSet/aclImdb/train/pos")]
        else:
            self.total_file_path_list = [ ("DataSet/aclImdb/test/neg/"+ i) for i in os.listdir("DataSet/aclImdb/test/neg")]\
                +[("DataSet/aclImdb/test/pos/"+ i) for i in os.listdir("DataSet/aclImdb/test/pos")]
    def __getitem__(self,index):
        cut_path = self.total_file_path_list[index]
        cut_filename = os.path.basename(cut_path)
        text=tokenize(open(cut_path,errors='ignore').read().strip()) 
        text = ws.transform(text)
        text = torch.LongTensor(text)
        label = int(cut_filename.split("_")[-1].split(".")[0]) - 1  # filenames look like "id_rating.txt"; shift the 1-10 rating to a 0-9 class index
        return label,text
    def __len__(self):
        return len(self.total_file_path_list)
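The label therefore comes straight from the aclImdb file naming convention; a tiny check of that parsing step (the filename below is just an example):

# "123_8.txt" would be a review rated 8/10, which becomes class index 7.
example_name = "123_8.txt"   # hypothetical filename in the aclImdb "id_rating.txt" format
print(int(example_name.split("_")[-1].split(".")[0]) - 1)   # 7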
ws = pickle.load(open("runs/model/ws.pkl", 'rb'))  # reload the saved vocabulary before building the loaders
test_data = DataLoader(dataset=my_dataset(train=False), batch_size=1000, num_workers=0, shuffle=True, pin_memory=True)
train_data = DataLoader(dataset=my_dataset(train=True), batch_size=128, num_workers=0, shuffle=True, pin_memory=True)
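Assuming the directory layout above, one batch can be pulled to confirm the loader output shapes:

# Optional sanity check: labels come out as [128], token ids as [128, 100].
labels, texts = next(iter(train_data))
print(labels.shape, texts.shape)   # torch.Size([128]) torch.Size([128, 100])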
"""构建模型"""
"""
    LSTM层用法
    输入数据格式:
    input(seq_len, batch, input_size)
    h0(num_layers * num_directions, batch, hidden_size)显然等于LSTM单元的数量,层数和是否双向都会导致LSTM单元数量翻倍
    c0(num_layers * num_directions, batch, hidden_size)
     
    输出数据格式:
    output(seq_len, batch, hidden_size * num_directions)
    hn(num_layers * num_directions, batch, hidden_size)
    cn(num_layers * num_directions, batch, hidden_size)
 """
class my_net(nn.Module):
    def __init__(self):
        super(my_net, self).__init__()
        self.embedding = nn.Embedding(len(ws.dict), 200, padding_idx=1)
        self.lstm = nn.LSTM(input_size=200, hidden_size=128, num_layers=2, batch_first=True, bidirectional=True, dropout=0.5)
        # input_size=200 is the word-vector length, hidden_size=128 the number of LSTM units per layer,
        # num_layers=2 the number of stacked layers, bidirectional=True makes it a bidirectional LSTM
        self.lstm1 = nn.LSTM(input_size=128*2, hidden_size=128, num_layers=1, batch_first=True, bidirectional=False)
        self.fc1 = nn.Linear(128, 10)  # in_features would be 128*2 if lstm1 were bidirectional, 128*1 otherwise
#         self.batch_norm = nn.BatchNorm1d(64)  # used after the activation; normalizes activations to speed up training and ease vanishing/exploding gradients
#         self.fc2 = nn.Linear(64, 10)
    def forward(self, x):
#         print(x.size())
        x = self.embedding(x)  # batch_size * 100 * 200
        x, (h_n, c_n) = self.lstm(x)
#         output_fw, output_bw = h_n[-2, :, :], h_n[-1, :, :]  # for the bidirectional LSTM, concatenate the two directions
#         output = torch.cat([output_fw, output_bw], -1)
#         print(output.size())  # 128 * 256
        x, (h_n, c_n) = self.lstm1(x)
        out_put = h_n.squeeze(0)  # single-layer unidirectional LSTM: squeeze away dim 0 of h_n
        # out_put: batch_size * hidden_size
#         print(out_put.size())
        x = self.fc1(out_put)
#         x = nn.functional.relu(x)
#         x = self.batch_norm(x)
#         x = self.fc2(x)
        return x
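A dummy forward pass with random token ids (batch of 8) confirms the network maps [batch, 100] token sequences to [batch, 10] rating logits:

# Optional shape check on a freshly constructed network.
dummy_net = my_net()
dummy_ids = torch.randint(0, len(ws.dict), (8, 100))   # (batch, seq_len), ids within the vocabulary
print(dummy_net(dummy_ids).shape)                       # torch.Size([8, 10])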
model = my_net()
loss_function = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(),lr=0.001)
"""训练"""
"""模型的加载"""
# if os.path.exists("runs/model/aclImdb_net.pt"):
#     model.load_state_dict(torch.load("runs/model/aclImdb_net.pt"))
#     optimizer.load_state_dict(torch.load("runs/model/aclImdb_optimizer.pt"))
start = time.time()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")  # fall back to CPU when no GPU is available
model.to(device)
model.train(True)
for epoch in range(10):
    for i,(target,text) in enumerate(train_data):
        optimizer.zero_grad()
        text = text.to(device)
        pred = model(text)
        target=target.to(device)
        loss = loss_function(pred,target)
        loss.backward()
        optimizer.step()
        if i % 10 == 0:
            print(f"{epoch},{loss.item()}")
print(time.time() - start)
"""模型的保存"""
os.makedirs("runs/model", exist_ok=True)
torch.save(model.state_dict(),"runs/model/aclImdb_net.pt")#保存模型参数
torch.save(optimizer.state_dict(),"runs/model/aclImdb_optimizer.pt")#保存优化器参数
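With the parameters on disk, the trained network can later be reloaded and run on a single review. A minimal sketch, assuming the vocabulary and model classes above are available (the review string is made up; the predicted class index is the rating minus one):

infer_model = my_net()
infer_model.load_state_dict(torch.load("runs/model/aclImdb_net.pt", map_location="cpu"))
infer_model.eval()
review = "An absolutely wonderful film, I enjoyed every minute of it."   # illustrative input
ids = torch.LongTensor(ws.transform(tokenize(review))).unsqueeze(0)      # shape (1, 100)
with torch.no_grad():
    rating_idx = infer_model(ids).argmax(dim=-1).item()
print(f"predicted rating: {rating_idx + 1} / 10")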
"""评估"""
model.eval()
loss = 0
correct1,correct2 = 0,0
for (target,text) in test_data:
    target,text = target.to(device),text.to(device)
    with torch.no_grad():
        out = model(text)
        loss+= nn.functional.nll_loss(out,target,reduction="sum").item()
        pred = out.data.max(-1,keepdim=True)[-1]
        correct1+=(pred).eq(target.data.view_as(pred)).sum().item()
        correct2+=(pred>=5).eq(target.data.view_as(pred)>=5).sum().item()
print(loss/len(test_data.dataset),correct1/len(test_data.dataset),correct2/len(test_data.dataset))
-0.9243730639648438 0.22288 0.64636
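The last two numbers are the exact 10-class accuracy and the binary accuracy from correct2: a predicted class of 5 or more (rating >= 6) counts as positive. aclImdb only contains ratings 1-4 and 7-10, so this threshold cleanly separates the two polarities, which is why the binary figure is much higher than the 10-class one. A tiny illustration of the mapping:

# Mapping used by correct2: rating class index (0-9) -> binary sentiment.
to_sentiment = lambda idx: "pos" if idx >= 5 else "neg"
print([to_sentiment(i) for i in [0, 3, 6, 9]])   # ['neg', 'neg', 'pos', 'pos']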
