Word2Vec + GRU Text Classification: Hands-On Code

import re

import gensim
import jieba
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
import torch.optim as optim
import torch.utils.data as Data
from gensim.models import KeyedVectors, Word2Vec
from gensim.models.word2vec import LineSentence
from sklearn.metrics import f1_score
from torch import nn
from tqdm import tqdm

# Render Chinese characters and the minus sign correctly in matplotlib plots
plt.rcParams["font.sans-serif"] = ['SimHei']
plt.rcParams["axes.unicode_minus"] = False
# pip install gensim -i https://pypi.tuna.tsinghua.edu.cn/simple
# The spreadsheet holds 20,000 reviews in total; only the first 10,000 are used here
def data_process():  # load the raw data and build zero-based labels
    data = pd.read_excel("美团评论(1).xlsx")
    # print(data.columns)
    train_text_data = data['内容'].values        # review text
    temp = data['评价星级'].values               # star rating, 1-5
    train_text_data_label = [int(i) - 1 for i in temp]  # shift labels to start at 0

    return train_text_data[:10000], train_text_data_label[:10000]
# Reference: https://blog.csdn.net/qq_43391414/article/details/118557836?.
train_text_data, train_text_data_label = data_process()
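# Quick sanity check: number of reviews, one raw review, and its zero-based label
print(len(train_text_data), train_text_data[0], train_text_data_label[0])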

# Train the word2vec embedding on our own corpus first; this only needs to run once.
# "w" mode rewrites the corpus file on each run instead of appending duplicate lines.
with open("word2vec_txt.txt", "w", encoding='utf-8') as f:
    for i in tqdm(train_text_data):
        i = "".join(re.findall('[\u4e00-\u9fa5]', str(i)))  # keep Chinese characters only
        i = " ".join(jieba.cut(i, cut_all=False))           # segment into space-separated tokens
        f.write(i)
        f.write("\n")
# Train a CBOW model (sg=0) with 64-dimensional vectors
# (gensim >= 4.0 renamed the old `size` argument to `vector_size`)
model = Word2Vec(LineSentence('word2vec_txt.txt'), sg=0, vector_size=64, window=3, min_count=1, workers=4)
# Save the model
model.save('test.model')
# Load the word vectors back from the saved model

model_vec = gensim.models.Word2Vec.load('test.model')
vocab = model_vec.wv.index_to_key  # vocabulary list (called index2word on gensim 3.x)
print(vocab)
print(len(vocab))
# print(model_vec.wv['的'])
# print(model_vec.wv.most_similar('的', topn=2))
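# If only lookups are needed later, the lighter-weight KeyedVectors object can be saved
# instead of the full model (a sketch; 'test.kv' is an arbitrary filename):
model_vec.wv.save('test.kv')
word_vectors = KeyedVectors.load('test.kv')
# word_vectors['的'] returns the same 64-dimensional vector as model_vec.wv['的']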

# Tokenize every review and record its length to choose a fixed sequence length;
# np.mean(words_len) comes out around 50, so 50 is used below
words_len = []
for line in tqdm(train_text_data):
    line = "".join(re.findall('[\u4e00-\u9fa5]', str(line)))
    line = list(jieba.cut(line, cut_all=False))
    words_len.append(len(line))
print(np.max(words_len), np.min(words_len), np.mean(words_len))
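# A percentile view can also guide the cutoff: this shows the length that covers
# 90% of the reviews (illustrative only; the cutoff below stays at 50)
print(np.percentile(words_len, 90))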

# Pad short reviews and truncate long ones to a fixed length of 50 tokens
train_text_data_lines = []
for i in tqdm(train_text_data):
    i = "".join(re.findall('[\u4e00-\u9fa5]', str(i)))
    i = list(jieba.cut(i, cut_all=False))
    if len(i) < 50:
        i = i + ["的"] * (50 - len(i))  # pad with the high-frequency filler word "的"
    else:
        i = i[:50]
    train_text_data_lines.append(i)
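# Padding with a real word like "的" injects its embedding into every padded slot.
# An alternative sketch: pad with a token such as "<pad>" that is absent from the
# vocabulary, so the lookup function below falls back to an all-zero vector:
# i = i + ["<pad>"] * (50 - len(i))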


# Turn each tokenized sentence into a matrix: look up every token's vector in the
# trained word2vec model, so each sentence becomes one (50, 64) matrix.
def hang_vectoes_label(i_line, model_vec):  # returns the (50, 64) word-vector matrix for one tokenized sentence
    meihang_wenben_juzhen = np.zeros((50, 64))  # one all-zero (50, 64) matrix per sentence
    for idx, a in enumerate(i_line):
        try:
            vec = model_vec.wv[a]  # fetch the token's vector from the trained model
        except KeyError:           # token missing from the vocabulary: keep an all-zero vector
            vec = np.zeros(64)
        meihang_wenben_juzhen[idx] = vec
    return meihang_wenben_juzhen
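# Shape check on a single review: each padded/truncated sentence maps to a fixed
# (50, 64) float matrix
print(hang_vectoes_label(train_text_data_lines[0], model_vec).shape)  # (50, 64)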

# Build the training matrices; the labels are already in train_text_data_label
train_data_matrix = []
for line in train_text_data_lines:
    train_data_matrix.append(hang_vectoes_label(line, model_vec))

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
Embedding_size = 64   # word2vec vector dimension
Batch_Size = 16
Epoch = 10
Dropout = 0.5
Learning_rate = 0.00001
num_classes = 5       # star ratings 1-5, shifted to labels 0-4

class TextCNNDataSet(Data.Dataset):  # name kept from a TextCNN variant; it is a plain tensor dataset
    def __init__(self, data_inputs, data_targets):
        # The inputs are word-vector matrices, so they must stay float tensors;
        # LongTensor would truncate the embeddings to integers.
        self.inputs = torch.FloatTensor(data_inputs)
        self.label = torch.LongTensor(data_targets)

    def __getitem__(self, index):
        return self.inputs[index], self.label[index]

    def __len__(self):
        return len(self.inputs)


full_dataset = TextCNNDataSet(np.array(train_data_matrix), list(train_text_data_label))
train_size = int(len(train_data_matrix) * 0.8)
test_size = len(train_data_matrix) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(full_dataset, [train_size, test_size])

TrainDataLoader = Data.DataLoader(train_dataset, batch_size=Batch_Size, shuffle=True, drop_last=True)
TestDataLoader = Data.DataLoader(test_dataset, batch_size=Batch_Size, shuffle=True, drop_last=True)
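# A batch from the loader should be [Batch_Size, 50, Embedding_size] inputs and
# [Batch_Size] labels (a quick shape check, safe to remove):
sample_x, sample_y = next(iter(TrainDataLoader))
print(sample_x.shape, sample_y.shape)  # torch.Size([16, 50, 64]) torch.Size([16])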

class GRU(nn.Module):
    def __init__(self, batch_size):
        super(GRU, self).__init__()
        self.batch_size = batch_size
        # batch_first=True makes the input layout [batch, seq_len, embedding];
        # set bidirectional=True for a bidirectional GRU (doubling the output size)
        self.gru = nn.GRU(Embedding_size, Embedding_size, num_layers=1, bidirectional=False, batch_first=True)
        self.dropout = nn.Dropout(Dropout)
        self.fc = nn.Linear(Embedding_size, 1)  # project each time step's hidden state to a scalar
        self.fc2 = nn.Linear(50, num_classes)   # map the 50 per-step scalars to class logits

    def forward(self, X):
        # X: [batch_size, 50, Embedding_size] float tensor of word vectors
        out, _ = self.gru(X)        # [batch_size, 50, Embedding_size]
        output = self.fc(out)       # [batch_size, 50, 1]
        output = output.squeeze(2)  # [batch_size, 50]
        output = self.fc2(output)   # [batch_size, num_classes]
        return output
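# Sanity check of the forward pass with a random batch (illustrative only):
# the logits should come out as [Batch_Size, num_classes]
_dummy = torch.randn(Batch_Size, 50, Embedding_size)
print(GRU(Batch_Size)(_dummy).shape)  # torch.Size([16, 5])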

model = GRU(Batch_Size).to(device)
criterion = nn.CrossEntropyLoss().to(device)
optimizer = optim.Adam(model.parameters(),lr=Learning_rate)

def binary_acc(pred, y):
    """
    Compute batch accuracy (despite the name, this also works for multi-class labels).
    :param pred: predicted class indices
    :param y: ground-truth labels
    :return: accuracy as a float
    """
    correct = torch.eq(pred, y).float()
    acc = correct.sum() / len(correct)
    return acc.item()

def train():
    avg_acc = []
    model.train()
    for index, (batch_x, batch_y) in enumerate(TrainDataLoader):
        batch_x, batch_y = batch_x.to(device), batch_y.to(device)
        pred = model(batch_x)
        loss = criterion(pred, batch_y)
        acc = binary_acc(torch.max(pred, dim=1)[1], batch_y)  # argmax over the class logits
        avg_acc.append(acc)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    avg_acc = np.array(avg_acc).mean()
    return avg_acc

def evaluate():
    """
    Evaluate the current model on the test set.
    :return: mean accuracy over all test batches
    """
    avg_acc = []
    model.eval()  # switch to evaluation mode
    with torch.no_grad():
        for x_batch, y_batch in TestDataLoader:
            x_batch, y_batch = x_batch.to(device), y_batch.to(device)
            pred = model(x_batch)
            acc = binary_acc(torch.max(pred, dim=1)[1], y_batch)
            avg_acc.append(acc)
    return np.array(avg_acc).mean()
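# f1_score (imported above) can complement accuracy when the star-rating classes are
# imbalanced; a sketch of macro-F1 over the test set:
def evaluate_f1():
    all_pred, all_true = [], []
    model.eval()
    with torch.no_grad():
        for x_batch, y_batch in TestDataLoader:
            pred = model(x_batch.to(device))
            all_pred.extend(torch.max(pred, dim=1)[1].cpu().tolist())
            all_true.extend(y_batch.tolist())
    return f1_score(all_true, all_pred, average='macro')
# print("macro-F1:", evaluate_f1())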


# Training loop
model_train_acc, model_test_acc = [], []
for epoch in range(Epoch):
    train_acc = train()
    test_acc = evaluate()
    print("epoch = {}, train accuracy = {}".format(epoch + 1, train_acc))
    print("epoch = {}, test accuracy = {}".format(epoch + 1, test_acc))
    model_train_acc.append(train_acc)
    model_test_acc.append(test_acc)

plt.plot(model_train_acc)
plt.plot(model_test_acc)
plt.ylim(0.5, 1.01)  # the ymin=/ymax= keyword forms were removed in newer matplotlib
plt.title("The accuracy of GRU model")
plt.legend(['train', 'test'])
plt.show()
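# To reuse the trained classifier later, the weights can be saved and reloaded
# (a sketch; 'gru_classifier.pt' is an arbitrary filename):
torch.save(model.state_dict(), 'gru_classifier.pt')
# model.load_state_dict(torch.load('gru_classifier.pt', map_location=device))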
