Text Classification

Text Classification 2

from torch.utils.data import Dataset, DataLoader
import numpy as np
import torch
import torch.nn as nn

def get_data(path):
    # Each line of the file is expected to be "text label"; empty or
    # malformed lines are skipped.
    all_text = []
    all_label = []
    with open(path, "r", encoding="utf8") as f:
        all_data = f.read().split("\n")
    for data in all_data:
        try:
            if len(data) == 0:
                continue
            data_s = data.split(" ")
            if len(data_s) != 2:
                continue
            text, label = data_s
            label = int(label)
        except Exception as e:
            print(e)
        else:
            all_text.append(text)
            all_label.append(label)

    return all_text, all_label

def build_word2index(train_text):
    # Index 0 is reserved for PAD and 1 for UNK; every other character
    # gets the next free index the first time it appears.
    word_2_index = {"PAD": 0, "UNK": 1}
    for text in train_text:
        for word in text:
            if word not in word_2_index:
                word_2_index[word] = len(word_2_index)
    return word_2_index


class TextDataset(Dataset):
    def __init__(self, all_text, all_label):
        self.all_text = all_text
        self.all_label = all_label

    def __getitem__(self, index):
        global word_2_index
        text = self.all_text[index]
        text_index = [word_2_index[i] for i in text]  # map each character to its index
        label = self.all_label[index]
        text_len = len(text)
        return text_index, label, text_len

    def process_batch(self, data):
        global max_len, word_2_index
        batch_text = []
        batch_label = []
        batch_len = []

        for d in data:
            batch_text.append(d[0])
            batch_label.append(d[1])
            batch_len.append(d[2])

        # Truncate first, then pad with PAD (index 0) up to max_len.
        batch_text = [i[:max_len] for i in batch_text]
        batch_text = [i + [0] * (max_len - len(i)) for i in batch_text]

        # float32 because the index vectors are fed directly into nn.Linear.
        return torch.tensor(batch_text, dtype=torch.float32), torch.tensor(batch_label)


    def __len__(self):
        return len(self.all_text)


class Model(nn.Module):
    def __init__(self, feature, class_num):
        super().__init__()
        self.linear1 = nn.Linear(feature, class_num)
        self.loss_fun = nn.CrossEntropyLoss()

    def forward(self, x, label=None):
        pre = self.linear1(x)
        if label is not None:
            loss = self.loss_fun(pre, label)
            return loss
        else:
            # dim=-1: argmax over the last dimension, i.e. the class scores
            return torch.argmax(pre, dim=-1)


if __name__ == "__main__":
    train_text, train_label = get_data("train.txt")
    assert len(train_label) == len(train_text), "text and label counts differ -- check your data"

    word_2_index = build_word2index(train_text)
    index_2_label = ["negative", "neutral", "positive"]

    train_batch_size = 2
    max_len = 7
    epoch = 10
    lr = 0.01
    class_num = len(set(train_label))

    train_dataset = TextDataset(train_text, train_label)
    train_dataloader = DataLoader(train_dataset, batch_size=train_batch_size, shuffle=False, collate_fn=train_dataset.process_batch)


    model = Model(max_len,class_num)
    opt = torch.optim.SGD(model.parameters(),lr)
    for e in range(epoch):
        print("*" * 100)
        for batch_text, batch_label in train_dataloader:
            loss = model(batch_text, batch_label)
            loss.backward()
            opt.step()
            opt.zero_grad()
        print(f"loss:{loss:.2f}")

    while True:
        input_t = input("Enter text: ")
        input_t = input_t[:max_len]
        input_idx = [word_2_index.get(i, 1) for i in input_t]  # 1 = UNK for unseen characters
        input_idx = input_idx + [0] * (max_len - len(input_t))  # pad with 0 (PAD)
        input_idx = torch.tensor(input_idx, dtype=torch.float32)

        pre = model(input_idx)
        pre_label = index_2_label[pre]
        print(f"Predicted sentiment: {pre_label}")

Text Classification 3

batch_onehot is a plain Python list. We first convert it to a tensor, then convert that to a numpy array, which makes the data much easier to inspect.
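A minimal sketch of that round trip, with a made-up 2 x 4 batch_onehot standing in for the real batch:

import numpy as np
import torch

batch_onehot = [[0, 1, 0, 0], [0, 0, 1, 0]]  # assumed toy batch: 2 samples, vocab of 4

batch_tensor = torch.tensor(batch_onehot, dtype=torch.float32)  # list -> tensor
batch_numpy = batch_tensor.numpy()                              # tensor -> numpy
print(batch_numpy.shape)  # (2, 4)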

Representing each Chinese character with a single number imposes an artificial ordering: there are several thousand characters, and those magnitude relationships distort the matrix arithmetic. So we represent each character as a vector instead; for now we use a one-hot encoding.
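A sketch of the one-hot idea (the vocabulary size and indices below are invented for illustration):

import torch

vocab_size = 5          # assumed tiny vocabulary
text_index = [2, 4, 1]  # character indices, as produced by word_2_index

# One row per character: a 1 at the character's index, 0 everywhere else.
onehot = torch.zeros(len(text_index), vocab_size)
for row, idx in enumerate(text_index):
    onehot[row, idx] = 1
print(onehot)

torch.nn.functional.one_hot(torch.tensor(text_index), num_classes=vocab_size) builds the same matrix (as integers) in one call.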

from torch.utils.data import Dataset, DataLoader
import numpy as np
import torch
import torch.nn as nn
import os
import time
from tqdm import tqdm

def get_data(path, num=None):
    # Same loader as in the previous section; the new `num` argument is
    # assumed (from the signature) to cap how many samples are returned.
    all_text = []
    all_label = []
    with open(path, "r", encoding="utf8") as f:
        all_data = f.read().split("\n")
    for data in all_data:
        data_s = data.split(" ")
        if len(data_s) != 2:
            continue
        text, label = data_s
        all_text.append(text)
        all_label.append(int(label))
    if num is not None:
        return all_text[:num], all_label[:num]
    return all_text, all_label