TextCNN: A PyTorch Implementation

This post shows how to implement the TextCNN model in PyTorch for text classification. The model consists of a word embedding layer, several convolution kernels of different sizes, max pooling, and a fully connected layer. Filters of configurable size and number are convolved over the input sequence, the outputs of all filters are concatenated, and a fully connected layer produces the classification result. Full code and commentary follow.
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
"""

filter_list:

Conv2d(1, 3, kernel_size=(2, 4), stride=(1, 1))
    1:表示输入channel为1;
    3:表示输出channel为3;
    kernel_size: 卷积核大小为[2x4];
    stride=(1, 1): 步长为1进行滑动

filter_list:
    '0':Conv2d(1, 3, kernel_size=(2, 4), stride=(1, 1))
    '1':Conv2d(1, 3, kernel_size=(2, 4), stride=(1, 1))
    '2':Conv2d(1, 3, kernel_size=(2, 4), stride=(1, 1))
    len():3

a.permute(2, 0, 1): 若a维度为 [6, 1, 3],
    将a的维度转化为 [3, 6, 1]

"""


class TextCNN(nn.Module):
    def __init__(self):
        super(TextCNN, self).__init__()
        self.num_filters_total = num_filters * len(filter_sizes)  # 3 * 3 = 9
        self.W = nn.Embedding(vocab_size, embedding_size)  # (16, 4) embedding table; rows are looked up by word index
        self.Weight = nn.Linear(self.num_filters_total,
                                num_classes,
                                bias=False)  # Weight: [9, 2], maps 9 pooled features to 2 classes (binary classification)
        self.Bias = nn.Parameter(torch.ones(
            [num_classes]))  # bias: a length-2 vector, data: tensor([1., 1.])
        self.filter_list = nn.ModuleList([
            nn.Conv2d(1, num_filters, (size, embedding_size))
            for size in filter_sizes
        ])  # one nn.Conv2d(1, 3, (2, 4)) per filter size
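        # Design note: Weight is created with bias=False and Bias is added as a
        # separate nn.Parameter; this is functionally equivalent to the
        # built-in bias of nn.Linear(self.num_filters_total, num_classes).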

    def forward(self, x):
        embedded_chars = self.W(
            x)  # embedded_chars: [6, 3, 4]; input x is [6, 3], each word index becomes a 4-dim vector
        # print('embedded_chars_size:{}'.format(embedded_chars.size()))
        embedded_chars = embedded_chars.unsqueeze(
            1)  # insert a channel dimension at index 1 -> [6, 1, 3, 4]

        pooled_outputs = []

        #  Three filters are applied: each passes the input through
        #  Conv2d(1, 3, kernel_size=(2, 4), stride=(1, 1)) and produces 3
        #  feature maps (that is what in_channels=1, out_channels=3 means).
        #  Input: [6, 3]; pooled output of each filter: [6, 1, 1, 3].

        for i, conv in enumerate(self.filter_list):
            # print(i, conv)
            h = F.relu(conv(embedded_chars))  # h: torch.Size([6, 3, 2, 1])
            # print("h_size:{}".format(h.size()))
            mp = nn.MaxPool2d(
                (sequence_length - filter_sizes[i] + 1, 1))  # a 2x1 max-pooling window (3 - 2 + 1 = 2)

            pooled = mp(h).permute(0, 3, 2, 1)  # mp(h): [6, 3, 1, 1]
            # print("pooled_size:{}".format(
            # pooled.size()))  #pooled_size:torch.Size([6, 1, 1, 3])
            pooled_outputs.append(
                pooled)  # collect each filter's pooled result; after the loop the list holds all three
            # print("pooledn_outputs.size:{}, pooled_outputs:{}".format(
            # len(pooled_outputs), pooled_outputs))

        h_pool = torch.cat(
            pooled_outputs,
            len(filter_sizes))  # concatenate along dim 3: three [6, 1, 1, 3] tensors -> [6, 1, 1, 9]
        # print("h_pool_size:{}".format(h_pool.size()))
        h_pool_flat = torch.reshape(
            h_pool, [-1, self.num_filters_total])  # reshape to [6, 9]
        # print("h_pool_flat_size:{}".format(h_pool_flat.size()))
        model = self.Weight(
            h_pool_flat) + self.Bias  # [6, 9] x [9, 2] -> [6, 2], plus the length-2 bias: the model's logits
        # print('model:{}'.format(model))
        return model
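
# Usage sketch (assumes the hyperparameters defined under __main__ below):
#   model = TextCNN()
#   logits = model(torch.randint(0, vocab_size, (6, 3)))  # logits: [6, 2]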


"""
x.view(a,b,c) 将维度变为 [a, b, c]
"""

if __name__ == '__main__':
    embedding_size = 4  # dimension of each word vector
    sequence_length = 3  # every sentence is exactly 3 words long
    num_classes = 2  # binary sentiment classification
    filter_sizes = [2, 2, 2]  # n-gram window of each conv kernel; one Conv2d per entry, here three bigram filters
    num_filters = 3  # output channels (feature maps) produced by each filter
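
    # Sketch (an assumption, not in the original): mixed window sizes are the
    # more common setup, e.g. filter_sizes = [2, 3] would build a bigram filter
    # Conv2d(1, 3, (2, 4)) and a trigram filter Conv2d(1, 3, (3, 4)); the
    # kernel width always equals embedding_size, so each window spans whole
    # word vectors.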

    sentences = [
        "i love you", "he loves me", "she likes baseball", "i hate you",
        "sorry for that", "this is awful"
    ]
    labels = [1, 1, 1, 0, 0, 0]

    # word_list = " ".join(sentences).split()
    # print('word_list:{}'.format(word_list))
    # word_list = list(set(word_list))
    # print('word_list_len:{}, word_list:{}'.format(len(word_list), word_list))
    # word_dict = {w: i for i, w in enumerate(word_list)}

    word_dict = {
        w: i
        for i, w in enumerate(list(set(' '.join(sentences).split())))
    }
    vocab_size = len(word_dict)
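    # e.g. word_dict looks like {'i': 0, 'love': 1, 'you': 2, ...}; the exact
    # indices vary between runs because set() ordering is arbitrary.
    # vocab_size is 16: the six sentences contain 16 distinct words.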

    model = TextCNN()  # instantiating TextCNN runs its __init__() first

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    inputs = torch.LongTensor([[word_dict[i] for i in sen.split()]
                               for sen in sentences])
    targets = torch.LongTensor(labels)

    # input_ = ([[word_dict[n] for n in sen.split()]
    #            for sen in sentences])  # input_: [batch_size, 3], 3 words per sentence
    # print('input_size:{}, input:{}'.format(
    #     torch.Tensor(input_).size(), input_))
    # inputs = torch.LongTensor(np.asarray(input_))
    # print('inputs_size:{}, inputs:{}'.format(
    #     inputs.size(), inputs))  # inputs: [6, 3], a batch of 6 sentences, 3 words each

    # Training
    for epoch in range(5001):
        optimizer.zero_grad()
        outputs = model(inputs)

        loss = criterion(outputs, targets)
        if epoch % 1000 == 0:
            print("Epoch:", "%04d" % (epoch), 'cost=', '{:.6f}'.format(loss.item()))

        loss.backward()
        optimizer.step()

    # Test
    test_text = 'he likes you'
    test = [[word_dict[t] for t in test_text.split()]]
    test = torch.LongTensor(test)

    # tests = [np.asarray([word_dict[n] for n in test_text.split()])]
    # test_batch = torch.LongTensor(tests)

    predict = model(test).data.max(1, keepdim=True)[1]  # index of the larger logit, i.e. the predicted class

    print("predict:{}".format(predict))

    # Save the model (uncomment to persist the trained weights)
    # torch.save(model, './model/TextCNN.pkl')
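    # To reload later (a sketch; saving the whole module pickles the class):
    # model = torch.load('./model/TextCNN.pkl')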
