Mindspore框架循环神经网络RNN模型实现情感分类|(六)模型加载和推理(情感分类模型资源下载)

Mindspore框架循环神经网络RNN模型实现情感分类

Mindspore框架循环神经网络RNN模型实现情感分类|(一)IMDB影评数据集准备
Mindspore框架循环神经网络RNN模型实现情感分类|(二)预训练词向量
Mindspore框架循环神经网络RNN模型实现情感分类|(三)RNN模型构建
Mindspore框架循环神经网络RNN模型实现情感分类|(四)损失函数与优化器
Mindspore框架循环神经网络RNN模型实现情感分类|(五)模型训练
Mindspore框架循环神经网络RNN模型实现情感分类|(六)模型加载和推理(情感分类模型资源下载)
Mindspore框架循环神经网络RNN模型实现情感分类|(七)模型导出ONNX与应用部署

一、模型资源下载

  1. RNN升级版LSTM模型:本项目训练好的情感分类模型-下载训练好的IMDB分类模型

二、模型加载与推理

class RNN(nn.Cell):
    """Bidirectional LSTM sentiment classifier.

    Maps a batch of token-id sequences to one logit per sequence via a
    pretrained embedding table, a (bi)LSTM encoder, and a dense head.
    """

    def __init__(self, embeddings, hidden_dim, output_dim, n_layers,
                 bidirectional, pad_idx):
        super().__init__()
        # Embedding table is initialized from the pretrained vectors;
        # its shape determines vocab size and embedding width.
        n_tokens, emb_dim = embeddings.shape
        self.embedding = nn.Embedding(n_tokens, emb_dim, embedding_table=ms.Tensor(embeddings),
                                      padding_idx=pad_idx)
        self.rnn = nn.LSTM(emb_dim,
                           hidden_dim,
                           num_layers=n_layers,
                           bidirectional=bidirectional,
                           batch_first=True)
        # He-uniform weights and uniform bias for the classification head;
        # input width is hidden_dim * 2 (forward + backward states).
        w_init = HeUniform(math.sqrt(5))
        b_init = Uniform(1 / math.sqrt(hidden_dim * 2))
        self.fc = nn.Dense(hidden_dim * 2, output_dim, weight_init=w_init, bias_init=b_init)

    def construct(self, inputs):
        """Return the classification logit(s) for each sequence."""
        embedded = self.embedding(inputs)
        _, (hidden, _) = self.rnn(embedded)
        # Fuse the final forward (-2) and backward (-1) hidden states.
        fused = ops.concat((hidden[-2, :, :], hidden[-1, :, :]), axis=1)
        return self.fc(fused)

编写预测接口:test_interface

def predict_sentiment(model, vocab, sentence):
    """Classify a raw sentence string as "Positive" or "Negative".

    Tokenizes by lower-cased whitespace split, maps tokens to ids via
    `vocab`, runs the model on a single-element batch, and thresholds
    the sigmoid of the logit at 0.5 (via rounding).
    """
    labels = {1: "Positive", 0: "Negative"}
    model.set_train(False)  # inference mode
    token_ids = vocab.tokens_to_ids(sentence.lower().split())
    # Build an int32 tensor and add the leading batch dimension.
    batch = ms.Tensor(token_ids, ms.int32).expand_dims(0)
    logit = model(batch)
    prob = ops.sigmoid(logit).asnumpy()
    return labels[int(np.round(prob))]

def test_interface():
    """Load the trained sentiment checkpoint and run an interactive loop.

    Rebuilds the RNN with the same hyper-parameters used at training time,
    restores weights from the checkpoint, then repeatedly reads sentences
    from stdin and prints the predicted sentiment. Exits cleanly on
    EOF or Ctrl-C instead of swallowing every exception with a bare
    `except:` as before.
    """
    ckpt_file_name = './IMDB/IMDB/sentiment-analysis.ckpt'
    # Pretrained word-vector archive (GloVe 6B, zipped).
    glove_path = r"./IMDB/IMDB/glove.6B.zip"
    vocab, embeddings = load_glove(glove_path)

    # Hyper-parameters must match the values used when the checkpoint
    # was produced, otherwise parameter shapes will not line up.
    hidden_size = 256
    output_size = 1
    num_layers = 2
    bidirectional = True
    pad_idx = vocab.tokens_to_ids('<pad>')

    model = RNN(embeddings, hidden_size, output_size, num_layers, bidirectional, pad_idx)
    param_dict = ms.load_checkpoint(ckpt_file_name)
    ms.load_param_into_net(model, param_dict)

    # Interactive prediction loop.
    while True:
        try:
            print("go on!")
            sentence = input("请输入:")
            res = predict_sentiment(model, vocab, sentence)
            print("用户输入的内容为:", sentence, "评价结果是:", res)
        except (EOFError, KeyboardInterrupt):
            # User ended input — leave the loop quietly.
            break

def load_glove(glove_path):
    """Extract (if needed) and parse the GloVe 100d vectors.

    Args:
        glove_path: path to the glove.6B zip archive.

    Returns:
        (vocab, embeddings): a Vocab whose last two entries are the
        special tokens <unk> and <pad>, and a float32 array of shape
        (n_tokens + 2, 100) aligned with that vocab.
    """
    glove_100d_path = os.path.join(cache_dir, 'glove.6B.100d.txt')  # cached text file
    if not os.path.exists(glove_100d_path):
        # Context manager guarantees the archive handle is closed
        # (the original leaked the ZipFile object).
        with zipfile.ZipFile(glove_path) as glove_zip:
            glove_zip.extractall(cache_dir)

    embeddings = []
    tokens = []
    with open(glove_100d_path, encoding='utf-8') as gf:
        for line in gf:
            # Each line is "<word> v1 v2 ... v100".
            word, vector = line.split(maxsplit=1)
            tokens.append(word)
            # np.fromstring(text) is deprecated; parse via split instead.
            embeddings.append(np.asarray(vector.split(), dtype=np.float32))
    # Embeddings for the two special tokens appended last
    # (special_first=False below): <unk> random, <pad> all zeros.
    embeddings.append(np.random.rand(100))
    embeddings.append(np.zeros((100,), np.float32))

    vocab = ds.text.Vocab.from_list(tokens, special_tokens=["<unk>", "<pad>"], special_first=False)
    embeddings = np.array(embeddings).astype(np.float32)
    return vocab, embeddings

预测推理

import mindspore as ms
import mindspore.nn as nn
import mindspore.ops as ops
import os
import zipfile
import numpy as np

# Guard the entry point so importing this module does not start the
# interactive prediction loop.
if __name__ == "__main__":
    test_interface()

预测结果。
在这里插入图片描述

  • 8
    点赞
  • 3
    收藏
    觉得还不错? 一键收藏
  • 打赏
    打赏
  • 0
    评论
以下是使用RNN模型实现猫狗分类的示例代码,其中使用了LSTM作为RNN的基本单元: ```python import torch import torch.nn as nn import torch.optim as optim from torch.utils.data import DataLoader from torchvision.datasets import ImageFolder from torchvision.transforms import transforms # 定义数据预处理 transform = transforms.Compose([ transforms.Resize((64, 64)), transforms.ToTensor(), transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]) ]) # 数据集 train_dataset = ImageFolder(root="train", transform=transform) train_loader = DataLoader(train_dataset, batch_size=16, shuffle=True) # 定义模型 class RNN(nn.Module): def __init__(self, input_size, hidden_size, num_layers, num_classes): super(RNN, self).__init__() self.hidden_size = hidden_size self.num_layers = num_layers self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True) self.fc = nn.Linear(hidden_size, num_classes) def forward(self, x): h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device) c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device) out, _ = self.lstm(x, (h0, c0)) out = self.fc(out[:, -1, :]) return out # 训练模型 device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model = RNN(input_size=3*64*64, hidden_size=128, num_layers=2, num_classes=2) model.to(device) criterion = nn.CrossEntropyLoss() optimizer = optim.Adam(model.parameters(), lr=0.001) for epoch in range(10): for images, labels in train_loader: images = images.reshape(-1, 3*64*64).to(device) labels = labels.to(device) optimizer.zero_grad() outputs = model(images) loss = criterion(outputs, labels) loss.backward() optimizer.step() print(f"Epoch [{epoch+1}/10], Loss: {loss.item():.4f}") # 测试模型 model.eval() test_dataset = ImageFolder(root="test", transform=transform) test_loader = DataLoader(test_dataset, batch_size=16, shuffle=False) with torch.no_grad(): correct = 0 total = 0 for images, labels in test_loader: images = images.reshape(-1, 3*64*64).to(device) labels = labels.to(device) outputs = model(images) _, predicted = 
torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() print(f"Accuracy: {100*correct/total:.2f}%") ``` 在这个例子中,我们使用了一个包含两层LSTM的循环神经网络,并在最后加入了一个全连接层进行分类。代码中使用了PyTorch框架,并对数据进行了预处理和归一化。在训练过程中,我们使用交叉熵作为损失函数,Adam优化器进行参数更新。最后在测试集上计算模型的准确率。
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

柏常青

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值