RNN Learning Example

Problem: build a classifier with an RNN. We have a dataset of names and their corresponding countries, and we need to train a model so that, given a new name, it predicts which language the name is based on (18 different languages, so an 18-way classification). (From lecture 13 of the 刘二大人 video series on Bilibili.)
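
Each row of the gzipped CSV is a (name, country) pair. A quick way to peek at the data (a minimal sketch, assuming the file paths used below; the sample row shown is illustrative):

import gzip
import csv

# Print the first few (name, country) rows of the training file
with gzip.open('./dataset/name/names_train.csv.gz', 'rt') as f:
    for row in list(csv.reader(f))[:3]:
        print(row)  # e.g. ['Adsit', 'Czech']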

1. Generating the data: dataset_train.pt and dataset_test.pt

import torch
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
import gzip
import csv
import os

class NameDataset(Dataset):
    def __init__(self, is_train_set=True):
        # Read the data
        filename = './dataset/name/names_train.csv.gz' if is_train_set else './dataset/name/names_test.csv.gz'
        with gzip.open(filename, 'rt') as f:
            reader = csv.reader(f)
            rows = list(reader)

        # Each row is a (name, country) tuple; pull out the names and countries and record the count
        self.names = [row[0] for row in rows]
        self.len = len(self.names)
        self.countries = [row[1] for row in rows]

        # Map each country to an index:
        # list -> set (dedupe) -> sorted -> list -> dict
        self.country_list = list(sorted(set(self.countries)))
        self.country_dict = self.getCountryDict()
        # Number of distinct countries
        self.country_num = len(self.country_list)

    # Return one sample: (name, country_index)
    def __getitem__(self, index):
        return self.names[index], self.country_dict[self.countries[index]]

    def __len__(self):
        return self.len

    # Build the country(key) -> index(value) dictionary
    def getCountryDict(self):
        country_dict = dict()
        for idx, country_name in enumerate(self.country_list, 0):
            country_dict[country_name] = idx
        return country_dict

    # Map an index back to its country name
    def idx2country(self, index):
        return self.country_list[index]

    # Return the number of countries
    def getCountriesNum(self):
        return self.country_num


if __name__ == '__main__':
    # Batch size
    BATCH_SIZE = 256
    
    save_dir = os.path.join('./dataset/name')
    trainset = NameDataset(is_train_set=True)
    trainloader = DataLoader(trainset, batch_size=BATCH_SIZE, shuffle=True)
    torch.save(trainloader, os.path.join(save_dir, 'dataset_train.pt'))
    testset = NameDataset(is_train_set=False)
    testloader = DataLoader(testset, batch_size=BATCH_SIZE, shuffle=False)
    torch.save(testloader, os.path.join(save_dir, 'dataset_test.pt'))

    # The final output dimension (number of classes)
    COUNTRY_NUM = trainset.getCountriesNum()  # 18
    print(COUNTRY_NUM)
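
Note that torch.save here pickles the entire DataLoader, including the Dataset inside it, so whatever script later calls torch.load must be able to import NameDataset (which is why part 2 imports it even though it never calls it directly). An alternative, shown as a sketch rather than what this post does, is to persist only the Dataset and rebuild the loader on load:

# Alternative sketch: save just the Dataset, rebuild the DataLoader on load
torch.save(trainset, os.path.join(save_dir, 'trainset.pt'))
reloaded_set = torch.load(os.path.join(save_dir, 'trainset.pt'))
reloaded_loader = DataLoader(reloaded_set, batch_size=BATCH_SIZE, shuffle=True)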

2. Loading the data and building the model

import torch
import matplotlib.pyplot as plt
import numpy as np
import time
import datetime
from torch.nn.utils.rnn import pack_padded_sequence
import math
import os
# Importing NameDataset is required so torch.load can unpickle the saved DataLoaders
from test.name_classify_data import NameDataset

HIDDEN_SIZE = 100
BATCH_SIZE = 256
N_LAYER = 2
N_EPOCHS = 10
INPUT_SIZE = 128  # vocabulary size: every character is encoded as its ASCII code (< 128)
USE_GPU = True
# Computed in part 1 when generating the data
COUNTRY_NUM = 18

# Move a tensor to the GPU when USE_GPU is set
def create_tensor(tensor):
    if USE_GPU:
        device = torch.device("cuda:0")
        tensor = tensor.to(device)
    return tensor


class RNNClassifier(torch.nn.Module):
    def __init__(self, input_size, hidden_size, country_size, n_layers=1, bidirectional=True):
        super(RNNClassifier, self).__init__()
        self.hidden_size = hidden_size
        self.n_layers = n_layers
        self.n_directions = 2 if bidirectional else 1
        # Embedding input:  (seq_len, batch_size)
        # Embedding output: (seq_len, batch_size, hidden_size)
        # i.e. every character index is mapped to a hidden_size-dimensional vector
        self.embedding = torch.nn.Embedding(input_size, hidden_size)
        # bidirectional selects a one-directional vs. a bidirectional GRU
        self.gru = torch.nn.GRU(hidden_size, hidden_size, n_layers, bidirectional=bidirectional)
        # The linear layer's input width must be multiplied by n_directions
        self.fc = torch.nn.Linear(hidden_size * self.n_directions, country_size)

    def _init_hidden(self, batch_size):
        hidden = torch.zeros(self.n_layers * self.n_directions, batch_size, self.hidden_size)
        return create_tensor(hidden)

    def forward(self, input, seq_length):
        # Transpose input: B x S -> S x B
        input = input.t()
        batch_size = input.size(1)

        # hidden: (n_layers * n_directions, batch_size, hidden_size)
        hidden = self._init_hidden(batch_size)
        # embedding: (seq_len, batch_size, hidden_size)
        embedding = self.embedding(input)

        # pack_padded_sequence speeds up the computation by dropping the
        # padded timesteps; it needs the embedded input plus the true length
        # of every sequence, sorted in descending order
        gru_input = pack_padded_sequence(embedding, seq_length)

        _, hidden = self.gru(gru_input, hidden)

        # A bidirectional GRU returns hidden with shape
        # (n_layers * n_directions, batch_size, hidden_size); the last two
        # entries are the final layer's forward (hidden[-2]) and backward
        # (hidden[-1]) states, so concatenate those two
        if self.n_directions == 2:
            hidden_cat = torch.cat([hidden[-1], hidden[-2]], dim=1)
        else:
            hidden_cat = hidden[-1]

        fc_output = self.fc(hidden_cat)
        return fc_output
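
Since pack_padded_sequence does the heavy lifting in forward, it is worth seeing in isolation. A minimal sketch with toy shapes (not the model's real data): it compresses a padded (seq_len, batch, features) tensor down to only the real timesteps, plus a batch_sizes record telling the GRU how many sequences are still active at each step. The lengths must be sorted in descending order, which is why make_tensors below sorts the batch.

import torch
from torch.nn.utils.rnn import pack_padded_sequence

# Toy batch: 3 padded sequences of lengths 3, 2, 1; feature dim 2
padded = torch.randn(3, 3, 2)      # (seq_len, batch, features)
lengths = torch.tensor([3, 2, 1])  # must be sorted descending
packed = pack_padded_sequence(padded, lengths)
print(packed.data.shape)   # torch.Size([6, 2]): 3+2+1 real timesteps, padding dropped
print(packed.batch_sizes)  # tensor([3, 2, 1]): active sequences at each timestep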



def time_since(since):
    s = time.time() - since
    m = math.floor(s / 60)
    s -= m * 60
    return '%dm %ds' % (m, s)


# ord() returns a character's ASCII code
def name2list(name):
    arr = [ord(c) for c in name]
    return arr, len(arr)


def make_tensors(names, countries):
    sequences_and_length = [name2list(name) for name in names]
    # Pull out every name's ASCII-code sequence for the whole batch
    name_sequences = [s1[0] for s1 in sequences_and_length]
    # Pull out every name's length and convert the lengths to a LongTensor
    seq_lengths = torch.LongTensor([s1[1] for s1 in sequences_and_length])
    # Cast the country indices to long
    countries = countries.long()

    # Padding:
    # create an all-zero (batch, max_len) tensor, then copy each real sequence over the zeros
    seq_tensor = torch.zeros(len(name_sequences), seq_lengths.max()).long()
    # Walk the batch, taking each sequence and its length
    for idx, (seq, seq_len) in enumerate(zip(name_sequences, seq_lengths), 0):
        # Fill positions 0..seq_len of row idx with the sequence as a LongTensor
        seq_tensor[idx, :seq_len] = torch.LongTensor(seq)

    # Sort the lengths in descending order, keeping the permutation indices
    seq_len, perm_idx = seq_lengths.sort(dim=0, descending=True)
    # Reorder the original data with the same permutation
    seq_tensor = seq_tensor[perm_idx]
    countries = countries[perm_idx]

    return create_tensor(seq_tensor), create_tensor(seq_len), create_tensor(countries)
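
On a toy batch, make_tensors pads and reorders like this (the label indices here are hypothetical, and USE_GPU is assumed off so everything stays on the CPU):

names = ['Ng', 'Okafor', 'Liu']
countries = torch.tensor([4, 12, 2])  # hypothetical label indices
seqs, lens, labels = make_tensors(names, countries)
print(lens)    # tensor([6, 3, 2]): lengths sorted descending (Okafor, Liu, Ng)
print(labels)  # tensor([12, 2, 4]): labels reordered with the same permutation
print(seqs)    # (3, 6) LongTensor of ASCII codes, zero-padded on the right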


def trainModel():
    total_loss = 0
    for i, (names, countries) in enumerate(trainloader, 1):
        inputs, seq_lengths, target = make_tensors(names, countries)
        output = classifier(inputs, seq_lengths.to('cpu'))
        loss = criterion(output, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        
        total_loss += loss.item()
        # i counts batches: print the epoch's average loss on its last full batch
        if i == len(trainset) // BATCH_SIZE:
            print(f"loss = {total_loss / (i * len(inputs))}")
        # if i % 10 == 0:
        #     print(f'[{time_since(start)}] Epoch {epoch} ', end='')
        #     print(f'[{i * len(inputs)}/{len(trainset)}]', end='')
        #     print(f'loss={total_loss / (i * len(inputs))}')
    return total_loss


def testModel():
    correct = 0
    total = len(testset)
    print("evaluating trained model……")
    with torch.no_grad():
        for i, (names, countries) in enumerate(testloader, 1):
            inputs, seq_lengths, target = make_tensors(names, countries)
            output = classifier(inputs, seq_lengths.to('cpu'))
            pred = output.max(dim=1, keepdim=True)[1]
            correct += pred.eq(target.view_as(pred)).sum().item()

        percent = '%.2f' % (100 * correct / total)
        print(f'Test set: Accuracy {correct}/{total} {percent}%')
    return correct / total


if __name__ == '__main__':
    print("Train for %d epochs..." % N_EPOCHS)
    start = time.time()
    # Load the DataLoaders pickled in part 1 (newer PyTorch versions may
    # require torch.load(..., weights_only=False) to unpickle them)
    trainloader = torch.load('./dataset/name/dataset_train.pt')
    trainset = trainloader.dataset
    testloader = torch.load('./dataset/name/dataset_test.pt')
    testset = testloader.dataset
    classifier = RNNClassifier(INPUT_SIZE, HIDDEN_SIZE, COUNTRY_NUM, N_LAYER)
    if USE_GPU:
        device = torch.device('cuda:0')
        classifier.to(device)

    criterion = torch.nn.CrossEntropyLoss()  # loss function
    optimizer = torch.optim.Adam(classifier.parameters(), lr=0.001)  # parameter updates
    acc_list = []
    for epoch in range(1, N_EPOCHS + 1):
        # Train one epoch, then evaluate
        print('%d / %d:' % (epoch, N_EPOCHS))
        trainModel()
        acc = testModel()
        acc_list.append(acc)
    end = time.time()
    print(datetime.timedelta(seconds=(end - start) // 1))
    epoch = np.arange(1, len(acc_list) + 1, 1)
    acc_list = np.array(acc_list)
    plt.plot(epoch, acc_list)
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.grid()
    plt.show()
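
Finally, to use the trained model the way the problem statement asks (feed in a new name, get a language back), a small helper can wrap one name as a batch of size 1. This helper is a sketch, not part of the original post: the label 0 passed to make_tensors is only a placeholder, and idx2country comes from NameDataset.

def predict(name, classifier, dataset):
    classifier.eval()
    with torch.no_grad():
        # Wrap the single name as a batch of 1; the 0 label is a dummy
        inputs, seq_lengths, _ = make_tensors([name], torch.tensor([0]))
        output = classifier(inputs, seq_lengths.to('cpu'))
        idx = output.max(dim=1)[1].item()
    return dataset.idx2country(idx)

# e.g. print(predict('Nakamura', classifier, trainset))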