- 🍨 This post is a learning log from the 🔗 365-day deep learning training camp (365天深度学习训练营)
- 🍖 Original author: K同学啊
import torch
import torch.nn as nn
import torchvision
from torchvision import transforms, datasets
import os
import PIL
import pathlib
import warnings
warnings.filterwarnings('ignore')
device = torch.device('cpu')
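The device is pinned to the CPU here. As an aside (not from the original post), on a machine with a CUDA-capable GPU the device is usually selected conditionally; the rest of the notebook works unchanged with either choice, since every tensor is later moved with .to(device).

# Hedged alternative (assumption: a GPU may or may not be present): fall back to the CPU when CUDA is unavailable.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')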
import pandas as pd
train_data = pd.read_csv('./data/n5_train.csv', sep='\t', header=None)
train_data.head()
0 1
0 还有双鸭山到淮阴的汽车票吗13号的 Travel-Query
1 从这里怎么回家 Travel-Query
2 随便播放一首专辑阁楼里的佛里的歌 Music-Play
3 给看一下墓王之王嘛 FilmTele-Play
4 我想看挑战两把s686打突变团竞的游戏视频 Video-Play
# Generator that yields (text, label) pairs one sample at a time
def coustom_data_iter(texts, labels):
    for x, y in zip(texts, labels):
        yield x, y

train_iter = coustom_data_iter(train_data[0].values[:], train_data[1].values[:])
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator
import jieba
# Chinese word segmentation with jieba
tokenizer = jieba.lcut
def yield_tokens(data_iter):
    for text, _ in data_iter:
        yield tokenizer(text)

vocab = build_vocab_from_iterator(yield_tokens(train_iter), specials=['<unk>'])
vocab.set_default_index(vocab['<unk>'])  # set the default index; tokens not in the vocabulary map to '<unk>'
Building prefix dict from the default dictionary …
Loading model from cache C:\Users\dlt_t\AppData\Local\Temp\jieba.cache
Loading model cost 1.055 seconds.
Prefix dict has been built successfully.
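As a quick sanity check (not part of the original notebook), the vocabulary built above can be queried directly: a list of token strings maps to a list of integer indices, and any token that was never seen falls back to the default '<unk>' index set above.

# Hypothetical sanity check (not in the original): inspect the vocabulary.
print(len(vocab))                                           # vocabulary size
print(vocab(tokenizer('还有双鸭山到淮阴的汽车票吗13号的')))  # token strings -> integer ids
print(vocab['<unk>'])                                       # index reserved for unknown tokens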
label_name = list(set(train_data[1].values[:]))
label_name
['Calendar-Query',
 'Music-Play',
 'FilmTele-Play',
 'Weather-Query',
 'Travel-Query',
 'Video-Play',
 'TVProgram-Play',
 'Audio-Play',
 'Other',
 'Alarm-Update',
 'HomeAppliance-Control',
 'Radio-Listen']
text_pipeline = lambda x: vocab(tokenizer(x))
label_pipeline = lambda x: label_name.index(x)
print(text_pipeline('随便播放一首歌'))
print(label_pipeline('Music-Play'))
[173, 4, 1499]
1
from torch.utils.data import DataLoader
def collate_batch(batch):
    label_list, text_list, offsets = [], [], [0]
    for (_text, _label) in batch:
        # collect the label for each sample
        label_list.append(label_pipeline(_label))
        # tokenize the text and convert it to vocabulary indices
        processed_text = torch.tensor(text_pipeline(_text))
        text_list.append(processed_text)
        # offsets: the number of tokens in each sentence
        offsets.append(processed_text.size(0))
    label_list = torch.tensor(label_list, dtype=torch.int64)
    text_list = torch.cat(text_list)
    offsets = torch.tensor(offsets[:-1]).cumsum(dim=0)  # starting position of each sentence in the flattened text
    return text_list.to(device), label_list.to(device), offsets.to(device)
dataloader = DataLoader(train_iter,
                        batch_size=8,
                        shuffle=False,
                        collate_fn=collate_batch)
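The key idea in collate_batch is that all sentences in a batch are concatenated into a single 1-D tensor, while offsets records where each sentence starts; this is exactly the input format nn.EmbeddingBag expects. A minimal illustration with made-up lengths (not from the original post):

# Illustration only (assumed lengths): three sentences of 3, 2 and 4 tokens.
lengths = torch.tensor([3, 2, 4])
starts = torch.cat([torch.tensor([0]), lengths.cumsum(dim=0)[:-1]])
print(starts)  # tensor([0, 3, 5]) -> each sentence's starting position in the flattened text tensor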
from torch import nn
class TextClassificationModel(nn.Module):
    def __init__(self, vocab_size, embed_dim, num_class):
        super(TextClassificationModel, self).__init__()
        self.embedding = nn.EmbeddingBag(vocab_size,   # vocabulary size
                                         embed_dim,    # embedding dimension
                                         sparse=False)
        self.fc = nn.Linear(embed_dim, num_class)
        self.init_weights()

    def init_weights(self):
        initrange = 0.5
        self.embedding.weight.data.uniform_(-initrange, initrange)  # initialize the embedding weights
        self.fc.weight.data.uniform_(-initrange, initrange)
        self.fc.bias.data.zero_()  # zero the bias

    def forward(self, text, offsets):
        embedded = self.embedding(text, offsets)
        return self.fc(embedded)
self.embedding.weight.data.uniform_(-initrange, initrange) is one way, in PyTorch, of initializing the weights of a network's word-embedding layer. It fills the weights with random values drawn from a uniform distribution. Broken down:

- self.embedding: the word-embedding layer of the network. An embedding layer maps discrete word representations (usually integer indices) to fixed-size continuous vectors; these vectors capture semantic relationships between words and serve as the input to the network.
- self.embedding.weight: the weight matrix of the embedding layer, with shape (vocab_size, embedding_dim), where vocab_size is the size of the vocabulary and embedding_dim is the dimension of the embedding vectors.
- self.embedding.weight.data: the underlying tensor of the weight matrix, which can be manipulated directly.
- .uniform_(-initrange, initrange): an in-place operation that fills the weight matrix with values drawn from a uniform distribution over [-initrange, initrange], where initrange is a positive number.

Initializing the embedding weights this way gives the model a certain amount of randomness at the start of training, which helps avoid problems such as vanishing or exploding gradients. During training, these weights are continually updated by the optimizer so that they capture better word representations.
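A minimal sketch (illustrative only, not in the original post) that checks the effect of the in-place initialization, confirming all values land inside [-initrange, initrange]:

# Illustrative check (assumed sizes): all weights should lie within [-0.5, 0.5] after uniform_.
emb = nn.EmbeddingBag(num_embeddings=100, embedding_dim=8, sparse=False)
initrange = 0.5
emb.weight.data.uniform_(-initrange, initrange)
print(emb.weight.data.min().item(), emb.weight.data.max().item())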
num_class = len(label_name)
vocab_size = len(vocab)
em_size = 64
model = TextClassificationModel(vocab_size, em_size, num_class).to(device)
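Before training, the model structure and parameter count can be printed as an optional check (not part of the original post):

# Optional check (not in the original): model architecture and number of trainable parameters.
print(model)
print(sum(p.numel() for p in model.parameters() if p.requires_grad), 'trainable parameters')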
import time
def train(dataloader):
    model.train()  # switch to training mode
    total_acc, train_loss, total_count = 0, 0, 0
    log_interval = 50
    start_time = time.time()

    for idx, (text, label, offsets) in enumerate(dataloader):
        predicted_label = model(text, offsets)

        optimizer.zero_grad()                                     # reset gradients
        loss = criterion(predicted_label, label)                  # loss between predictions and the ground-truth labels
        loss.backward()                                           # backpropagation
        torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)   # gradient clipping
        optimizer.step()                                          # update the parameters

        # accumulate accuracy and loss
        total_acc += (predicted_label.argmax(1) == label).sum().item()
        train_loss += loss.item()
        total_count += label.size(0)

        if idx % log_interval == 0 and idx > 0:
            elapsed = time.time() - start_time
            print('| epoch {:1d} | {:4d}/{:4d} batches '
                  '| train_acc {:4.3f} train_loss {:4.5f}'.format(epoch, idx, len(dataloader),
                                                                  total_acc/total_count, train_loss/total_count))
            total_acc, train_loss, total_count = 0, 0, 0
            start_time = time.time()
def evaluate(dataloader):
    model.eval()  # switch to evaluation mode
    total_acc, val_loss, total_count = 0, 0, 0

    with torch.no_grad():
        for idx, (text, label, offsets) in enumerate(dataloader):
            predicted_label = model(text, offsets)
            loss = criterion(predicted_label, label)  # compute the loss
            # accumulate evaluation statistics
            total_acc += (predicted_label.argmax(1) == label).sum().item()
            val_loss += loss.item()
            total_count += label.size(0)

    return total_acc/total_count, val_loss/total_count
from torch.utils.data.dataset import random_split
from torchtext.data.functional import to_map_style_dataset
# Hyperparameters
EPOCHS = 10      # number of epochs
LR = 5           # learning rate
BATCH_SIZE = 64  # batch size for training

criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=LR)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.1)
total_accu = None

# Build the datasets
train_iter = coustom_data_iter(train_data[0].values[:], train_data[1].values[:])
train_dataset = to_map_style_dataset(train_iter)

split_train_, split_valid_ = random_split(train_dataset,
                                          [int(len(train_dataset)*0.8), int(len(train_dataset)*0.2)])

train_dataloader = DataLoader(split_train_, batch_size=BATCH_SIZE,
                              shuffle=True, collate_fn=collate_batch)
valid_dataloader = DataLoader(split_valid_, batch_size=BATCH_SIZE,
                              shuffle=True, collate_fn=collate_batch)
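Before launching the training loop, one batch from train_dataloader can be inspected as a shape check (not in the original post): text is the flattened 1-D tensor of token ids for the whole batch, label holds BATCH_SIZE class indices, and offsets holds BATCH_SIZE starting positions.

# Hypothetical shape check (not in the original): pull a single batch from the DataLoader.
text_batch, label_batch, offsets_batch = next(iter(train_dataloader))
print(text_batch.shape, label_batch.shape, offsets_batch.shape)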
for epoch in range(1, EPOCHS + 1):
    epoch_start_time = time.time()
    train(train_dataloader)
    val_acc, val_loss = evaluate(valid_dataloader)

    # get the current learning rate
    lr = optimizer.state_dict()['param_groups'][0]['lr']

    # decay the learning rate only when the validation accuracy stops improving
    if total_accu is not None and total_accu > val_acc:
        scheduler.step()
    else:
        total_accu = val_acc

    print('-' * 69)
    print('| epoch {:1d} | time: {:4.2f}s | '
          'valid_acc {:4.3f} valid_loss {:4.3f} | lr {:4.6f}'.format(epoch,
                                                                     time.time() - epoch_start_time,
                                                                     val_acc, val_loss, lr))
    print('-' * 69)
| epoch 1 | 50/ 152 batches | train_acc 0.456 train_loss 0.03034
| epoch 1 | 100/ 152 batches | train_acc 0.722 train_loss 0.01876
| epoch 1 | 150/ 152 batches | train_acc 0.761 train_loss 0.01404
---------------------------------------------------------------------
| epoch 1 | time: 2.81s | valid_acc 0.803 valid_loss 0.012 | lr 5.000000
---------------------------------------------------------------------
| epoch 2 | 50/ 152 batches | train_acc 0.813 train_loss 0.01089
| epoch 2 | 100/ 152 batches | train_acc 0.838 train_loss 0.00895
| epoch 2 | 150/ 152 batches | train_acc 0.851 train_loss 0.00815
---------------------------------------------------------------------
| epoch 2 | time: 2.16s | valid_acc 0.848 valid_loss 0.008 | lr 5.000000
---------------------------------------------------------------------
| epoch 3 | 50/ 152 batches | train_acc 0.880 train_loss 0.00675
| epoch 3 | 100/ 152 batches | train_acc 0.877 train_loss 0.00674
| epoch 3 | 150/ 152 batches | train_acc 0.895 train_loss 0.00585
---------------------------------------------------------------------
| epoch 3 | time: 2.75s | valid_acc 0.875 valid_loss 0.007 | lr 5.000000
---------------------------------------------------------------------
| epoch 4 | 50/ 152 batches | train_acc 0.904 train_loss 0.00549
| epoch 4 | 100/ 152 batches | train_acc 0.917 train_loss 0.00480
| epoch 4 | 150/ 152 batches | train_acc 0.920 train_loss 0.00438
---------------------------------------------------------------------
| epoch 4 | time: 2.67s | valid_acc 0.890 valid_loss 0.006 | lr 5.000000
---------------------------------------------------------------------
| epoch 5 | 50/ 152 batches | train_acc 0.931 train_loss 0.00386
| epoch 5 | 100/ 152 batches | train_acc 0.932 train_loss 0.00389
| epoch 5 | 150/ 152 batches | train_acc 0.935 train_loss 0.00373
---------------------------------------------------------------------
| epoch 5 | time: 2.93s | valid_acc 0.896 valid_loss 0.006 | lr 5.000000
---------------------------------------------------------------------
| epoch 6 | 50/ 152 batches | train_acc 0.946 train_loss 0.00319
| epoch 6 | 100/ 152 batches | train_acc 0.953 train_loss 0.00290
| epoch 6 | 150/ 152 batches | train_acc 0.950 train_loss 0.00294
---------------------------------------------------------------------
| epoch 6 | time: 2.74s | valid_acc 0.902 valid_loss 0.005 | lr 5.000000
---------------------------------------------------------------------
| epoch 7 | 50/ 152 batches | train_acc 0.967 train_loss 0.00237
| epoch 7 | 100/ 152 batches | train_acc 0.962 train_loss 0.00240
| epoch 7 | 150/ 152 batches | train_acc 0.956 train_loss 0.00243
---------------------------------------------------------------------
| epoch 7 | time: 2.86s | valid_acc 0.905 valid_loss 0.005 | lr 5.000000
---------------------------------------------------------------------
| epoch 8 | 50/ 152 batches | train_acc 0.971 train_loss 0.00190
| epoch 8 | 100/ 152 batches | train_acc 0.972 train_loss 0.00190
| epoch 8 | 150/ 152 batches | train_acc 0.971 train_loss 0.00189
---------------------------------------------------------------------
| epoch 8 | time: 2.60s | valid_acc 0.902 valid_loss 0.005 | lr 5.000000
---------------------------------------------------------------------
| epoch 9 | 50/ 152 batches | train_acc 0.983 train_loss 0.00146
| epoch 9 | 100/ 152 batches | train_acc 0.983 train_loss 0.00137
| epoch 9 | 150/ 152 batches | train_acc 0.983 train_loss 0.00142
---------------------------------------------------------------------
| epoch 9 | time: 2.51s | valid_acc 0.907 valid_loss 0.005 | lr 0.500000
---------------------------------------------------------------------
| epoch 10 | 50/ 152 batches | train_acc 0.984 train_loss 0.00143
| epoch 10 | 100/ 152 batches | train_acc 0.987 train_loss 0.00130
| epoch 10 | 150/ 152 batches | train_acc 0.983 train_loss 0.00136
---------------------------------------------------------------------
| epoch 10 | time: 2.52s | valid_acc 0.909 valid_loss 0.005 | lr 0.500000
---------------------------------------------------------------------
test_acc, test_loss = evaluate(valid_dataloader)
print('Model accuracy: {:5.4f}'.format(test_acc))
Model accuracy: 0.9087
def predict(text, text_pipeline):
    with torch.no_grad():
        text = torch.tensor(text_pipeline(text))
        output = model(text, torch.tensor([0]))  # a single sentence starts at offset 0
        return output.argmax(1).item()

# ex_text_str = "随便播放一首专辑阁楼里的佛里的歌"
ex_text_str = "还有双鸭山到淮阴的汽车票吗13号的"

model = model.to("cpu")

print("The predicted category of this text is: %s" % label_name[predict(ex_text_str, text_pipeline)])
The predicted category of this text is: Travel-Query