# Example code: TextRCNN text classification.
# Runs as-is with random toy data; adapt the data loading to your own task.
#!/usr/bin/env python
# coding=utf-8
import torch
import torch.nn as nn
import torch.utils.data as Data
import torch.nn.functional as F
class TextRCNNModel(nn.Module):
    """TextRCNN: a bidirectional LSTM over word embeddings, with the raw
    embeddings concatenated back in, max-pooled over time, then classified.

    Expects input of shape [batch, pad_size, embedding_size]; returns logits
    of shape [batch, output_size].
    """

    def __init__(self, config):
        super(TextRCNNModel, self).__init__()
        # Single-layer bidirectional LSTM; batch_first keeps [batch, seq, feat].
        self.lstm = nn.LSTM(config['embedding_size'], config['lstm_hidden_size'], 1,
                            bidirectional=True, batch_first=True)
        # Max-pool across the full time axis (all pad_size steps at once).
        self.maxpool = nn.MaxPool1d(config['pad_size'])
        # Bidirectional LSTM doubles the hidden size; the raw embedding is
        # concatenated on top (the "recurrent + original" trick of TextRCNN).
        self.fc = nn.Linear(config['lstm_hidden_size'] * 2
                            + config['embedding_size'], config['output_size'])

    def forward(self, x):
        out, _ = self.lstm(x)                # [batch, seq, 2*hidden]
        out = torch.cat((x, out), 2)         # [batch, seq, 2*hidden + emb]
        out = F.relu(out)
        out = out.permute(0, 2, 1)           # [batch, feat, seq] for MaxPool1d
        # Squeeze ONLY the pooled time dim. A bare .squeeze() would also drop
        # the batch dim when batch_size == 1, producing a 1-D tensor and a
        # batch-less output from the final Linear.
        out = self.maxpool(out).squeeze(-1)  # [batch, feat]
        out = self.fc(out)                   # [batch, output_size]
        return out
def get_total_train_data(word_embedding_size, class_count, pad_size, sample_count=1000):
    """Build a random toy training set; replace with real data for your task.

    Args:
        word_embedding_size: embedding length of each token.
        class_count: number of target classes; labels are drawn from [0, class_count).
        pad_size: number of tokens per sentence.
        sample_count: number of samples to generate (default 1000, matching the
            previously hard-coded size).

    Returns:
        (x_train, y_train): a float tensor of shape
        [sample_count, pad_size, word_embedding_size] and a long tensor of
        class indices with shape [sample_count, 1].
    """
    # Generate directly with torch instead of round-tripping through numpy.
    x_train = torch.rand(sample_count, pad_size, word_embedding_size)
    y_train = torch.randint(0, class_count, (sample_count, 1)).long()
    return x_train, y_train
if __name__ == '__main__':
    # ================ training hyper-parameters =================
    epochs = 1000
    batch_size = 30
    output_class = 14
    embedding_size = 350
    # ================ model configuration =================
    config = {
        # key parameters
        'embedding_size': embedding_size,  # length of each word embedding
        'output_size': output_class,       # number of target classes
        'pad_size': 40,                    # tokens per sentence
        # secondary parameters
        'lstm_hidden_size': 256,
    }
    # ================ training ================
    x_train, y_train = get_total_train_data(embedding_size, output_class, config['pad_size'])
    train_loader = Data.DataLoader(
        dataset=Data.TensorDataset(x_train, y_train),  # wraps tensors of any shape
        batch_size=batch_size,
        shuffle=True,    # shuffling generally helps training
        num_workers=6,   # multiprocess data loading
        drop_last=True,  # keep every batch full
    )
    model = TextRCNNModel(config=config)
    cross_loss = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)  # optimizer
    model.train()
    for epoch in range(epochs):
        single_loss = None
        for seq, labels in train_loader:
            optimizer.zero_grad()
            y_pred = model(seq)
            # CrossEntropyLoss expects class indices of shape [batch]; squeeze
            # only the trailing dim so a batch of one would still work.
            single_loss = cross_loss(y_pred, labels.squeeze(-1))
            single_loss.backward()
            optimizer.step()
        # Guard in case the loader yielded no batches; .item() extracts the
        # scalar loss without going through numpy.
        if single_loss is not None:
            print(f"Step: {epoch} loss : {single_loss.item()}")