GPT | A Hand-Written Decoder-Only Transformer Implementation

        This code implements a self-attention network by hand, meaning every module is written from scratch rather than that no library may be used at all. One caveat is that the tokenizer is taken directly from BERT, so treat this as a reference implementation.

        The overall approach mirrors my earlier hand-written encoder-only implementation (also written by me), so much of the structure is repeated. Here the model is used for review sentiment-polarity recognition, which is essentially a binary classification task; other downstream tasks can be adapted following the same principles, but no code is provided for them.

1. Imports

import torch
from torch import nn
from torch.utils.data import DataLoader, random_split, Dataset
from transformers import BertTokenizer
from tqdm import tqdm
from sklearn.metrics import f1_score
import numpy as np
import random
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Using device:", device)

        The key version dependencies are torch and transformers; the other libraries rarely cause version-related errors.
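
If you need to confirm the versions in your own environment, they can be printed directly; a minimal check (assuming both packages are installed):

import torch
import transformers

# Print the installed versions to confirm they match your environment
print("torch:", torch.__version__)
print("transformers:", transformers.__version__)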

2. Configurable settings

        

TRAIN_FILE = '/kaggle/input/food-comment/train_food.txt'
TEST_FILE = '/kaggle/input/food-comment/test_food.txt'
MODEL_SAVE_PATH = 'sentiment_classifier.pth'
BATCH_SIZE = 32
EPOCHS = 8
LEARNING_RATE = 8e-6
MAX_SEQ_LEN = 512
VALID_RATIO = 0.15
FRACTION = 1
vocab_size = 30000  # assumed vocabulary size (must cover all tokenizer ids)
embed_size = 256    # embedding dimension
num_heads = 4       # number of attention heads
ff_hidden_size = 512  # feed-forward hidden size
num_layers = 6      # number of decoder layers
max_len = 512       # maximum sequence length
num_labels = 1      # number of output logits; one is enough for binary sentiment (positive/negative)

        This is a decoder-only model; its 6 layers are half of GPT's 12.
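
Note that vocab_size only has to cover the ids produced by the tokenizer (bert-base-chinese has 21,128 tokens, so 30,000 is a safe over-estimate). A small sketch of deriving it from the tokenizer instead of hard-coding it, assuming the same bert-base-chinese tokenizer initialized in section 5:

from transformers import BertTokenizer

# Size the embedding table from the tokenizer actually used,
# so every possible input_id has a corresponding embedding row
tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
vocab_size = len(tokenizer)  # 21128 for bert-base-chinese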

3. Loading the data

# Dataset loading and preprocessing
class SentimentDataset(Dataset):
    def __init__(self, filename):
        with open(filename, 'r', encoding='utf-8') as file:
            lines = file.readlines()
        self.samples = [line.strip().split(',', 1) for line in lines]
        random.shuffle(self.samples)

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        label, text = self.samples[idx]
        return text, int(label)
print("OK")

def load_data(file_path, valid_ratio, fraction=1.0):
    dataset = SentimentDataset(file_path)
    train_size = int((1 - valid_ratio) * len(dataset) * fraction)
    valid_size = int(len(dataset) - train_size) 
    train_dataset, valid_dataset = random_split(dataset, [train_size, valid_size])
    return train_dataset, valid_dataset

train_dataset, valid_dataset = load_data(TRAIN_FILE, VALID_RATIO, fraction=FRACTION)
train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
valid_loader = DataLoader(valid_dataset, batch_size=BATCH_SIZE, shuffle=False)
test_dataset = SentimentDataset(TEST_FILE)
test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False)
print("OK")

        If needed, you can use:

from torch.utils.data import Subset

train_subset = Subset(train_dataset, [0, 1, 2, 3, 4, 5])
train_loader = DataLoader(train_subset, batch_size=32, shuffle=True)
valid_subset = Subset(valid_dataset, range(3))
valid_loader = DataLoader(valid_subset, batch_size=32, shuffle=False)


        to load only a small subset of the data and speed up debugging.

4. Transformer network structure

1). Reusable Transformer components

# Convert input token indices into embedding vectors
class TokenEmbedding(nn.Module):
    def __init__(self, vocab_size, embed_size):
        super(TokenEmbedding, self).__init__()
        self.embedding = nn.Embedding(vocab_size, embed_size)

    def forward(self, x):
        return self.embedding(x)

# Embed position information into the input vectors
class PositionalEmbedding(nn.Module):
    def __init__(self, max_len, embed_size):
        super(PositionalEmbedding, self).__init__()
        self.pos_embedding = nn.Embedding(max_len, embed_size)
        self.register_buffer("position_ids", torch.arange(max_len).expand((1, -1)))

    def forward(self, x):
        position_ids = self.position_ids[:, :x.size(1)]
        return self.pos_embedding(position_ids)

# Multi-head attention: values, keys, and queries are linearly projected into new representations,
# and attention scores are computed with einsum
class MultiHeadAttention(nn.Module):
    def __init__(self, embed_size, num_heads):
        super(MultiHeadAttention, self).__init__()
        self.num_heads = num_heads
        self.embed_size = embed_size

        assert embed_size % num_heads == 0, "Embedding size must be divisible by number of heads"

        self.head_dim = embed_size // num_heads
        self.values = nn.Linear(self.head_dim, self.head_dim, bias=False)
        self.keys = nn.Linear(self.head_dim, self.head_dim, bias=False)
        self.queries = nn.Linear(self.head_dim, self.head_dim, bias=False)
        self.fc_out = nn.Linear(num_heads * self.head_dim, embed_size)

    def forward(self, values, keys, query, mask):
        N = query.shape[0]
        value_len, key_len, query_len = values.shape[1], keys.shape[1], query.shape[1]

        values = values.reshape(N, value_len, self.num_heads, self.head_dim)
        keys = keys.reshape(N, key_len, self.num_heads, self.head_dim)
        queries = query.reshape(N, query_len, self.num_heads, self.head_dim)

        values = self.values(values)
        keys = self.keys(keys)
        queries = self.queries(queries)

        energy = torch.einsum("nqhd,nkhd->nhqk", [queries, keys])

        if mask is not None:
            mask = mask.unsqueeze(1).unsqueeze(2)
            energy = energy.masked_fill(mask == 0, float("-1e20"))

        attention = torch.softmax(energy / (self.head_dim ** 0.5), dim=3)  # scale by sqrt(d_k) as in the original Transformer

        out = torch.einsum("nhql,nlhd->nqhd", [attention, values]).reshape(N, query_len, self.num_heads * self.head_dim)

        return self.fc_out(out)
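
For readers less familiar with einsum: "nqhd,nkhd->nhqk" sums over the per-head feature dimension d, which is the same as moving the head axis forward and doing a batched matrix product. A quick equivalence check with arbitrary shapes:

import torch

N, q_len, k_len, heads, head_dim = 2, 5, 7, 4, 64
queries = torch.randn(N, q_len, heads, head_dim)
keys = torch.randn(N, k_len, heads, head_dim)

# einsum form used in MultiHeadAttention above
energy = torch.einsum("nqhd,nkhd->nhqk", [queries, keys])

# equivalent batched matmul: [N, heads, q_len, head_dim] @ [N, heads, head_dim, k_len]
energy_matmul = queries.permute(0, 2, 1, 3) @ keys.permute(0, 2, 3, 1)

print(torch.allclose(energy, energy_matmul, atol=1e-5))  # True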

# Feed-forward network: two fully connected layers with a ReLU in between, strengthening the model's ability to capture relations in the input sequence
class FeedForwardNetwork(nn.Module):
    def __init__(self, embed_size, ff_hidden_size):
        super(FeedForwardNetwork, self).__init__()
        self.fc1 = nn.Linear(embed_size, ff_hidden_size)
        self.fc2 = nn.Linear(ff_hidden_size, embed_size)

    def forward(self, x):
        return self.fc2(torch.relu(self.fc1(x)))

2). A single decoder layer

# Decoder layer: self-attention, residual connection, layer normalization, and dropout
class DecoderLayer(nn.Module):
    def __init__(self, embed_size, num_heads, ff_hidden_size, dropout):
        super(DecoderLayer, self).__init__()
        self.multi_head_attention = MultiHeadAttention(embed_size, num_heads)
        self.feed_forward = FeedForwardNetwork(embed_size, ff_hidden_size)
        self.layernorm1 = nn.LayerNorm(embed_size)
        self.layernorm2 = nn.LayerNorm(embed_size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, mask):
        attention = self.multi_head_attention(x, x, x, mask)
        x = self.layernorm1(attention + x)
        x = self.dropout(x)
        forward = self.feed_forward(x)
        out = self.layernorm2(forward + x)
        out = self.dropout(out)
        return out
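
As written, DecoderLayer only receives the padding mask from the tokenizer, so self-attention is bidirectional; a GPT-style decoder would additionally apply a causal (lower-triangular) mask so that each position cannot attend to later tokens. For sentence-level classification this is not strictly required, but a sketch of building a combined mask is shown below (MultiHeadAttention would then need to accept this 4-D mask directly instead of unsqueezing a 2-D one):

import torch

def build_causal_padding_mask(attention_mask):
    # attention_mask: [batch, seq_len] padding mask of 0/1 values
    seq_len = attention_mask.size(1)
    # Lower-triangular matrix: query position q may attend to key positions k <= q
    causal = torch.tril(torch.ones(seq_len, seq_len, device=attention_mask.device))
    # Zero out padded key positions for every query
    padding = attention_mask[:, None, None, :].float()  # [batch, 1, 1, seq_len]
    return causal[None, None, :, :] * padding           # [batch, 1, seq_len, seq_len]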

3). Overall model definition

# Full model definition: stack num_layers (6 here) DecoderLayer blocks
class TransformerDecoder(nn.Module):
    def __init__(self, vocab_size, embed_size, num_heads, ff_hidden_size, num_layers, max_len, num_labels, dropout):
        super(TransformerDecoder, self).__init__()
        self.token_embedding = TokenEmbedding(vocab_size, embed_size)
        self.position_embedding = PositionalEmbedding(max_len, embed_size)
        self.layers = nn.ModuleList(
            [DecoderLayer(embed_size, num_heads, ff_hidden_size, dropout) for _ in range(num_layers)]
        )
        self.pooling = nn.AdaptiveAvgPool1d(1)  # global average pooling over the sequence
        self.fc_out = nn.Linear(embed_size, num_labels)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, mask):
        token_embeddings = self.token_embedding(x)
        position_embeddings = self.position_embedding(x)
        x = self.dropout(token_embeddings + position_embeddings)

        for layer in self.layers:
            x = layer(x, mask)

        x = x.transpose(1, 2)  # [batch, seq, embed] -> [batch, embed, seq] for pooling
        x = self.pooling(x).squeeze(2)  # global average pooling over sequence positions

        return self.fc_out(x)
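
Before training on real data it is worth sanity-checking the output shape with random input; a minimal sketch using the hyperparameters from section 2:

# Random token ids and an all-ones padding mask (values are arbitrary)
dummy_ids = torch.randint(0, vocab_size, (2, 16))
dummy_mask = torch.ones(2, 16, dtype=torch.long)
decoder = TransformerDecoder(vocab_size, embed_size, num_heads, ff_hidden_size,
                             num_layers, max_len, num_labels, dropout=0.3)
print(decoder(dummy_ids, dummy_mask).shape)  # torch.Size([2, 1]): one logit per sample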

5. Defining the model

class SentimentClassifier(nn.Module):
    def __init__(self, vocab_size, embed_size, num_heads, ff_hidden_size, num_layers, max_len, num_labels, dropout=0.3):
        super(SentimentClassifier, self).__init__()
        self.decoder = TransformerDecoder(
            vocab_size=vocab_size, 
            embed_size=embed_size, 
            num_heads=num_heads, 
            ff_hidden_size=ff_hidden_size, 
            num_layers=num_layers, 
            max_len=max_len, 
            num_labels=num_labels, 
            dropout=dropout
        )

    def forward(self, x, mask):
        logits = self.decoder(x, mask)
        return logits

model = SentimentClassifier(vocab_size, embed_size, num_heads, ff_hidden_size, num_layers, max_len, num_labels).to(device)
#model.load_state_dict(torch.load("/kaggle/input/gpt_food/pytorch/strongpower/1/sentiment_classifier (3).pth"))
# Initialize the tokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')

6. Visualization and confusion matrix

def plot_accuracies(train_accuracies, valid_accuracies):
    plt.figure(figsize=(10, 5))
    plt.plot(train_accuracies, label='Training Accuracy')
    plt.plot(valid_accuracies, label='Validation Accuracy')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy (%)')
    plt.title('Accuracy Over Epochs')
    plt.legend()
    plt.show()

def plot_confusion_matrix(labels, predictions):
    cm = confusion_matrix(labels, predictions)
    disp = ConfusionMatrixDisplay(confusion_matrix=cm)
    disp.plot(cmap=plt.cm.Blues)
    plt.title('Confusion Matrix')
    plt.show()

7. Training and validation

# Training and validation functions
def train_and_validate(model, train_loader, valid_loader, epochs):
    optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
    loss_fn = nn.BCEWithLogitsLoss()

    train_accuracies = []
    valid_accuracies = []
    valid_predictions = []
    valid_labels_list = []

    for epoch in range(epochs):
        model.train()
        correct = 0
        total = 0
        loop = tqdm(train_loader, leave=True)
        for texts, labels in loop:
            optimizer.zero_grad()
            inputs = tokenizer(texts, max_length=MAX_SEQ_LEN, truncation=True, padding='max_length', return_tensors='pt')
            input_ids = inputs['input_ids'].to(device)
            attention_mask = inputs['attention_mask'].to(device)
            labels = labels.float().to(device).view(-1, 1)  # move labels to the device and reshape to match the logits
            logits = model(input_ids, attention_mask)
            loss = loss_fn(logits, labels)
            loss.backward()
            optimizer.step()

            preds = torch.round(torch.sigmoid(logits))
            correct += (preds == labels).sum().item()
            total += labels.size(0)
            loop.set_description(f'Epoch {epoch + 1}/{epochs}')
            loop.set_postfix(loss=loss.item())
        
        train_accuracy = 100 * correct / total
        train_accuracies.append(train_accuracy)

        valid_accuracy, predictions, labels = validate(model, valid_loader)
        valid_accuracies.append(valid_accuracy)
        valid_predictions.extend(predictions)
        valid_labels_list.extend(labels)

    plot_accuracies(train_accuracies, valid_accuracies)
    plot_confusion_matrix(valid_labels_list, valid_predictions)

    
def validate(model, loader):
    model.eval()
    total, correct = 0, 0
    predictions, labels_list = [], []
    with torch.no_grad():
        for texts, labels in tqdm(loader, desc="validating"):
            inputs = tokenizer(texts, max_length=MAX_SEQ_LEN, truncation=True, padding='max_length', return_tensors='pt')
            input_ids = inputs['input_ids'].to(device)
            attention_mask = inputs['attention_mask'].to(device)
            labels = labels.float().to(device)
            logits = model(input_ids, attention_mask)
            predicted = torch.round(torch.sigmoid(logits)).view(-1, 1)
            labels = labels.view(-1, 1)

            predictions.extend(predicted.cpu().numpy())
            labels_list.extend(labels.cpu().numpy())
            correct += (predicted == labels).sum().item()
            total += labels.numel()
            
    accuracy = 100 * correct / total  # accuracy in percent
    f1 = f1_score(labels_list, predictions)  # F1 score
    print(f'Validation Accuracy: {accuracy:.2f}%')
    print(f'Validation F1 Score: {f1:.2f}')

    return accuracy, predictions, labels_list

8. Testing

def test(model, loader):
    model.eval()
    total, correct = 0, 0
    predictions, true_labels = [], []
    tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')

    with torch.no_grad():
        for texts, labels in tqdm(loader, desc="testing"):
            inputs = tokenizer(texts, max_length=MAX_SEQ_LEN, truncation=True, padding='max_length', return_tensors='pt')
            input_ids = inputs['input_ids'].to(device)
            attention_mask = inputs['attention_mask'].to(device)
            labels = labels.float().to(device)
            logits = model(input_ids, attention_mask)
            predicted = (logits.squeeze() > 0).float()
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
            predictions.extend(predicted.cpu().numpy())
            true_labels.extend(labels.cpu().numpy())
            
    accuracy = correct / total
    f1 = f1_score(true_labels, predictions)
    plot_confusion_matrix(true_labels, predictions)
    print(f'Test Accuracy: {accuracy:.2f}')
    print(f'Test F1 Score: {f1:.2f}')

9. Main

# Train, validate, and test the model
train_and_validate(model, train_loader, valid_loader, EPOCHS)
test(model, test_loader)

# Save the trained weights
torch.save(model.state_dict(), MODEL_SAVE_PATH)
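
The saved weights can later be reloaded for single-sentence prediction; a minimal sketch (which label id means positive depends on the training file; 1 = positive is assumed here):

# Reload the trained weights and classify one sentence
model.load_state_dict(torch.load(MODEL_SAVE_PATH, map_location=device))
model.eval()

def predict(text):
    inputs = tokenizer(text, max_length=MAX_SEQ_LEN, truncation=True,
                       padding='max_length', return_tensors='pt')
    with torch.no_grad():
        logits = model(inputs['input_ids'].to(device),
                       inputs['attention_mask'].to(device))
    return int(torch.sigmoid(logits).item() > 0.5)

print(predict("这家店的菜很好吃"))  # 1 = positive (assumed label convention)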

10. Results

        The confusion matrix is shown in the figure.

        This code is the initial version of the course-project submission; the final version adds a data-augmentation stage, which improves the results to some degree. The final results and output are shown in the figures.

       

       

        Heeding the lessons of those who came before: if a teaching assistant happens to see this, please note that this is only the initial version, not the optimized version that was submitted.
