Transformer代码阅读——Transformer部分

原作者视频链接:
【研1基本功 (真的很简单)召唤Transformer】手写“变压器”or“变形金刚”_哔哩哔哩_bilibili:https://www.bilibili.com/video/BV1oK421Y7Vh/?spm_id_from=333.788&vd_source=01e171598915d67de063d93cfd6421e6

1、模型整体框架:

2、代码部分:

class Transformer(nn.Module):
    """Encoder-decoder Transformer wrapper.

    Builds the padding / causal attention masks for a batch of token-id
    sequences and runs them through the project's ``Encoder`` and
    ``Decoder`` modules (defined elsewhere in this project).

    Args:
        src_pad_idx: token id used for padding in the source vocabulary.
        trg_pad_idx: token id used for padding in the target vocabulary.
        enc_voc_size: encoder (source) vocabulary size.
        dec_voc_size: decoder (target) vocabulary size.
        max_len: maximum sequence length (used by the positional encodings).
        d_model: model / embedding dimension.
        n_heads: number of attention heads.
        ffn_hidden: hidden size of the position-wise feed-forward layers.
        n_layers: number of encoder and decoder layers.
        drop_prob: dropout probability.
        device: torch device on which the masks are created.
    """

    def __init__(self, src_pad_idx, trg_pad_idx, enc_voc_size, dec_voc_size,
                 max_len, d_model, n_heads, ffn_hidden, n_layers, drop_prob, device):
        super().__init__()

        self.encoder = Encoder(enc_voc_size, max_len, d_model, ffn_hidden,
                               n_heads, n_layers, drop_prob, device)
        self.decoder = Decoder(dec_voc_size, max_len, d_model, ffn_hidden,
                               n_heads, n_layers, drop_prob, device)

        # Remember the padding-token ids so masks can be rebuilt per batch.
        self.src_pad_idx = src_pad_idx
        self.trg_pad_idx = trg_pad_idx
        self.device = device

    def make_pad_mask(self, q, k, pad_idx_q, pad_idx_k):
        """Build a boolean padding mask of shape (batch, 1, len_q, len_k).

        An entry is True only where BOTH the query position and the key
        position hold real (non-padding) tokens; attention scores at False
        positions are meant to be masked out downstream.

        Args:
            q: (batch, len_q) tensor of query-side token ids.
            k: (batch, len_k) tensor of key-side token ids.
            pad_idx_q: padding id on the query side.
            pad_idx_k: padding id on the key side.

        Returns:
            Bool tensor of shape (batch, 1, len_q, len_k); the singleton
            dim broadcasts over the attention heads.
        """
        len_q, len_k = q.size(1), k.size(1)

        # (batch, len_q) -> (batch, 1, len_q, 1) -> (batch, 1, len_q, len_k):
        # each query row is replicated across every key column.
        q = q.ne(pad_idx_q).unsqueeze(1).unsqueeze(3)
        q = q.repeat(1, 1, 1, len_k)

        # (batch, len_k) -> (batch, 1, 1, len_k) -> (batch, 1, len_q, len_k)
        k = k.ne(pad_idx_k).unsqueeze(1).unsqueeze(2)
        k = k.repeat(1, 1, len_q, 1)

        # Bitwise AND: a (q, k) pair survives only if both sides are non-pad.
        mask = q & k
        return mask

    def make_casual_mask(self, q, k):
        """Build a (len_q, len_k) lower-triangular causal mask.

        True on and below the diagonal, so each position can attend only to
        itself and earlier positions.

        NOTE: the method name keeps the original spelling ("casual") so
        existing callers are unaffected; "causal" is what is meant.
        """
        len_q, len_k = q.size(1), k.size(1)
        # Create the mask as bool directly on the target device instead of the
        # deprecated .type(torch.BoolTensor) followed by .to(device).
        mask = torch.tril(torch.ones(len_q, len_k, dtype=torch.bool, device=self.device))
        return mask

    def forward(self, src, trg):
        """Run a (src, trg) batch of token-id tensors through the model.

        Args:
            src: (batch, src_len) source token ids.
            trg: (batch, trg_len) target token ids.

        Returns:
            The decoder output (e.g. per-token logits — see Decoder).
        """
        # Encoder self-attention: hide source padding.
        src_mask = self.make_pad_mask(src, src, self.src_pad_idx, self.src_pad_idx)
        # Decoder self-attention: target padding AND causality. Use bitwise &
        # (not *) so both operands are combined as the booleans they are.
        trg_mask = self.make_pad_mask(trg, trg, self.trg_pad_idx, self.trg_pad_idx) \
            & self.make_casual_mask(trg, trg)
        # Cross-attention: queries come from the target, keys from the source.
        src_trg_mask = self.make_pad_mask(trg, src, self.trg_pad_idx, self.src_pad_idx)

        enc = self.encoder(src, src_mask)
        output = self.decoder(trg, enc, trg_mask, src_trg_mask)
        return output

### 回答1: 以下是一个简单的Transformer模型的Python代码示例: ```python import torch import torch.nn as nn import torch.nn.functional as F class TransformerEncoderLayer(nn.Module): def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1): super().__init__() self.self_attn = nn.MultiheadAttention(d_model, nhead) self.norm1 = nn.LayerNorm(d_model) self.dropout1 = nn.Dropout(dropout) self.linear1 = nn.Linear(d_model, dim_feedforward) self.activation = nn.ReLU() self.linear2 = nn.Linear(dim_feedforward, d_model) self.norm2 = nn.LayerNorm(d_model) self.dropout2 = nn.Dropout(dropout) def forward(self, src): src2 = self.self_attn(src, src, src)[0] src = src + self.dropout1(src2) src = self.norm1(src) src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) src = src + self.dropout2(src2) src = self.norm2(src) return src class TransformerEncoder(nn.Module): def __init__(self, num_layers, d_model, nhead, dim_feedforward=2048, dropout=0.1): super().__init__() self.layers = nn.ModuleList([TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout) for _ in range(num_layers)]) self.norm = nn.LayerNorm(d_model) def forward(self, src): for layer in self.layers: src = layer(src) src = self.norm(src) return src class Transformer(nn.Module): def __init__(self, num_layers, d_model, nhead, dim_feedforward=2048, dropout=0.1): super().__init__() self.encoder = TransformerEncoder(num_layers, d_model, nhead, dim_feedforward, dropout) self.decoder = nn.Linear(d_model, 1) def forward(self, src): src = self.encoder(src) src = self.decoder(src) return src ``` 这里定义了一个简单的Transformer模型,包含了TransformerEncoderLayerTransformerEncoderTransformer三个类。其中,TransformerEncoderLayerTransformer编码器中的一个子层,用于处理输入序列信息;TransformerEncoder是多个TransformerEncoderLayer的堆叠;Transformer则是包含一个TransformerEncoder和一个输出层的完整的Transformer模型。 代码中使用了PyTorch框架,包含了一些常用的神经网络模块,如Linear、LayerNorm、Dropout等。TransformerEncoderLayer中的self_attn是多头注意力机制,用于计算输入序列的自注意力分数和上下文向量;norm1和norm2是Layer 
Normalization层,用于规范化输入序列和子层输出;dropout1和dropout2是Dropout层,用于防止模型过拟合;linear1和linear2是全连接层,用于对序列信息进行变换。 在这个简单的Transformer模型中,我们只使用了一个TransformerEncoder,而没有使用TransformerDecoder,因此它只能进行单向的序列转换。如果需要进行双向的序列转换,需要使用TransformerDecoderMasked Multi-Head Attention等模块。 ### 回答2: Transformer是一种用于自然语言处理任务的神经网络模型,其主要应用于机器翻译和文本生成任务。下面是一个简单的Transformer模型的代码实现。 代码实现使用Python编写,主要依赖于深度学习框架PyTorch。首先,需要导入相应的库和包: ```python import torch import torch.nn as nn import torch.nn.functional as F ``` 接下来,定义Transformer的一个关键组件——多头自注意力机制(Multi-Head Attention)。该组件通过对输入进行线性变换、注意力计算和线性变换的操作来增强特征表达能力。 ```python class MultiHeadAttention(nn.Module): def __init__(self, d_model, num_heads): super(MultiHeadAttention, self).__init__() self.num_heads = num_heads self.d_model = d_model self.d_head = d_model // num_heads self.query_linear = nn.Linear(d_model, d_model) self.key_linear = nn.Linear(d_model, d_model) self.value_linear = nn.Linear(d_model, d_model) self.final_linear = nn.Linear(d_model, d_model) def forward(self, queries, keys, values, mask=None): Q = self.query_linear(queries) K = self.key_linear(keys) V = self.value_linear(values) Q = self._split_heads(Q) K = self._split_heads(K) V = self._split_heads(V) scores = torch.matmul(Q, K.transpose(-1, -2)) / torch.sqrt(torch.tensor(self.d_head).float()) if mask is not None: mask = mask.unsqueeze(1) scores = scores.masked_fill(mask == 0, -1e9) scores = F.softmax(scores, dim=-1) attention = torch.matmul(scores, V) attention = self._combine_heads(attention) attention = self.final_linear(attention) return attention def _split_heads(self, x): batch_size, seq_len, d_model = x.size() x = x.view(batch_size, seq_len, self.num_heads, self.d_head) x = x.permute(0, 2, 1, 3).contiguous() return x.view(batch_size * self.num_heads, seq_len, self.d_head) def _combine_heads(self, x): batch_size, seq_len, d_model = x.size() x = x.view(batch_size // self.num_heads, self.num_heads, seq_len, d_model) x = x.permute(0, 2, 1, 3).contiguous() return x.view(batch_size // 
self.num_heads, seq_len, self.num_heads * d_model) ``` 接下来,定义Transformer的另一个关键组件——位置编码(Positional Encoding)。位置编码用于为输入序列中的每个位置添加一组固定的向量,以表示其位置信息。 ```python class PositionalEncoding(nn.Module): def __init__(self, d_model, max_len=5000): super(PositionalEncoding, self).__init__() self.d_model = d_model pe = torch.zeros(max_len, d_model) position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1) div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model)) pe[:, 0::2] = torch.sin(position * div_term) pe[:, 1::2] = torch.cos(position * div_term) pe = pe.unsqueeze(0) self.register_buffer('pe', pe) def forward(self, x): x = x + self.pe[:, :x.size(1)] return x ``` 最后,定义完整的Transformer模型。 ```python class Transformer(nn.Module): def __init__(self, d_model, num_heads, num_encoder_layers, num_decoder_layers, feed_forward_dim, src_vocab_size, tgt_vocab_size, max_len): super(Transformer, self).__init__() self.encoder = nn.Embedding(src_vocab_size, d_model) self.src_positional_encoding = PositionalEncoding(d_model, max_len) self.decoder = nn.Embedding(tgt_vocab_size, d_model) self.tgt_positional_encoding = PositionalEncoding(d_model, max_len) self.encoder_layers = nn.ModuleList([nn.TransformerEncoderLayer(d_model, num_heads, feed_forward_dim) for _ in range(num_encoder_layers)]) self.decoder_layers = nn.ModuleList([nn.TransformerDecoderLayer(d_model, num_heads, feed_forward_dim) for _ in range(num_decoder_layers)]) self.fc = nn.Linear(d_model, tgt_vocab_size) def forward(self, src, tgt, src_mask=None, tgt_mask=None): src = self.encoder(src) * math.sqrt(self.d_model) src = self.src_positional_encoding(src) tgt = self.decoder(tgt) * math.sqrt(self.d_model) tgt = self.tgt_positional_encoding(tgt) for layer in self.encoder_layers: src = layer(src, src_mask) for layer in self.decoder_layers: tgt = layer(tgt, src, tgt_mask) output = self.fc(tgt) return output ``` 以上是一个简单的Transformer模型的代码实现。通过定义多头自注意力机制、位置编码和完整的Transformer模型,可以用于自然语言处理任务的实现。
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值