Transformer Code

1. Embedding

import torch
from torch import nn
import torch.nn.functional as F
import math
from torch import Tensor

# Convert input vocabulary indices into embedding vectors of dimension d_model
class TokenEmbedding(nn.Embedding):
    def __init__(self,vocab_size,d_model):
        super(TokenEmbedding,self).__init__(vocab_size,d_model,padding_idx=1)


# Compute position information for the input sequence via sinusoidal positional encoding
class PositionalEmbedding(nn.Module):
    def __init__(self,d_model,max_len,device):
        super(PositionalEmbedding,self).__init__()
        # Initialize an all-zero matrix of shape [max_len, d_model]
        self.encoding = torch.zeros(max_len,d_model,device=device)
        self.encoding.requires_grad = False
        pos = torch.arange(0,max_len,device=device)
        pos = pos.float().unsqueeze(1)
        _2i = torch.arange(0,d_model,step=2,device=device)
        # Even feature indices get sin, odd feature indices get cos
        self.encoding[:,0::2] = torch.sin(pos*torch.exp(_2i*-(math.log(10000.0)/d_model)))
        self.encoding[:,1::2] = torch.cos(pos*torch.exp(_2i*-(math.log(10000.0)/d_model)))

    # Forward pass: return the positional encodings for the first seq_len positions
    def forward(self,x):
        batch_size,seq_len = x.size()
        return self.encoding[:seq_len,:]
# Combine token embedding with positional encoding, then apply dropout
class TransformerEmbedding(nn.Module):
    def __init__(self,vocab_size,d_model,max_len,drop_prob,device):
        super(TransformerEmbedding,self).__init__()
        self.tok_emb = TokenEmbedding(vocab_size,d_model)
        self.pos_emb = PositionalEmbedding(d_model,max_len,device)
        self.drop = nn.Dropout(p=drop_prob)

    def forward(self,x):
        tok_emb = self.tok_emb(x)
        pos_emb = self.pos_emb(x)
        return self.drop(tok_emb + pos_emb)
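
A quick way to sanity-check the embedding module is to run it on a batch of random token indices. The vocabulary size, batch size, sequence length, and device below are illustrative assumptions, not values from the original code:

# Minimal usage sketch (assumed sizes): embed a batch of token indices
emb = TransformerEmbedding(vocab_size=1000,d_model=512,max_len=64,drop_prob=0.1,device='cpu')
tokens = torch.randint(0,1000,(8,32))   # [batch_size=8, seq_len=32]
out = emb(tokens)
print(out.shape)                        # torch.Size([8, 32, 512])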

2. Multi-Head Attention

import torch
from torch import nn
import torch.nn.functional as F
import math
from torch import Tensor

x = torch.rand(128,32,512)
d_model = 512
n_head = 8

class MutiHeadAttention(nn.Module):
    def __init__(self,d_model,n_head):
        super(MutiHeadAttention,self).__init__()
        self.n_head = n_head
        self.d_model = d_model
        self.w_q = nn.Linear(d_model,d_model)
        self.w_k = nn.Linear(d_model,d_model)
        self.w_v = nn.Linear(d_model,d_model)

        self.w_combine = nn.Linear(d_model,d_model)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self,q,k,v,mask = None):
        # q: [batch, time_q, d_model]; k and v: [batch, time_kv, d_model]
        batch,time_q,dimension = q.shape
        time_kv = k.shape[1]
        n_d = self.d_model // self.n_head
        q,k,v = self.w_q(q),self.w_k(k),self.w_v(v)
        # reshape into [batch, n_head, time, head_dim]
        q = q.view(batch,time_q,self.n_head,n_d).permute(0,2,1,3)
        k = k.view(batch,time_kv,self.n_head,n_d).permute(0,2,1,3)
        v = v.view(batch,time_kv,self.n_head,n_d).permute(0,2,1,3)
        # scaled dot-product: q times k transposed, scaled by sqrt(head_dim)
        score = q@k.transpose(2,3)/math.sqrt(n_d)
        if mask is not None:
            # positions where mask == 0 are suppressed before the softmax
            score = score.masked_fill(mask == 0,-10000)
        score = self.softmax(score)@v
        # merge the heads back into [batch, time_q, d_model]
        score = score.permute(0,2,1,3).contiguous().view(batch,time_q,dimension)
        # final output projection
        output = self.w_combine(score)
        return output

attention = MutiHeadAttention(d_model,n_head)
print(attention(x,x,x))
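
To see how the mask argument interacts with masked_fill above, here is a minimal sketch that builds a lower-triangular causal mask and passes it in. The sequence length simply matches the random input x defined earlier, and the 2-D mask shape relies on broadcasting over the batch and head dimensions:

# Illustrative only: causal (lower-triangular) mask, broadcast over batch and heads
causal_mask = torch.tril(torch.ones(32,32)).bool()   # [time, time]
masked_out = attention(x,x,x,mask=causal_mask)       # positions where mask == 0 receive -10000 before softmax
print(masked_out.shape)                              # torch.Size([128, 32, 512])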

3. Layer Normalization

import torch
from torch import nn
import torch.nn.functional as F
import math
from torch import Tensor


class LayerNorm(nn.Module):
    def __init__(self,d_model,eps = 1e-12):
        super(LayerNorm,self).__init__()
        self.gamma = nn.Parameter(torch.ones(d_model))
        self.beta = nn.Parameter(torch.zeros(d_model))
        self.eps = eps

    # Forward pass: normalize over the last (feature) dimension
    def forward(self,x):
        mean = x.mean(-1,keepdim = True)
        var = x.var(-1,unbiased = False,keepdim = True)
        out = (x - mean) / torch.sqrt(var + self.eps)
        out = self.gamma * out + self.beta
        return out
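
As a quick check, this hand-written LayerNorm should match PyTorch's built-in nn.LayerNorm on the same input, since both normalize over the last dimension with a biased variance. The input shape and tolerance below are illustrative assumptions:

sample = torch.rand(128,32,512)
custom_ln = LayerNorm(d_model=512)
builtin_ln = nn.LayerNorm(512,eps=1e-12)
print(torch.allclose(custom_ln(sample),builtin_ln(sample),atol=1e-5))   # expected: True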

4. Encoder

import torch.nn as nn
import torch.nn.functional as F
import torch
from Transformer.Embedding import TransformerEmbedding
from Transformer.Attention import MutiHeadAttention


class PositionwiseFeedForward(nn.Module):
    def __init__(self, d_model, hidden,dropout = 0.1):
        super(PositionwiseFeedForward, self).__init__()
        self.fc1 = nn.Linear(d_model, hidden)
        self.fc2 = nn.Linear(hidden, d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        x = self.fc1(x)
        x = F.relu(x)
        x = self.dropout(x)
        x = self.fc2(x)
        return x


class EncoderLayer(nn.Module):
    def __init__(self, d_model, n_head, ffn_hidden, dropout = 0.1):
        super(EncoderLayer,self).__init__()
        self.attention = MutiHeadAttention(d_model, n_head)
        self.norm1 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.ffn = PositionwiseFeedForward(d_model, ffn_hidden, dropout = dropout)

        self.norm2 = nn.LayerNorm(d_model)
        self.dropout2 = nn.Dropout(dropout)

    def forward(self, x,mask = None):
        _x = x
        x = self.attention(x,x,x,mask)
        x = self.dropout1(x)
        # residual connection
        x = self.norm1(x + _x)
        _x = x
        # position-wise feed-forward network
        x = self.ffn(x)
        x = self.dropout2(x)
        # residual connection
        x = self.norm2(x + _x)
        return x

class Encoder(nn.Module):
    def __init__(self, enc_voc_size, max_len, d_model, ffn_hidden, n_head, n_layer, dropout = 0.1, device = 'cuda' if torch.cuda.is_available() else 'cpu'):
        super(Encoder, self).__init__()
        self.embedding = TransformerEmbedding(enc_voc_size, d_model, max_len, dropout, device)
        self.layers = nn.ModuleList([EncoderLayer(d_model, n_head, ffn_hidden, dropout) for _ in range(n_layer)])

    def forward(self,x,s_mask):
        # x holds token indices with shape [batch_size, seq_len]
        x = self.embedding(x)
        for layer in self.layers:
            x = layer(x,s_mask)
        return x
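
A minimal sketch of running the encoder on random token indices; the vocabulary size, model sizes, and device below are assumptions chosen for illustration:

encoder = Encoder(enc_voc_size=1000,max_len=64,d_model=512,ffn_hidden=2048,n_head=8,n_layer=6,dropout=0.1,device='cpu')
src = torch.randint(0,1000,(8,32))     # [batch_size=8, src_len=32]
enc_out = encoder(src,s_mask=None)
print(enc_out.shape)                   # torch.Size([8, 32, 512])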

5. Decoder

import torch
from torch import nn
import torch.nn.functional as F
import math
from torch import Tensor
from Transformer.layernorm import LayerNorm
from Transformer.Attention import MutiHeadAttention
from Transformer.Encoder import PositionwiseFeedForward
from Transformer.Embedding import TransformerEmbedding


class DecoderLayer(nn.Module):
    def __init__(self,d_model,ffn_hidden,n_head,drop_prob):
        super(DecoderLayer,self).__init__()
        self.attention1 = MutiHeadAttention(d_model,n_head)
        self.norm1 = LayerNorm(d_model)
        self.dropout1 = nn.Dropout(drop_prob)
        self.cross_attention = MutiHeadAttention(d_model,n_head)
        self.norm2 = LayerNorm(d_model)
        self.dropout2 = nn.Dropout(drop_prob)
        self.ffn = PositionwiseFeedForward(d_model,ffn_hidden,drop_prob)
        self.norm3 = LayerNorm(d_model)
        self.dropout3 = nn.Dropout(drop_prob)


    def forward(self,dec,enc,t_mask,s_mask):
        # masked self-attention over the decoder input
        _x = dec
        x = self.attention1(dec,dec,dec,t_mask)
        x = self.dropout1(x)
        x = self.norm1(x + _x)

        # cross-attention: queries from the decoder, keys and values from the encoder output
        _x = x
        x = self.cross_attention(x,enc,enc,s_mask)
        x = self.dropout2(x)
        x = self.norm2(x + _x)

        # position-wise feed-forward network
        _x = x
        x = self.ffn(x)
        x = self.dropout3(x)
        x = self.norm3(x + _x)
        return x


class Decoder(nn.Module):
    def __init__(self,dec_voc_size,max_len,d_model,ffn_hidden,n_head,n_layer,drop_prob,device):
        super(Decoder,self).__init__()
        self.embedding = TransformerEmbedding(dec_voc_size,d_model,max_len,drop_prob,device)
        self.layers = nn.ModuleList([DecoderLayer(d_model,ffn_hidden,n_head,drop_prob) for _ in range(n_layer)])
        self.fc = nn.Linear(d_model,dec_voc_size)

    def forward(self,dec,enc,t_mask,s_mask):
        dec = self.embedding(dec)
        for layer in self.layers:
            dec = layer(dec,enc,t_mask,s_mask)
        dec = self.fc(dec)
        return dec
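
A minimal sketch of running the decoder against the encoder output from the previous section's example; the vocabulary size, target length, and device are again illustrative assumptions:

decoder = Decoder(dec_voc_size=1200,max_len=64,d_model=512,ffn_hidden=2048,n_head=8,n_layer=6,drop_prob=0.1,device='cpu')
trg = torch.randint(0,1200,(8,30))                        # [batch_size=8, trg_len=30]
dec_out = decoder(trg,enc_out,t_mask=None,s_mask=None)    # enc_out comes from the Encoder sketch above
print(dec_out.shape)                                      # torch.Size([8, 30, 1200])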

6. Transformer

import torch
from torch import nn
import torch.nn.functional as F
import math
from torch import Tensor

from Transformer.Decoder import Decoder
from Transformer.Encoder import Encoder


class Transformer(nn.Module):
    def __init__(self,
                 src_pad_idx,
                 trg_pad_idx,
                 enc_voc_size,
                 dec_voc_size,
                 max_len,
                 d_model,
                 n_heads,
                 ffn_hidden,
                 n_layers,
                 drop_prob,
                 device):
        super(Transformer,self).__init__()
        self.encoder = Encoder(enc_voc_size, max_len, d_model, ffn_hidden, n_heads, n_layers, drop_prob, device)
        self.decoder = Decoder(dec_voc_size, max_len, d_model, ffn_hidden, n_heads, n_layers, drop_prob, device)
        self.src_pad_idx = src_pad_idx
        self.trg_pad_idx = trg_pad_idx
        self.device = device
    def make_pad_mask(self,q,k,pad_idx_q,pad_idx_k):
        # build a [batch, 1, len_q, len_k] boolean mask that is False at padding positions
        len_q,len_k = q.size(1), k.size(1)
        q = q.ne(pad_idx_q).unsqueeze(1).unsqueeze(3)
        q = q.repeat(1,1,1,len_k)
        k = k.ne(pad_idx_k).unsqueeze(1).unsqueeze(2)
        k = k.repeat(1,1,len_q,1)
        mask = q & k
        return mask

    def make_causal_mask(self,q,k):
        # lower-triangular mask that blocks attention to future positions
        len_q,len_k = q.size(1), k.size(1)
        mask = torch.tril(torch.ones(len_q,len_k)).bool().to(self.device)
        return mask

    def forward(self,src,trg):
        src_mask = self.make_pad_mask(src,src,self.src_pad_idx,self.src_pad_idx)
        trg_mask = self.make_pad_mask(trg,trg,self.trg_pad_idx,self.trg_pad_idx) & self.make_causal_mask(trg,trg)
        # cross-attention mask: target positions attend to non-padding source positions
        src_trg_mask = self.make_pad_mask(trg,src,self.trg_pad_idx,self.src_pad_idx)
        enc = self.encoder(src,src_mask)
        out = self.decoder(trg,enc,trg_mask,src_trg_mask)
        return out
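
Putting everything together, the following end-to-end sketch builds the full model and runs one forward pass. All sizes and the device are illustrative assumptions; the pad index of 1 matches the padding_idx used by TokenEmbedding above:

model = Transformer(src_pad_idx=1,trg_pad_idx=1,enc_voc_size=1000,dec_voc_size=1200,
                    max_len=64,d_model=512,n_heads=8,ffn_hidden=2048,n_layers=6,
                    drop_prob=0.1,device='cpu')
src = torch.randint(2,1000,(8,32))   # source token indices (avoiding the pad index)
trg = torch.randint(2,1200,(8,30))   # target token indices
logits = model(src,trg)
print(logits.shape)                  # torch.Size([8, 30, 1200])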

