Foreword
This post walks through two pieces of the Transformer implementation in PyTorch: the encoder-decoder wrapper class, and the make_model function that assembles the complete model from the components built in the earlier posts of this series.
1. Introduction to Building the Transformer Model
Master the implementation of the encoder-decoder structure.
Master the process of building the Transformer model.
2. Implementing the Encoder-Decoder Structure
import torch
import torch.nn as nn

# The EncoderDecoder class implements the encoder-decoder structure
class EncoderDecoder(nn.Module):
    def __init__(self, encoder, decoder, source_embed, target_embed, generator):
        """The initializer takes 5 arguments: the encoder object, the decoder
        object, the source embedding function, the target embedding function,
        and the generator object for the output classes."""
        super(EncoderDecoder, self).__init__()
        # Store the arguments on the instance
        self.encoder = encoder
        self.decoder = decoder
        self.src_embed = source_embed
        self.tgt_embed = target_embed
        self.generator = generator

    def forward(self, source, target, source_mask, target_mask):
        """forward takes four arguments: source is the source data, target is
        the target data, and source_mask and target_mask are the corresponding
        mask tensors."""
        # Pass source and source_mask to the encode function, then feed its
        # result, together with source_mask, target and target_mask, to the
        # decode function.
        return self.decode(self.encode(source, source_mask), source_mask,
                           target, target_mask)

    def encode(self, source, source_mask):
        """Encoding function, taking source and source_mask as arguments."""
        # Embed source with src_embed, then pass it and source_mask to self.encoder
        return self.encoder(self.src_embed(source), source_mask)

    def decode(self, memory, source_mask, target, target_mask):
        """Decoding function, taking memory (the encoder output), source_mask,
        target and target_mask as arguments."""
        # Embed target with tgt_embed, then pass it to self.decoder together
        # with memory, source_mask and target_mask
        return self.decoder(self.tgt_embed(target), memory, source_mask, target_mask)
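The target_mask passed to decode typically hides "future" positions so the decoder cannot attend to tokens it has not generated yet. The actual mask helper comes from an earlier post in this series; the following is only a minimal sketch of the standard subsequent-position mask, for reference:

import numpy as np
import torch

def subsequent_mask(size):
    """Returns a (1, size, size) mask that is True wherever a decoder
    position is allowed to attend (itself and earlier positions)."""
    attn_shape = (1, size, size)
    # Ones strictly above the diagonal mark the forbidden future positions
    mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
    # Invert so that allowed positions become True
    return torch.from_numpy(mask) == 0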
3. Instantiating the Parameters, Preparing Inputs, and Calling the Model
from torch.autograd import Variable

vocab_size = 1000
d_model = 512
# en, de and gen are the encoder, decoder and generator objects built in the
# earlier posts of this series
encoder = en
decoder = de
source_embed = nn.Embedding(vocab_size, d_model)
target_embed = nn.Embedding(vocab_size, d_model)
generator = gen
# Assume the source and target data are identical; in practice they differ
source = target = Variable(torch.LongTensor([[100, 2, 421, 508], [491, 998, 1, 221]]))
# Assume src_mask and tgt_mask are identical; in practice they differ
source_mask = target_mask = Variable(torch.zeros(8, 4, 4))
ed = EncoderDecoder(encoder, decoder, source_embed, target_embed, generator)
ed_result = ed(source, target, source_mask, target_mask)
print(ed_result)
print(ed_result.shape)
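With the toy inputs above, ed_result should come out with shape torch.Size([2, 4, 512]): batch size 2, sequence length 4, and d_model 512. Note that forward returns the decoder's hidden states; the generator is not applied inside forward.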
4. Code Walkthrough of the Transformer Model Construction
import copy

def make_model(source_vocab, target_vocab, N=6,
               d_model=512, d_ff=2048, head=8, dropout=0.1):
    """Builds the model. Takes 7 parameters: the source vocabulary size, the
    target vocabulary size, the number of stacked encoder and decoder layers,
    the embedding dimension, the hidden dimension of the position-wise
    feed-forward network, the number of attention heads, and the dropout rate."""
    # Grab a deep-copy shorthand: many of the structures below must be
    # deep-copied so that they stay independent of one another.
    c = copy.deepcopy
    # Instantiate the multi-headed attention class as attn
    attn = MultiHeadedAttention(head, d_model)
    # Instantiate the position-wise feed-forward class as ff
    ff = PositionwiseFeedForward(d_model, d_ff, dropout)
    # Instantiate the positional-encoding class as position
    position = PositionalEncoding(d_model, dropout)
    # Following the architecture diagram, the outermost module is
    # EncoderDecoder. Inside it are the encoder, the decoder, a Sequential of
    # the source embedding layer plus positional encoding, a Sequential of the
    # target embedding layer plus positional encoding, and the generator.
    # Each encoder layer holds an attention sublayer and a feed-forward
    # sublayer; each decoder layer holds two attention sublayers and a
    # feed-forward sublayer.
    model = EncoderDecoder(
        Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout), N),
        Decoder(DecoderLayer(d_model, c(attn), c(attn),
                             c(ff), dropout), N),
        nn.Sequential(Embeddings(d_model, source_vocab), c(position)),
        nn.Sequential(Embeddings(d_model, target_vocab), c(position)),
        Generator(d_model, target_vocab))
    # With the structure complete, initialize the model parameters, e.g. the
    # transformation matrices inside the linear layers. Whenever a parameter
    # has more than one dimension, it is initialized with Xavier uniform
    # initialization (the in-place xavier_uniform_ replaces the deprecated
    # nn.init.xavier_uniform).
    for p in model.parameters():
        if p.dim() > 1:
            nn.init.xavier_uniform_(p)
    return model
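A note on the initialization step: Xavier (Glorot) uniform initialization draws each weight from a uniform distribution U(-a, a) with a = sqrt(6 / (fan_in + fan_out)), which keeps the variance of activations roughly constant across layers. One-dimensional parameters such as biases and LayerNorm gains are skipped by the dim() > 1 check and keep their defaults.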
Calling the function:
source_vocab = 11
target_vocab = 11
N = 6
# All other parameters use their default values
if __name__ == '__main__':
res = make_model(source_vocab, target_vocab, N)
print(res)
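Beyond printing the module tree, a quick sanity check (a hypothetical snippet, assuming the helper classes from the earlier posts are in scope) is to count the trainable parameters of the assembled model:

model = make_model(source_vocab=11, target_vocab=11, N=6)
total = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f'Total trainable parameters: {total:,}')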
5. Output
# The final model structure, built according to the Transformer architecture diagram
EncoderDecoder(
(encoder): Encoder(
(layers): ModuleList(
(0): EncoderLayer(
(self_attn): MultiHeadedAttention(
(linears): ModuleList(
(0): Linear(in_features=512, out_features=512)
(1): Linear(in_features=512, out_features=512)
(2): Linear(in_features=512, out_features=512)
(3): Linear(in_features=512, out_features=512)
)
(dropout): Dropout(p=0.1)
)
(feed_forward): PositionwiseFeedForward(
(w_1): Linear(in_features=512, out_features=2048)
(w_2): Linear(in_features=2048, out_features=512)
(dropout): Dropout(p=0.1)
)
(sublayer): ModuleList(
(0): SublayerConnection(
(norm): LayerNorm(
)
(dropout): Dropout(p=0.1)
)
(1): SublayerConnection(
(norm): LayerNorm(
)
(dropout): Dropout(p=0.1)
)
)
)
(1): EncoderLayer(
(self_attn): MultiHeadedAttention(
(linears): ModuleList(
(0): Linear(in_features=512, out_features=512)
(1): Linear(in_features=512, out_features=512)
(2): Linear(in_features=512, out_features=512)
(3): Linear(in_features=512, out_features=512)
)
(dropout): Dropout(p=0.1)
)
(feed_forward): PositionwiseFeedForward(
(w_1): Linear(in_features=512, out_features=2048)
(w_2): Linear(in_features=2048, out_features=512)
(dropout): Dropout(p=0.1)
)
(sublayer): ModuleList(
(0): SublayerConnection(
(norm): LayerNorm(
)
(dropout): Dropout(p=0.1)
)
(1): SublayerConnection(
(norm): LayerNorm(
)
(dropout): Dropout(p=0.1)
)
)
)
)
(norm): LayerNorm(
)
)
(decoder): Decoder(
(layers): ModuleList(
(0): DecoderLayer(
(self_attn): MultiHeadedAttention(
(linears): ModuleList(
(0): Linear(in_features=512, out_features=512)
(1): Linear(in_features=512, out_features=512)
(2): Linear(in_features=512, out_features=512)
(3): Linear(in_features=512, out_features=512)
)
(dropout): Dropout(p=0.1)
)
(src_attn): MultiHeadedAttention(
(linears): ModuleList(
(0): Linear(in_features=512, out_features=512)
(1): Linear(in_features=512, out_features=512)
(2): Linear(in_features=512, out_features=512)
(3): Linear(in_features=512, out_features=512)
)
(dropout): Dropout(p=0.1)
)
(feed_forward): PositionwiseFeedForward(
(w_1): Linear(in_features=512, out_features=2048)
(w_2): Linear(in_features=2048, out_features=512)
(dropout): Dropout(p=0.1)
)
(sublayer): ModuleList(
(0): SublayerConnection(
(norm): LayerNorm(
)
(dropout): Dropout(p=0.1)
)
(1): SublayerConnection(
(norm): LayerNorm(
)
(dropout): Dropout(p=0.1)
)
(2): SublayerConnection(
(norm): LayerNorm(
)
(dropout): Dropout(p=0.1)
)
)
)
(1): DecoderLayer(
(self_attn): MultiHeadedAttention(
(linears): ModuleList(
(0): Linear(in_features=512, out_features=512)
(1): Linear(in_features=512, out_features=512)
(2): Linear(in_features=512, out_features=512)
(3): Linear(in_features=512, out_features=512)
)
(dropout): Dropout(p=0.1)
)
(src_attn): MultiHeadedAttention(
(linears): ModuleList(
(0): Linear(in_features=512, out_features=512)
(1): Linear(in_features=512, out_features=512)
(2): Linear(in_features=512, out_features=512)
(3): Linear(in_features=512, out_features=512)
)
(dropout): Dropout(p=0.1)
)
(feed_forward): PositionwiseFeedForward(
(w_1): Linear(in_features=512, out_features=2048)
(w_2): Linear(in_features=2048, out_features=512)
(dropout): Dropout(p=0.1)
)
(sublayer): ModuleList(
(0): SublayerConnection(
(norm): LayerNorm(
)
(dropout): Dropout(p=0.1)
)
(1): SublayerConnection(
(norm): LayerNorm(
)
(dropout): Dropout(p=0.1)
)
(2): SublayerConnection(
(norm): LayerNorm(
)
(dropout): Dropout(p=0.1)
)
)
)
)
(norm): LayerNorm(
)
)
(src_embed): Sequential(
(0): Embeddings(
(lut): Embedding(11, 512)
)
(1): PositionalEncoding(
(dropout): Dropout(p=0.1)
)
)
(tgt_embed): Sequential(
(0): Embeddings(
(lut): Embedding(11, 512)
)
(1): PositionalEncoding(
(dropout): Dropout(p=0.1)
)
)
(generator): Generator(
(proj): Linear(in_features=512, out_features=11)
)
)
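This repr was produced by an older PyTorch release; newer versions also print bias=True on Linear layers and inplace=False on Dropout, so your output may differ slightly in formatting while the structure stays the same.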
Summary
We learned and implemented the encoder-decoder class EncoderDecoder.
Its initializer takes 5 arguments: the encoder object, the decoder object, the source embedding function, the target embedding function, and the generator object for the output classes.
The class implements three methods: forward, encode, and decode.
forward is the main logic method; it takes four arguments: source (the source data), target (the target data), and source_mask and target_mask (the corresponding mask tensors).
encode is the encoding method, taking source and source_mask as arguments.
decode is the decoding method, taking memory (the encoder output), source_mask, target, and target_mask as arguments.
We learned and implemented the model-building function make_model.
It takes 7 parameters: the source vocabulary size, the target vocabulary size, the number of stacked encoder and decoder layers, the embedding dimension, the hidden dimension of the position-wise feed-forward network, the number of attention heads, and the dropout rate.
The function returns the fully assembled model object.
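Putting the two pieces together, here is a minimal end-to-end sketch (assuming the helper classes from the earlier posts are in scope, using hypothetical toy tensors, and assuming the series' Generator applies a log-softmax projection):

model = make_model(11, 11, N=2)
src = torch.LongTensor([[1, 2, 3, 4]])        # (batch=1, seq_len=4)
tgt = torch.LongTensor([[1, 2, 3, 4]])
src_mask = torch.ones(1, 4, 4)                # allow attention everywhere
tgt_mask = torch.ones(1, 4, 4)
hidden = model(src, tgt, src_mask, tgt_mask)  # (1, 4, 512) decoder states
log_probs = model.generator(hidden)           # (1, 4, 11) log-probabilities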