在上一篇文章中,我们详细介绍了卷积神经网络 (CNN)、循环神经网络 (RNN) 及其变体 LSTM 和 GRU。本文将继续探讨其他必须掌握的深度学习网络架构,包括 Transformers、生成对抗网络 (GAN)、自编码器 (Autoencoder) 以及强化学习基础。我们将深入讲解这些技术的原理、应用场景,并提供相应的代码实现。
五、Transformer 架构
1. 基本原理
Transformer 是一种基于注意力机制 (Attention Mechanism) 的深度学习架构,由 Vaswani 等人在 2017 年的论文 "Attention Is All You Need" 中提出。与 RNN 和 CNN 不同,Transformer 完全摒弃了循环结构和卷积操作,仅依赖注意力机制来捕捉序列中的长距离依赖关系。
Transformer 的核心组件包括:
- 多头注意力机制 (Multi-Head Attention):允许模型从不同的表示子空间获取信息
- 自注意力机制 (Self-Attention):计算序列中每个位置对其他所有位置的注意力得分
- 位置编码 (Positional Encoding):为模型提供序列中位置信息
- 前馈神经网络 (Feed Forward Network):对每个位置的表示进行非线性变换
- 编码器 - 解码器架构 (Encoder-Decoder Architecture):分别处理输入序列和输出序列
2. 案例:机器翻译
下面是一个简化版的 Transformer 模型实现,用于英语到法语的机器翻译任务:
python
运行
import torch
import torch.nn as nn
import torch.optim as optim
import math
import numpy as np
from torchtext.legacy import data, datasets
import spacy
# Fix random seeds for reproducibility.
SEED = 42
torch.manual_seed(SEED)
np.random.seed(SEED)
# Load the spaCy tokenizer pipelines (must be downloaded beforehand, e.g.
# `python -m spacy download en_core_web_sm` / `fr_core_news_sm`).
spacy_en = spacy.load('en_core_web_sm')
spacy_fr = spacy.load('fr_core_news_sm')
def tokenize_en(text):
    """Split English *text* into a list of token strings with spaCy."""
    tokens = spacy_en.tokenizer(text)
    return [token.text for token in tokens]
def tokenize_fr(text):
    """Split French *text* into a list of token strings with spaCy."""
    tokens = spacy_fr.tokenizer(text)
    return [token.text for token in tokens]
# Define torchtext fields: tokenisation plus <sos>/<eos> markers, lowercased.
# NOTE(review): torchtext.legacy was removed in torchtext 0.12 — this listing
# requires an older torchtext release; confirm the pinned version.
SRC = data.Field(tokenize=tokenize_en, init_token='<sos>', eos_token='<eos>', lower=True)
TRG = data.Field(tokenize=tokenize_fr, init_token='<sos>', eos_token='<eos>', lower=True)
# Download/load the Multi30k English-French parallel corpus.
train_data, valid_data, test_data = datasets.Multi30k.splits(exts=('.en', '.fr'), fields=(SRC, TRG))
# Build vocabularies from the training split; words seen fewer than
# 2 times map to <unk>.
SRC.build_vocab(train_data, min_freq=2)
TRG.build_vocab(train_data, min_freq=2)
# Bucketed iterators batch similarly-sized sentences to minimise padding.
BATCH_SIZE = 128
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
train_iterator, valid_iterator, test_iterator = data.BucketIterator.splits(
    (train_data, valid_data, test_data),
    batch_size=BATCH_SIZE,
    device=device
)
# Sinusoidal positional encoding (from "Attention Is All You Need").
class PositionalEncoding(nn.Module):
    """Add sinusoidal position information to a [seq len, batch, d_model] input.

    Fix: the original registered ``pe`` with shape [max_len, d_model]; adding
    it to a sequence-first tensor [seq len, batch size, d_model] then fails to
    broadcast (the table's first axis would have to align with the batch axis).
    Storing it as [max_len, 1, d_model] broadcasts correctly over the batch.
    """
    def __init__(self, d_model, max_len=5000):
        super(PositionalEncoding, self).__init__()
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        # Geometric progression of wavelengths: 1/10000^(2i/d_model).
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(1)  # -> [max_len, 1, d_model], broadcasts over batch
        # Buffer, not a parameter: moves with .to(device), gets no gradients.
        self.register_buffer('pe', pe)

    def forward(self, x):
        # x: [seq len, batch size, d_model]
        return x + self.pe[:x.size(0), :]
# Multi-head scaled dot-product attention (sequence-first layout).
class MultiHeadAttention(nn.Module):
    """Multi-head attention over inputs shaped [seq len, batch size, hid dim].

    Returns (output, attention): output is [query len, batch size, hid dim],
    attention is [batch size, n heads, query len, key len].
    """
    def __init__(self, hid_dim, n_heads, dropout):
        super().__init__()
        assert hid_dim % n_heads == 0
        self.hid_dim = hid_dim
        self.n_heads = n_heads
        self.head_dim = hid_dim // n_heads
        self.fc_q = nn.Linear(hid_dim, hid_dim)
        self.fc_k = nn.Linear(hid_dim, hid_dim)
        self.fc_v = nn.Linear(hid_dim, hid_dim)
        self.fc_o = nn.Linear(hid_dim, hid_dim)
        self.dropout = nn.Dropout(dropout)
        # Fix: scale is a plain float.  The original built a CPU tensor and
        # moved it with `.to(device)` using a module-level global, coupling
        # the layer to that global and breaking moves to other devices.
        self.scale = math.sqrt(self.head_dim)

    def forward(self, query, key, value, mask=None):
        # query/key/value: [seq len, batch size, hid dim]
        batch_size = query.shape[1]
        Q = self.fc_q(query)
        K = self.fc_k(key)
        V = self.fc_v(value)
        # Split hid_dim into heads: -> [batch size, n heads, seq len, head dim]
        Q = Q.view(-1, batch_size, self.n_heads, self.head_dim).permute(1, 2, 0, 3)
        K = K.view(-1, batch_size, self.n_heads, self.head_dim).permute(1, 2, 0, 3)
        V = V.view(-1, batch_size, self.n_heads, self.head_dim).permute(1, 2, 0, 3)
        # Scaled dot-product scores: [batch size, n heads, query len, key len]
        energy = torch.matmul(Q, K.permute(0, 1, 3, 2)) / self.scale
        if mask is not None:
            # Masked positions get -1e10 so softmax assigns them ~0 weight.
            energy = energy.masked_fill(mask == 0, -1e10)
        attention = torch.softmax(energy, dim=-1)
        # Weighted sum of values: [batch size, n heads, query len, head dim]
        x = torch.matmul(self.dropout(attention), V)
        # Back to sequence-first and merge heads:
        # -> [query len, batch size, hid dim]
        x = x.permute(2, 0, 1, 3).contiguous()
        x = x.view(-1, batch_size, self.hid_dim)
        x = self.fc_o(x)
        return x, attention
# Position-wise feed-forward sublayer.
class PositionwiseFeedforward(nn.Module):
    """Two-layer MLP applied independently at every position:
    hid_dim -> pf_dim (ReLU, dropout) -> hid_dim."""
    def __init__(self, hid_dim, pf_dim, dropout):
        super().__init__()
        self.fc_1 = nn.Linear(hid_dim, pf_dim)
        self.fc_2 = nn.Linear(pf_dim, hid_dim)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        # x: [seq len, batch size, hid dim]
        hidden = torch.relu(self.fc_1(x))
        return self.fc_2(self.dropout(hidden))
# One Transformer encoder layer: self-attention then a position-wise FFN,
# each followed by dropout, a residual connection and LayerNorm (post-norm).
class EncoderLayer(nn.Module):
    def __init__(self, hid_dim, n_heads, pf_dim, dropout):
        super().__init__()
        self.self_attn_layer_norm = nn.LayerNorm(hid_dim)
        self.ff_layer_norm = nn.LayerNorm(hid_dim)
        self.self_attention = MultiHeadAttention(hid_dim, n_heads, dropout)
        self.positionwise_feedforward = PositionwiseFeedforward(hid_dim, pf_dim, dropout)
        self.dropout = nn.Dropout(dropout)

    def forward(self, src, src_mask):
        """src: [src len, batch size, hid dim]; src_mask: [batch, 1, 1, src len].
        Returns a tensor of the same shape as src."""
        # Self-attention sublayer (attention weights discarded here).
        attn_out, _ = self.self_attention(src, src, src, src_mask)
        src = self.self_attn_layer_norm(src + self.dropout(attn_out))
        # Feed-forward sublayer.
        ff_out = self.positionwise_feedforward(src)
        src = self.ff_layer_norm(src + self.dropout(ff_out))
        return src
# Transformer encoder: token embedding + positional encoding followed by a
# stack of identical encoder layers.
class Encoder(nn.Module):
    def __init__(self, input_dim, hid_dim, n_layers, n_heads, pf_dim, dropout, max_len=100):
        super().__init__()
        self.tok_embedding = nn.Embedding(input_dim, hid_dim)
        self.pos_embedding = PositionalEncoding(hid_dim, max_len)
        self.layers = nn.ModuleList([EncoderLayer(hid_dim, n_heads, pf_dim, dropout)
                                     for _ in range(n_layers)])
        self.dropout = nn.Dropout(dropout)
        # Embedding scale sqrt(d_model); plain float instead of the original
        # CPU tensor moved via the module-level `device` global.
        self.scale = math.sqrt(hid_dim)

    def forward(self, src, src_mask):
        """src: [src len, batch size] token indices;
        src_mask: [batch size, 1, 1, src len].
        Returns [src len, batch size, hid dim].

        Fix: the positional encoding wraps the *scaled embeddings*.  The
        original computed `... + self.pos_embedding(src)`, feeding the raw
        token-index tensor into PositionalEncoding — adding token ids into
        the position table, with an incompatible shape as well.
        """
        src = self.dropout(self.pos_embedding(self.tok_embedding(src) * self.scale))
        for layer in self.layers:
            src = layer(src, src_mask)
        return src
# One Transformer decoder layer: masked self-attention, encoder-decoder
# attention, then a position-wise FFN; each sublayer is followed by dropout,
# a residual connection and LayerNorm (post-norm).
class DecoderLayer(nn.Module):
    def __init__(self, hid_dim, n_heads, pf_dim, dropout):
        super().__init__()
        self.self_attn_layer_norm = nn.LayerNorm(hid_dim)
        self.enc_attn_layer_norm = nn.LayerNorm(hid_dim)
        self.ff_layer_norm = nn.LayerNorm(hid_dim)
        self.self_attention = MultiHeadAttention(hid_dim, n_heads, dropout)
        self.encoder_attention = MultiHeadAttention(hid_dim, n_heads, dropout)
        self.positionwise_feedforward = PositionwiseFeedforward(hid_dim, pf_dim, dropout)
        self.dropout = nn.Dropout(dropout)

    def forward(self, trg, enc_src, trg_mask, src_mask):
        """trg: [trg len, batch, hid dim]; enc_src: [src len, batch, hid dim];
        trg_mask: [batch, 1, trg len, trg len]; src_mask: [batch, 1, 1, src len].
        Returns (trg, attention) with attention = [batch, n heads, trg len, src len]."""
        # Masked (causal) self-attention over the target sequence.
        self_out, _ = self.self_attention(trg, trg, trg, trg_mask)
        trg = self.self_attn_layer_norm(trg + self.dropout(self_out))
        # Cross-attention: queries from the decoder, keys/values from the encoder.
        cross_out, attention = self.encoder_attention(trg, enc_src, enc_src, src_mask)
        trg = self.enc_attn_layer_norm(trg + self.dropout(cross_out))
        # Position-wise feed-forward.
        ff_out = self.positionwise_feedforward(trg)
        trg = self.ff_layer_norm(trg + self.dropout(ff_out))
        return trg, attention
# Transformer decoder: embedding + positional encoding, a stack of decoder
# layers, and a final linear projection to vocabulary logits.
class Decoder(nn.Module):
    def __init__(self, output_dim, hid_dim, n_layers, n_heads, pf_dim, dropout, max_len=100):
        super().__init__()
        self.tok_embedding = nn.Embedding(output_dim, hid_dim)
        self.pos_embedding = PositionalEncoding(hid_dim, max_len)
        self.layers = nn.ModuleList([DecoderLayer(hid_dim, n_heads, pf_dim, dropout)
                                     for _ in range(n_layers)])
        self.fc_out = nn.Linear(hid_dim, output_dim)
        self.dropout = nn.Dropout(dropout)
        # Embedding scale sqrt(d_model); plain float instead of the original
        # CPU tensor moved via the module-level `device` global.
        self.scale = math.sqrt(hid_dim)

    def forward(self, trg, enc_src, trg_mask, src_mask):
        """trg: [trg len, batch size] token indices;
        enc_src: [src len, batch size, hid dim];
        trg_mask: [batch, 1, trg len, trg len]; src_mask: [batch, 1, 1, src len].
        Returns (output, attention): output = [trg len, batch size, output dim],
        attention = [batch, n heads, trg len, src len] from the last layer.

        Fix: the positional encoding wraps the *scaled embeddings*; the
        original added `self.pos_embedding(trg)` of the raw token-index
        tensor (wrong values, incompatible shape).
        """
        trg = self.dropout(self.pos_embedding(self.tok_embedding(trg) * self.scale))
        for layer in self.layers:
            trg, attention = layer(trg, enc_src, trg_mask, src_mask)
        output = self.fc_out(trg)
        return output, attention
# Full encoder-decoder Transformer with padding and causal masking.
class Transformer(nn.Module):
    """Wraps an Encoder and a Decoder and builds the attention masks."""
    def __init__(self, encoder, decoder, src_pad_idx, trg_pad_idx):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.src_pad_idx = src_pad_idx
        self.trg_pad_idx = trg_pad_idx

    def make_src_mask(self, src):
        """Padding mask for the source.
        src: [src len, batch size] -> mask: [batch size, 1, 1, src len]
        (True where the token is NOT padding)."""
        src_mask = (src != self.src_pad_idx).permute(1, 0)
        src_mask = src_mask.unsqueeze(1).unsqueeze(2)
        return src_mask

    def make_trg_mask(self, trg):
        """Combined padding + causal (lower-triangular) mask for the target.
        trg: [trg len, batch size] -> mask: [batch size, 1, trg len, trg len]."""
        trg_pad_mask = (trg != self.trg_pad_idx).permute(1, 0)
        trg_pad_mask = trg_pad_mask.unsqueeze(1).unsqueeze(2)
        trg_len = trg.shape[0]
        # Fix: use trg.device rather than the module-level `device` global so
        # the model works on whatever device its inputs live on.
        trg_sub_mask = torch.tril(torch.ones((trg_len, trg_len), device=trg.device)).bool()
        # Broadcasting [batch,1,1,trg len] & [trg len,trg len]
        # -> [batch, 1, trg len, trg len]
        trg_mask = trg_pad_mask & trg_sub_mask
        return trg_mask

    def forward(self, src, trg):
        """src: [src len, batch size]; trg: [trg len, batch size].
        Returns (output, attention) from the decoder."""
        src_mask = self.make_src_mask(src)
        trg_mask = self.make_trg_mask(trg)
        enc_src = self.encoder(src, src_mask)
        output, attention = self.decoder(trg, enc_src, trg_mask, src_mask)
        return output, attention
# Model hyperparameters
INPUT_DIM = len(SRC.vocab)   # source (English) vocabulary size
OUTPUT_DIM = len(TRG.vocab)  # target (French) vocabulary size
HID_DIM = 256                # model/embedding dimension
ENC_LAYERS = 3
DEC_LAYERS = 3
ENC_HEADS = 8
DEC_HEADS = 8
ENC_PF_DIM = 512             # encoder feed-forward hidden size
DEC_PF_DIM = 512             # decoder feed-forward hidden size
ENC_DROPOUT = 0.1
DEC_DROPOUT = 0.1
# Padding token indices, used for masking and for ignoring loss terms.
SRC_PAD_IDX = SRC.vocab.stoi[SRC.pad_token]
TRG_PAD_IDX = TRG.vocab.stoi[TRG.pad_token]
# Build the encoder
enc = Encoder(INPUT_DIM, HID_DIM, ENC_LAYERS, ENC_HEADS, ENC_PF_DIM, ENC_DROPOUT)
# Build the decoder
dec = Decoder(OUTPUT_DIM, HID_DIM, DEC_LAYERS, DEC_HEADS, DEC_PF_DIM, DEC_DROPOUT)
# Assemble the full model and move it to the selected device
model = Transformer(enc, dec, SRC_PAD_IDX, TRG_PAD_IDX).to(device)
# Weight initialisation helper (used with model.apply).
def initialize_weights(m):
    """Apply Xavier-uniform init to every parameter named ``weight`` with
    more than one dimension (Linear / Embedding matrices, not biases)."""
    if not hasattr(m, 'weight'):
        return
    if m.weight.dim() > 1:
        nn.init.xavier_uniform_(m.weight.data)
# Initialise all weight matrices in-place.
model.apply(initialize_weights)
# Adam optimiser; cross-entropy ignores padding positions so they
# contribute no gradient.
optimizer = optim.Adam(model.parameters(), lr=0.0001)
criterion = nn.CrossEntropyLoss(ignore_index=TRG_PAD_IDX)
# One training epoch.
def train(model, iterator, optimizer, criterion, clip):
    """Train `model` for one epoch over `iterator`; return the mean batch loss.

    clip: maximum gradient norm used for gradient clipping.
    """
    model.train()
    epoch_loss = 0
    for i, batch in enumerate(iterator):
        src = batch.src
        trg = batch.trg
        optimizer.zero_grad()
        # Decoder input is trg[:-1] (sequence-first layout): the final token
        # is dropped so the model learns to predict each next token.
        output, _ = model(src, trg[:-1, :])
        # output = [trg len - 1, batch size, output dim]
        # trg = [trg len, batch size]
        output_dim = output.shape[-1]
        # Flatten for CrossEntropyLoss; targets drop the leading <sos>.
        output = output.contiguous().view(-1, output_dim)
        trg = trg[1:].contiguous().view(-1)
        # output = [(trg len - 1) * batch size, output dim]
        # trg = [(trg len - 1) * batch size]
        loss = criterion(output, trg)
        loss.backward()
        # Clip gradients to stabilise training.
        torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
        optimizer.step()
        epoch_loss += loss.item()
    return epoch_loss / len(iterator)
# Evaluation pass (no gradient updates).
def evaluate(model, iterator, criterion):
    """Compute the average loss of `model` over `iterator`."""
    model.eval()
    epoch_loss = 0
    with torch.no_grad():
        for i, batch in enumerate(iterator):
            src = batch.src
            trg = batch.trg
            # Decoder input is trg[:-1] (sequence-first): drop the final token.
            output, _ = model(src, trg[:-1, :])
            # output = [trg len - 1, batch size, output dim]
            # trg = [trg len, batch size]
            output_dim = output.shape[-1]
            # Flatten; targets drop the leading <sos> token.
            output = output.contiguous().view(-1, output_dim)
            trg = trg[1:].contiguous().view(-1)
            loss = criterion(output, trg)
            epoch_loss += loss.item()
    return epoch_loss / len(iterator)
# Main loop: keep the checkpoint with the best validation loss.
N_EPOCHS = 10
CLIP = 1
best_valid_loss = float('inf')
for epoch in range(N_EPOCHS):
    train_loss = train(model, train_iterator, optimizer, criterion, CLIP)
    valid_loss = evaluate(model, valid_iterator, criterion)
    if valid_loss < best_valid_loss:
        best_valid_loss = valid_loss
        torch.save(model.state_dict(), 'transformer-model.pt')
    print(f'Epoch: {epoch+1:02}')
    print(f'\tTrain Loss: {train_loss:.3f}')
    print(f'\t Val. Loss: {valid_loss:.3f}')
# Reload the best checkpoint and evaluate on the test set.
model.load_state_dict(torch.load('transformer-model.pt'))
test_loss = evaluate(model, test_iterator, criterion)
print(f'Test Loss: {test_loss:.3f}')
3. 模型解析
这个 Transformer 模型的结构如下:
- 编码器:由多个编码器层组成,每个层包含自注意力机制和前馈神经网络
- 解码器:由多个解码器层组成,每个层包含自注意力机制、编码器 - 解码器注意力机制和前馈神经网络
- 注意力机制:使用缩放点积注意力和多头注意力,允许模型关注序列中的不同部分
- 位置编码:为模型提供序列中位置信息,弥补缺少循环结构的不足
Transformer 模型在机器翻译任务上取得了很好的效果,并且成为了后续许多自然语言处理模型的基础,如 BERT、GPT 等。
六、生成对抗网络 (GAN)
1. 基本原理
生成对抗网络 (Generative Adversarial Networks, GAN) 由 Ian Goodfellow 在 2014 年提出,是一种由两个神经网络组成的框架:生成器 (Generator) 和判别器 (Discriminator)。这两个网络通过对抗训练相互竞争,从而提高各自的性能。
- 生成器:尝试生成逼真的数据,如图片、文本等
- 判别器:尝试区分生成的数据和真实的数据
- 对抗训练:生成器和判别器交替训练,形成一种博弈过程
GAN 的训练目标可以表示为一个极小极大博弈:
$\min_G \max_D V(D, G) = \mathbb{E}_{x \sim p_{\text{data}}(x)}[\log D(x)] + \mathbb{E}_{z \sim p_z(z)}[\log(1 - D(G(z)))]$
其中,x 是真实数据,z 是随机噪声,G (z) 是生成器生成的数据,D (x) 是判别器对真实数据的判断概率,D (G (z)) 是判别器对生成数据的判断概率。
2. 案例:手写数字生成
下面是一个使用 PyTorch 实现的简单 GAN,用于生成 MNIST 数据集中的手写数字:
python
运行
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import matplotlib.pyplot as plt
import numpy as np
# Hyperparameters
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
learning_rate = 3e-4
batch_size = 128
image_dim = 28 * 28  # flattened 28x28 MNIST image
noise_dim = 100      # dimensionality of the generator's input noise
num_epochs = 50
# Normalise pixel values from [0, 1] to [-1, 1] so they match the
# generator's Tanh output range.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,))
])
dataset = datasets.MNIST(root='dataset/', transform=transform, download=True)
loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
# Generator network.
class Generator(nn.Module):
    """Map a z_dim noise vector to a flattened image with values in [-1, 1]."""
    def __init__(self, z_dim, img_dim):
        super().__init__()
        layers = [
            nn.Linear(z_dim, 256),
            nn.LeakyReLU(0.1),
            nn.Linear(256, img_dim),
            # Tanh keeps outputs in [-1, 1], matching the normalised data.
            nn.Tanh(),
        ]
        self.gen = nn.Sequential(*layers)

    def forward(self, x):
        return self.gen(x)
# Discriminator network.
class Discriminator(nn.Module):
    """Map a flattened image to the probability that it is real."""
    def __init__(self, img_dim):
        super().__init__()
        layers = [
            nn.Linear(img_dim, 128),
            nn.LeakyReLU(0.1),
            nn.Linear(128, 1),
            # Sigmoid squashes the logit into a probability.
            nn.Sigmoid(),
        ]
        self.disc = nn.Sequential(*layers)

    def forward(self, x):
        return self.disc(x)
# Build models, optimisers and the binary cross-entropy loss.
gen = Generator(noise_dim, image_dim).to(device)
disc = Discriminator(image_dim).to(device)
opt_gen = optim.Adam(gen.parameters(), lr=learning_rate)
opt_disc = optim.Adam(disc.parameters(), lr=learning_rate)
criterion = nn.BCELoss()
# A fixed noise batch so generated samples are comparable across epochs.
fixed_noise = torch.randn(64, noise_dim).to(device)
# Adversarial training loop.
for epoch in range(num_epochs):
    for batch_idx, (real, _) in enumerate(loader):
        real = real.view(-1, 784).to(device)  # flatten images to vectors
        batch_size = real.shape[0]
        ### Train discriminator: max log(D(x)) + log(1 - D(G(z)))
        noise = torch.randn(batch_size, noise_dim).to(device)
        fake = gen(noise)
        disc_real = disc(real).view(-1)
        lossD_real = criterion(disc_real, torch.ones_like(disc_real))
        # detach() stops the discriminator's loss from backpropagating
        # through the generator.
        disc_fake = disc(fake.detach()).view(-1)
        lossD_fake = criterion(disc_fake, torch.zeros_like(disc_fake))
        lossD = (lossD_real + lossD_fake) / 2
        disc.zero_grad()
        lossD.backward()
        opt_disc.step()
        ### 训练生成器: min log(1 - D(G(z))) <-> max log(D(G(z))
        # Non-saturating generator loss: label fakes as real.
        output = disc(fake).view(-1)
        lossG = criterion(output, torch.ones_like(output))
        gen.zero_grad()
        lossG.backward()
        opt_gen.step()
        # Log once per epoch (first batch only).
        if batch_idx == 0:
            print(
                f"Epoch [{epoch}/{num_epochs}] Batch {batch_idx}/{len(loader)} \
                Loss D: {lossD:.4f}, loss G: {lossG:.4f}"
            )
            with torch.no_grad():
                fake = gen(fixed_noise).reshape(-1, 1, 28, 28)
                real = real.reshape(-1, 1, 28, 28)
                img_grid_fake = torchvision.utils.make_grid(fake, normalize=True)
                img_grid_real = torchvision.utils.make_grid(real, normalize=True)
    # Show generated vs. real images every 10 epochs.
    if epoch % 10 == 0:
        fig, axs = plt.subplots(1, 2, figsize=(10, 5))
        axs[0].imshow(img_grid_fake.cpu().permute(1, 2, 0))
        axs[0].set_title('Generated Images')
        axs[0].axis('off')
        axs[1].imshow(img_grid_real.cpu().permute(1, 2, 0))
        axs[1].set_title('Real Images')
        axs[1].axis('off')
        plt.show()
3. 模型解析
这个 GAN 模型的结构如下:
- 生成器:接收随机噪声作为输入,通过全连接层生成 784 维的图像向量
- 判别器:接收图像向量作为输入,输出一个概率值,表示该图像是真实的概率
- 训练过程:判别器和生成器交替训练,判别器试图最大化正确分类的概率,生成器试图最大化判别器对其生成图像的错误分类概率
经过训练,生成器能够生成逼真的手写数字图像,这些图像在视觉上与真实的 MNIST 数据几乎无法区分。
七、自编码器 (Autoencoder)
1. 基本原理
自编码器 (Autoencoder) 是一种特殊类型的神经网络,用于学习数据的压缩表示 (编码) 和解压缩表示 (解码)。自编码器由两部分组成:编码器和解码器。
- 编码器 (Encoder):将输入数据压缩为低维表示 (编码)
- 解码器 (Decoder):将编码恢复为原始数据
- 训练目标:最小化重构误差,即输入数据和重构数据之间的差异
自编码器有多种变体,包括:
- 简单自编码器
- 变分自编码器 (Variational Autoencoder, VAE)
- 去噪自编码器 (Denoising Autoencoder)
- 稀疏自编码器 (Sparse Autoencoder)
2. 案例:图像压缩与重构
下面是一个使用 PyTorch 实现的简单自编码器,用于 MNIST 图像的压缩与重构:
python
运行
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import numpy as np
# Hyperparameters
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
learning_rate = 1e-3
batch_size = 64
num_epochs = 10
# Normalise pixels from [0, 1] to [-1, 1] to match the decoder's Tanh range.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,))
])
dataset = datasets.MNIST(root='dataset/', transform=transform, download=True)
loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
# A small fully-connected autoencoder for flattened 28x28 MNIST images.
class Autoencoder(nn.Module):
    """Compress a 784-dim image to a 3-dim latent code and reconstruct it.

    The decoder ends in Tanh so reconstructions live in [-1, 1], matching
    the Normalize((0.5,), (0.5,)) preprocessing.
    """
    def __init__(self):
        super().__init__()
        # Encoder: 784 -> 128 -> 64 -> 12 -> 3 (ReLU between layers).
        widths = [28 * 28, 128, 64, 12]
        enc_layers = []
        for n_in, n_out in zip(widths, widths[1:]):
            enc_layers += [nn.Linear(n_in, n_out), nn.ReLU()]
        enc_layers.append(nn.Linear(widths[-1], 3))
        self.encoder = nn.Sequential(*enc_layers)
        # Decoder mirrors the encoder: 3 -> 12 -> 64 -> 128 -> 784.
        dec_layers = []
        for n_in, n_out in zip(reversed(widths[1:] + [3]), reversed(widths[:-1] + [12])):
            pass  # (sizes built explicitly below for clarity)
        dec_layers = [
            nn.Linear(3, 12), nn.ReLU(),
            nn.Linear(12, 64), nn.ReLU(),
            nn.Linear(64, 128), nn.ReLU(),
            nn.Linear(128, 28 * 28),
            nn.Tanh(),  # outputs in [-1, 1]
        ]
        self.decoder = nn.Sequential(*dec_layers)

    def forward(self, x):
        return self.decoder(self.encoder(x))
# Model, loss and optimiser.
model = Autoencoder().to(device)
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
# Training loop: minimise the pixel-wise reconstruction MSE.
for epoch in range(num_epochs):
    for batch_idx, (data, _) in enumerate(loader):
        data = data.view(-1, 784).to(device)  # flatten to vectors
        # Forward pass
        recon = model(data)
        loss = criterion(recon, data)
        # Backward pass and parameter update
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    # Loss of the last batch of the epoch.
    print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')
# Visualise reconstruction quality on one batch.
with torch.no_grad():
    sample = next(iter(loader))[0].view(-1, 784).to(device)
    recon = model(sample)
    sample = sample.cpu().view(-1, 1, 28, 28)
    recon = recon.cpu().view(-1, 1, 28, 28)
# Top row: originals; bottom row: reconstructions.
fig, axes = plt.subplots(2, 5, figsize=(10, 4))
axes = axes.flatten()
for i in range(5):
    axes[i].imshow(sample[i][0], cmap='gray')
    axes[i].set_title('Original')
    axes[i].axis('off')
    axes[i+5].imshow(recon[i][0], cmap='gray')
    axes[i+5].set_title('Reconstructed')
    axes[i+5].axis('off')
plt.tight_layout()
plt.show()
3. 模型解析
这个自编码器模型的结构如下:
- 编码器:将 784 维的图像向量压缩为 3 维的潜在表示
- 解码器:将 3 维的潜在表示恢复为 784 维的图像向量
- 训练目标:最小化原始图像和重构图像之间的均方误差
通过训练,自编码器学会了如何将图像压缩为低维表示,并从这些表示中重构出原始图像。这种压缩表示可以用于特征提取、数据可视化和生成新数据等任务。
八、强化学习基础
1. 基本原理
强化学习 (Reinforcement Learning, RL) 是一种机器学习范式,智能体 (Agent) 通过与环境 (Environment) 进行交互,学习如何选择最优动作以最大化累积奖励。强化学习的核心组件包括:
- 智能体 (Agent):执行动作的决策者
- 环境 (Environment):智能体所处的外部世界
- 状态 (State):环境的当前情况
- 动作 (Action):智能体可以执行的操作
- 奖励 (Reward):环境对智能体动作的反馈
- 策略 (Policy):智能体选择动作的规则
强化学习的目标是找到一个最优策略 π*,使得累积奖励的期望值最大:
$\pi^* = \arg\max_{\pi} \, \mathbb{E}\!\left[\sum_{t} \gamma^{t} R_t \,\middle|\, \pi\right]$
其中,γ 是折扣因子,R_t 是时间步 t 的奖励。
2. 案例:OpenAI Gym CartPole 问题
下面是一个使用 PyTorch 实现的深度 Q 网络 (DQN),用于解决 OpenAI Gym 中的 CartPole 平衡问题:
python
运行
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import random
from collections import deque, namedtuple
# Hyperparameters
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
GAMMA = 0.99             # discount factor
EPS_START = 0.9          # initial epsilon for epsilon-greedy exploration
EPS_END = 0.05           # final epsilon
EPS_DECAY = 200          # decay time-constant (in steps) for epsilon
BATCH_SIZE = 128         # minibatch size sampled from replay memory
LR = 1e-4                # learning rate
MEMORY_CAPACITY = 10000  # replay buffer capacity
TARGET_UPDATE = 10       # episodes between target-network syncs
NUM_EPISODES = 500       # number of training episodes
# Experience replay buffer.
# One (state, action, next_state, reward) experience tuple.
Transition = namedtuple('Transition', ('state', 'action', 'next_state', 'reward'))

class ReplayMemory(object):
    """Fixed-capacity FIFO buffer of Transitions with uniform sampling."""
    def __init__(self, capacity):
        # A bounded deque silently evicts the oldest entry once full.
        self.memory = deque([], maxlen=capacity)

    def push(self, *args):
        """Store one transition."""
        self.memory.append(Transition(*args))

    def sample(self, batch_size):
        """Return `batch_size` transitions drawn uniformly without replacement."""
        return random.sample(self.memory, batch_size)

    def __len__(self):
        """Number of transitions currently stored."""
        return len(self.memory)
# Q-network: a 3-layer MLP mapping a state vector to one Q-value per action.
class DQN(nn.Module):
    def __init__(self, state_dim, action_dim):
        super(DQN, self).__init__()
        self.layer1 = nn.Linear(state_dim, 128)
        self.layer2 = nn.Linear(128, 128)
        self.layer3 = nn.Linear(128, action_dim)

    def forward(self, x):
        """x: [batch, state_dim] -> Q-values [batch, action_dim]."""
        hidden = torch.relu(self.layer1(x))
        hidden = torch.relu(self.layer2(hidden))
        return self.layer3(hidden)
# Create the CartPole environment.
env = gym.make('CartPole-v1')
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.n
# Policy network (acts and learns) and target network (provides stable
# TD targets); the target starts as a copy of the policy network.
policy_net = DQN(state_dim, action_dim).to(device)
target_net = DQN(state_dim, action_dim).to(device)
target_net.load_state_dict(policy_net.state_dict())
target_net.eval()  # the target network is only ever evaluated
optimizer = optim.Adam(policy_net.parameters(), lr=LR)
memory = ReplayMemory(MEMORY_CAPACITY)
# Epsilon-greedy action selection.
steps_done = 0
def select_action(state):
    """Pick an action for `state`: greedy w.p. 1-eps, uniform-random otherwise.

    Epsilon decays exponentially from EPS_START to EPS_END, driven by the
    global step counter `steps_done`.
    """
    global steps_done
    sample = random.random()
    eps_threshold = EPS_END + (EPS_START - EPS_END) * \
    np.exp(-1. * steps_done / EPS_DECAY)
    steps_done += 1
    if sample > eps_threshold:
        with torch.no_grad():
            # max(1) gives the largest Q-value per row; [1] is its index,
            # i.e. the greedy action.
            return policy_net(state).max(1)[1].view(1, 1)
    else:
        return torch.tensor([[random.randrange(action_dim)]], device=device, dtype=torch.long)
# One optimisation step on the policy network.
def optimize_model():
    """Sample a minibatch from replay memory and do one gradient step.

    No-op until the buffer holds at least BATCH_SIZE transitions.
    """
    if len(memory) < BATCH_SIZE:
        return
    transitions = memory.sample(BATCH_SIZE)
    # Transpose the list of Transitions into one Transition of batched fields.
    batch = Transition(*zip(*transitions))
    # Mask of non-terminal states; terminal next_states are expected to be
    # stored as None so they can be excluded here.
    non_final_mask = torch.tensor(tuple(map(lambda s: s is not None,
    batch.next_state)), device=device, dtype=torch.bool)
    non_final_next_states = torch.cat([s for s in batch.next_state
    if s is not None])
    state_batch = torch.cat(batch.state)
    action_batch = torch.cat(batch.action)
    reward_batch = torch.cat(batch.reward)
    # Q(s_t, a): evaluate Q for all actions, then gather the taken ones.
    state_action_values = policy_net(state_batch).gather(1, action_batch)
    # V(s_{t+1}) from the target network; stays zero for terminal states.
    next_state_values = torch.zeros(BATCH_SIZE, device=device)
    with torch.no_grad():
        next_state_values[non_final_mask] = target_net(non_final_next_states).max(1)[0]
    # TD target: r + gamma * V(s_{t+1}).
    expected_state_action_values = (next_state_values * GAMMA) + reward_batch
    # Huber loss is less sensitive to outlier TD errors than MSE.
    criterion = nn.SmoothL1Loss()
    loss = criterion(state_action_values, expected_state_action_values.unsqueeze(1))
    # Gradient step
    optimizer.zero_grad()
    loss.backward()
    # Clip gradient values to stabilise training.
    torch.nn.utils.clip_grad_value_(policy_net.parameters(), 100)
    optimizer.step()
# Training loop
episode_durations = []
for i_episode in range(NUM_EPISODES):
    # Reset environment; classic gym returns just the observation here.
    # NOTE(review): gym >= 0.26 returns (obs, info) from reset() and a
    # 5-tuple from step() — confirm the installed gym version.
    state = env.reset()
    state = torch.tensor(state, dtype=torch.float32, device=device).unsqueeze(0)
    for t in range(500):  # CartPole-v1 caps episodes at 500 steps
        # Choose and execute an action.
        action = select_action(state)
        next_state, reward, done, _ = env.step(action.item())
        reward = torch.tensor([reward], device=device)
        if done:
            # Fix: terminal states must be stored as None.  optimize_model()
            # builds non_final_mask from `s is not None`; storing the raw
            # terminal observation (as the original did) wrongly bootstraps
            # V(s') past the end of the episode.
            next_state = None
        else:
            next_state = torch.tensor(next_state, dtype=torch.float32, device=device).unsqueeze(0)
        # Save the transition and advance.
        memory.push(state, action, next_state, reward)
        state = next_state
        # One optimisation step on the policy network.
        optimize_model()
        if done:
            episode_durations.append(t + 1)
            print(f'Episode {i_episode+1} finished after {t+1} steps')
            break
    # Periodically sync the target network with the policy network.
    if i_episode % TARGET_UPDATE == 0:
        target_net.load_state_dict(policy_net.state_dict())
print('Training complete')
env.close()
# Visualise episode durations.  Fix: matplotlib was used below but never
# imported in this listing (NameError on `plt`).
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 6))
plt.plot(episode_durations)
plt.xlabel('Episode')
plt.ylabel('Duration')
plt.title('CartPole Training Performance')
plt.show()
3. 模型解析
这个 DQN 模型的结构如下:
- 策略网络:用于选择当前最优动作
- 目标网络:用于计算目标 Q 值,提高训练稳定性
- 经验回放缓冲区:存储智能体的经验,用于随机采样和减少数据相关性
- epsilon-greedy 策略:平衡探索和利用
通过训练,智能体学会了如何平衡杆子,使其保持直立尽可能长的时间。DQN 是强化学习中的经典算法,后续还有许多改进版本,如 Double DQN、Dueling DQN 等。
九、总结与展望
深度学习领域的网络架构不断发展和创新,本文介绍的 Transformer、GAN、自编码器和强化学习只是其中的一部分。这些技术已经在计算机视觉、自然语言处理、语音识别、机器人等领域取得了显著成果。
对于初学者来说,掌握这些核心网络架构的原理和实现是非常重要的。随着技术的不断发展,新的模型和算法不断涌现,如 Vision Transformer、Diffusion Models、Reinforcement Learning from Human Feedback 等。保持学习的热情和好奇心,不断探索和实践,才能在这个快速发展的领域中保持竞争力。