提前导包:
# Imports: torch core, optimizers, data loading, torchvision datasets/transforms,
# and visdom for live training visualization.
import torch
from torch import nn, optim
from torch.utils.data import DataLoader

from torchvision import transforms, datasets

import visdom
1.自编码器(Auto-Encoder)
class AE(nn.Module):
    """Auto-encoder for MNIST-sized images.

    Maps [b, 1, 28, 28] images through a 20-dim bottleneck and back to a
    reconstruction of the same shape.
    """

    def __init__(self):
        super(AE, self).__init__()

        # Encoder: [b, 784] => [b, 20]
        self.encoder = nn.Sequential(
            nn.Linear(784, 256),
            nn.ReLU(),
            nn.Linear(256, 64),
            nn.ReLU(),
            nn.Linear(64, 20),
            nn.ReLU()
        )
        # Decoder: [b, 20] => [b, 784]; Sigmoid keeps outputs in [0, 1],
        # matching normalized pixel intensities.
        self.decoder = nn.Sequential(
            nn.Linear(20, 64),
            nn.ReLU(),
            nn.Linear(64, 256),
            nn.ReLU(),
            nn.Linear(256, 784),
            nn.Sigmoid()
        )

    def forward(self, x):
        """Encode then decode a batch of images.

        Args:
            x: tensor of shape [b, 1, 28, 28].

        Returns:
            Tuple of (reconstruction with shape [b, 1, 28, 28], None).
            The trailing None is a KL-divergence placeholder so the return
            signature matches the VAE variant's (x_hat, kld).
        """
        batchsz = x.size(0)
        x = x.view(batchsz, 784)        # flatten image to a vector
        x = self.encoder(x)             # [b, 784] -> [b, 20]
        x = self.decoder(x)             # [b, 20] -> [b, 784]
        x = x.view(batchsz, 1, 28, 28)  # reshape back to image layout
        return x, None
2.变分自动编码器(Variational Auto-Encoder)
代码中的h和图中的ci,计算方法略有不同,代码中没有用指数。
KL散度计算公式(代码中与sigma相乘的torch.randn_like(sigma