废话不多说,直接上源码。
# Visualize MNIST with an autoencoder (AE) + t-SNE.
import torchvision
from torchvision import transforms
import torch
from torch import nn
import matplotlib.pyplot as plt

batch_size = 30

train_dataset = torchvision.datasets.MNIST(
    root='.data', train=True, transform=transforms.ToTensor(), download=True)
test_dataset = torchvision.datasets.MNIST(
    root='.data', train=False, transform=transforms.ToTensor())
train_loader = torch.utils.data.DataLoader(
    dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(
    dataset=test_dataset, batch_size=batch_size, shuffle=False)


class DeepAutoEncoder(nn.Module):
    """Convolutional autoencoder for 1x28x28 MNIST images.

    The encoder compresses the image through four strided convolutions
    into a small feature map; the decoder mirrors it with transposed
    convolutions back to a 1x28x28 reconstruction.
    """

    # BUG FIX: the constructor was misspelled ``__int__``, so it was never
    # called and ``self.encoder`` / ``self.decoder`` were never created —
    # ``forward`` would raise AttributeError on the first batch.
    def __init__(self):
        super(DeepAutoEncoder, self).__init__()
        self.encoder = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=3, kernel_size=2,
                      stride=1, padding=0, bias=False),
            nn.BatchNorm2d(3),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(in_channels=3, out_channels=6, kernel_size=3,
                      stride=2, padding=0, bias=False),
            nn.BatchNorm2d(6),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(in_channels=6, out_channels=6, kernel_size=3,
                      stride=2, padding=0, bias=False),
            nn.BatchNorm2d(6),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(in_channels=6, out_channels=6, kernel_size=4,
                      stride=2, padding=0, bias=False),
            nn.BatchNorm2d(6),
            nn.LeakyReLU(0.2, inplace=True),
        )
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(in_channels=6, out_channels=6,
                               kernel_size=4, stride=2, bias=False),
            nn.BatchNorm2d(6),
            nn.ReLU(True),
            nn.ConvTranspose2d(in_channels=6, out_channels=6,
                               kernel_size=3, stride=2, bias=False),
            nn.BatchNorm2d(6),
            nn.ReLU(True),
            nn.ConvTranspose2d(in_channels=6, out_channels=3,
                               kernel_size=3, stride=2, bias=False),
            nn.BatchNorm2d(3),
            nn.ReLU(True),
            nn.ConvTranspose2d(in_channels=3, out_channels=1,
                               kernel_size=2, stride=1, bias=False),
            nn.BatchNorm2d(1),
        )

    def forward(self, x):
        """Encode then reconstruct; returns a tensor shaped like ``x``."""
        # Removed the debug ``print(x.shape)`` that fired on every batch.
        return self.decoder(self.encoder(x))


# Generalization: fall back to CPU when CUDA is unavailable instead of
# crashing on the hard-coded ``.cuda()`` calls.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
AE = DeepAutoEncoder().to(device)
criterion = nn.L1Loss()
learning_rate = 0.001
optimizer = torch.optim.Adam(AE.parameters(), lr=learning_rate)

list_loss = []
num_epoch = 10
for epoch in range(num_epoch):
    totalloss = 0
    AE.train()
    for i, (images, _) in enumerate(train_loader):
        images = images.to(device)
        fake_image = AE(images)
        loss = criterion(fake_image, images)
        optimizer.zero_grad()       # clear stale gradients
        # BUG FIX: ``loss.backward()`` was missing, so ``optimizer.step()``
        # had no gradients to apply and the model never trained.
        loss.backward()
        optimizer.step()
        totalloss = totalloss + loss.item()
    # Record the mean training loss of the epoch (the one-line original is
    # ambiguous about per-step vs per-epoch logging; per-epoch assumed).
    list_loss.append(totalloss / (i + 1))

fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(13, 7))
axes.plot(list_loss, 'k--')
# BUG FIX: ``torch.stack`` needs a sequence of tensors, not a generator.
T_images = torch.stack([train_dataset[i][0] for i in range(16)])
# BUG FIX: bare ``show()`` was a NameError; call it on pyplot.
plt.show()
有用的话,一键三连哦,码字不易!