'''
Author: 365JHWZGo
Description: 20.autoencoder--review
Date: 2021/11/6 15:28
FilePath: day1106-1.py
'''
# 导包
import os
import torch
import torchvision
from torch.autograd import Variable
import matplotlib.pyplot as plt
import torch.utils.data as Data
import numpy as np
# Hyper-parameters
BATCH_SIZE = 64
LR = 0.002          # learning rate for RMSprop
EPOCH = 10
DOWNLOAD_MNIST = False
N_TEST_IMG = 5      # number of preview images shown during training

# Download MNIST only when the local copy is missing or empty.
MNIST_DIR = '../../mnist'
if not os.path.exists(MNIST_DIR) or not os.listdir(MNIST_DIR):
    DOWNLOAD_MNIST = True
# Training set: MNIST digits, converted to [0, 1] float tensors by ToTensor.
train_data = torchvision.datasets.MNIST(
    root='../../mnist',
    train=True,
    transform=torchvision.transforms.ToTensor(),
    download=DOWNLOAD_MNIST,
)

# Mini-batch loader: reshuffles every epoch, two worker processes.
train_loader = Data.DataLoader(
    dataset=train_data,
    batch_size=BATCH_SIZE,
    shuffle=True,
    num_workers=2,
)
# autoencoder
class Autoencoder(torch.nn.Module):
    """Symmetric MLP autoencoder: 784 -> 128 -> 64 -> 12 -> 3 and back.

    The encoder compresses a flattened 28x28 image to a 3-dim code;
    the decoder mirrors it and ends in Sigmoid so outputs stay in [0, 1],
    matching ToTensor-scaled pixel values.
    """

    def __init__(self):
        super(Autoencoder, self).__init__()
        # Encoder: progressively squeeze 784 inputs down to a 3-dim code.
        self.encode = torch.nn.Sequential(
            torch.nn.Linear(28 * 28, 128), torch.nn.Tanh(),
            torch.nn.Linear(128, 64), torch.nn.Tanh(),
            torch.nn.Linear(64, 12), torch.nn.Tanh(),
            torch.nn.Linear(12, 3),
        )
        # Decoder: mirror image of the encoder.
        self.decode = torch.nn.Sequential(
            torch.nn.Linear(3, 12), torch.nn.Tanh(),
            torch.nn.Linear(12, 64), torch.nn.Tanh(),
            torch.nn.Linear(64, 128), torch.nn.Tanh(),
            torch.nn.Linear(128, 28 * 28), torch.nn.Sigmoid(),
        )

    def forward(self, x):
        """Return (latent code, reconstruction) for flattened input x."""
        code = self.encode(x)
        return code, self.decode(code)
# Instantiate the model
autoencoder = Autoencoder()
# Optimizer
optimizer = torch.optim.RMSprop(autoencoder.parameters(), lr=LR)
# Reconstruction loss: pixel-wise mean squared error.
loss_func = torch.nn.MSELoss()

# Two rows of N_TEST_IMG panels: originals on top, reconstructions below.
f, a = plt.subplots(2, N_TEST_IMG, figsize=(5, 2))
plt.ion()  # interactive mode so the figure can be redrawn during training

# First N_TEST_IMG training images, flattened and scaled to [0, 1].
# FIX: `train_data.train_data` is deprecated in torchvision; `.data`
# is the supported accessor for the raw uint8 image tensor.
view_data = train_data.data[:N_TEST_IMG].view(-1, 28 * 28).type(torch.FloatTensor) / 255.
for i in range(N_TEST_IMG):
    # view_data is a plain tensor we just built, so call .numpy() directly.
    a[0][i].imshow(np.reshape(view_data.numpy()[i], (28, 28)), cmap='gray')
    a[0][i].set_xticks(())
    a[0][i].set_yticks(())
# 训练
if __name__ == '__main__':
for epoch in range(EPOCH):
for step, (x, y) in enumerate(train_loader):
train_x = Variable(x.view(-1, 28 * 28))
train_y = Variable(x.view(-1, 28 * 28))
encoded, decoded = autoencoder(train_x)
loss = loss_func(decoded, train_y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if step % 100 == 0:
print('epoch:', epoch, ' | loss:%.4f' % loss.data.numpy())
_, decoded_data = autoencoder(view_data)
for i in range(N_TEST_IMG):
a[1][i].clear()
a[1][i].imshow(np.reshape(decoded_data.data.numpy()[i], (28, 28)), cmap='gray')
a[1][i].set_xticks(())
a[1][i].set_yticks(())
plt.draw()
plt.pause(0.05)
plt.ioff()
plt.show()
# Sample console output from a completed run (left here as a comment so the
# file remains valid Python — these lines were raw program output):
#   epoch: 9 | loss:0.0380
#   epoch: 9 | loss:0.0328
#   epoch: 9 | loss:0.0323
#   epoch: 9 | loss:0.0350
#   epoch: 9 | loss:0.0334
#   epoch: 9 | loss:0.0321
#   epoch: 9 | loss:0.0335
#   epoch: 9 | loss:0.0348
#   epoch: 9 | loss:0.0322
#   epoch: 9 | loss:0.0328