# GAN demo script (MNIST)
# Written in Python with PyTorch 1.3; developed in the PyCharm IDE.
import os
import torch
import torchvision
import torch.nn as nn
from torchvision import transforms
from torchvision.utils import save_image
import matplotlib.pyplot as plt
import cv2
# Device configuration: pin PyTorch to GPU index 0 when CUDA is available,
# otherwise fall back to the CPU. (The original called set_device(0)
# unconditionally, which raises on CPU-only machines, and its comment
# incorrectly claimed GPU 1 was selected.)
if torch.cuda.is_available():
    torch.cuda.set_device(0)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Hyperparameters.
latent_size = 64    # dimensionality of the generator's input noise vector
hidden_size = 256   # width of the hidden layers in both networks
image_size = 784    # flattened 28x28 MNIST image
num_epochs = 200
batch_size = 100
sample_dir = 'gan_samples'

# Create the sample output directory in the current directory if missing.
os.makedirs(sample_dir, exist_ok=True)
# Image preprocessing: convert PIL images to tensors and rescale the
# single gray channel from [0, 1] to [-1, 1] (matches the generator's Tanh).
trans = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.5], [0.5]),
])

# MNIST training split, downloaded into ./data3 on first run.
mnist = torchvision.datasets.MNIST(
    root='data3', train=True, transform=trans, download=True)

# Serve the dataset in shuffled mini-batches.
data_loader = torch.utils.data.DataLoader(
    dataset=mnist, batch_size=batch_size, shuffle=True)
# Discriminator: maps a flattened 784-pixel image to a single
# real/fake probability in (0, 1).
_d_layers = [
    nn.Linear(image_size, hidden_size), nn.LeakyReLU(0.2),
    nn.Linear(hidden_size, hidden_size), nn.LeakyReLU(0.2),
    nn.Linear(hidden_size, 1), nn.Sigmoid(),
]
D = nn.Sequential(*_d_layers)
# Generator (plays the role of the decoder in a VAE): maps a latent
# noise vector to a flattened image with pixel values in [-1, 1].
_g_layers = [
    nn.Linear(latent_size, hidden_size), nn.ReLU(),
    nn.Linear(hidden_size, hidden_size), nn.ReLU(),
    nn.Linear(hidden_size, image_size), nn.Tanh(),
]
G = nn.Sequential(*_g_layers)
# Move the discriminator and generator onto the selected device (GPU/CPU).
D = D.to(device)
G = G.to(device)
# Binary cross-entropy loss shared by both training objectives, plus one
# independent Adam optimizer (lr=2e-4) per network's parameters.
criterion = nn.BCELoss()
d_optimizer = torch.optim.Adam(D.parameters(), lr=0.0002)
g_optimizer = torch.optim.Adam(G.parameters(), lr=0.0002)
def denorm(x):
    """Map a tensor from the normalized range [-1, 1] back to [0, 1].

    Values outside the range are clamped to the [0, 1] interval.
    """
    return ((x + 1) / 2).clamp(0, 1)
def reset_grad():
    """Clear accumulated gradients on both optimizers before a backward pass."""
    for opt in (d_optimizer, g_optimizer):
        opt.zero_grad()
# ------------------------------------------------------------------ #
# Training loop                                                      #
# ------------------------------------------------------------------ #
total_step = len(data_loader)
for epoch in range(num_epochs):
    for i, (images, _) in enumerate(data_loader):
        # Flatten images to (N, 784). Use the actual batch size so a
        # final partial batch does not break the reshape or the label
        # tensors (the original hard-coded the global batch_size).
        n = images.size(0)
        images = images.reshape(n, -1).to(device)

        # Target labels: 1 for real images, 0 for generated ones.
        real_labels = torch.ones(n, 1).to(device)
        fake_labels = torch.zeros(n, 1).to(device)

        # ---------------- Train the discriminator ----------------- #
        # Loss on real images: D should output 1.
        outputs = D(images)
        d_loss_real = criterion(outputs, real_labels)
        real_score = outputs

        # Loss on fake images sampled from the latent space: D should
        # output 0.
        z = torch.randn(n, latent_size).to(device)
        fake_images = G(z)
        outputs = D(fake_images)
        d_loss_fake = criterion(outputs, fake_labels)
        fake_score = outputs

        # Total discriminator loss; clear both networks' gradients,
        # backprop, and step only the discriminator's optimizer.
        d_loss = d_loss_real + d_loss_fake
        reset_grad()
        d_loss.backward()
        d_optimizer.step()

        # ------------------ Train the generator ------------------- #
        # The generator wants D to classify its samples as real, so
        # its loss targets the *real* labels.
        z = torch.randn(n, latent_size).to(device)
        fake_images = G(z)
        outputs = D(fake_images)
        g_loss = criterion(outputs, real_labels)
        reset_grad()
        g_loss.backward()
        g_optimizer.step()

        if (i + 1) % 200 == 0:
            print('Epoch [{}/{}], Step [{}/{}], d_loss: {:.4f}, g_loss: {:.4f}, D(x): {:.2f}, D(G(z)): {:.2f}'
                  .format(epoch, num_epochs, i + 1, total_step, d_loss.item(), g_loss.item(),
                          real_score.mean().item(), fake_score.mean().item()))

    # Save one grid of real images, on the first epoch only.
    if (epoch + 1) == 1:
        images = images.reshape(images.size(0), 1, 28, 28)
        save_image(denorm(images), os.path.join(sample_dir, 'real_images.png'))

    # Save the latest generated samples at the end of every epoch.
    fake_images = fake_images.reshape(fake_images.size(0), 1, 28, 28)
    save_image(denorm(fake_images), os.path.join(sample_dir, 'fake_images-{}.png'.format(epoch + 1)))

# Persist the trained weights.
torch.save(G.state_dict(), 'G.ckpt')
torch.save(D.state_dict(), 'D.ckpt')
def _show_sample(path):
    """Load the image at *path* with OpenCV and display it via matplotlib.

    Raises FileNotFoundError when the file is missing or unreadable,
    instead of letting cv2.imread's silent None reach plt.imshow.
    """
    img = cv2.imread(path)
    if img is None:  # cv2.imread returns None rather than raising
        raise FileNotFoundError('could not read image: {}'.format(path))
    # MNIST samples are grayscale, so OpenCV's BGR channel order renders
    # identically to RGB here.
    plt.imshow(img)
    plt.axis('off')  # hide the axes for a cleaner image view
    plt.show()


# Show the saved grid of real images, then the final generated grid.
_show_sample('./gan_samples/real_images.png')
_show_sample('./gan_samples/fake_images-200.png')