A Simple GAN Example

# Assume each image is generated from a 100-dimensional noise vector
import torch
import torch.nn as nn
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import DataLoader
from torchvision.utils import save_image

# Hyperparameters
latent_dim = 100
batch_size = 128
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# Normalize maps pixels from [0, 1] to [-1, 1], matching the generator's Tanh output range
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize([0.5], [0.5])])

# Prepare the data
data = datasets.MNIST(root='./data/mnist', train=True, transform=transform, download=True)
data_loader = DataLoader(data, batch_size=batch_size, shuffle=True, num_workers=4)
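
A quick check of the pipeline (a sanity-check sketch added for this write-up, not part of the original script): each batch should be a (batch_size, 1, 28, 28) tensor with values in [-1, 1], matching the Tanh output range of the generator below.

# Optional check; run it under the __main__ guard on platforms that spawn
# DataLoader worker processes (e.g. Windows)
# sample_imgs, _ = next(iter(data_loader))
# print(sample_imgs.shape)                                   # torch.Size([128, 1, 28, 28])
# print(sample_imgs.min().item(), sample_imgs.max().item())  # close to -1.0 and 1.0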

# Build the models
class Generator(nn.Module):
    def __init__(self):
        super(Generator, self).__init__()
        self.model = nn.Sequential(
            nn.Linear(latent_dim, 128),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(128, 256),
            nn.BatchNorm1d(256, momentum=0.8),  # keyword matters: a positional 0.8 would set eps, not momentum
            # BatchNorm pulls inputs that have drifted toward the saturated ends of the
            # nonlinearity back toward a standard normal distribution, so the activation
            # sees values in its sensitive region and vanishing gradients are avoided.
            # That is why each BatchNorm layer here sits in front of its activation.
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(256, 512),
            nn.BatchNorm1d(512, momentum=0.8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(512, 1024),
            nn.BatchNorm1d(1024, momentum=0.8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(1024, 784),
            nn.Tanh(),
        )

    def forward(self, x):
        batch_size = x.shape[0]  # the input is a batch of noise vectors, shape (batch, 100)
        x = self.model(x)
        x = x.view(batch_size, 1, 28, 28)  # reshape the 784 outputs into 1x28x28 images
        return x
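
A throwaway forward pass confirms the wiring (a sanity-check sketch added for this write-up, not in the original post; the batch must be larger than 1 because BatchNorm1d needs batch statistics in training mode):

# Sanity check: 100-dim noise in, a batch of 1x28x28 images out
_g = Generator()
print(_g(torch.randn(16, latent_dim)).shape)  # torch.Size([16, 1, 28, 28])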

class Discriminator(nn.Module):
    def __init__(self):
        super(Discriminator, self).__init__()
        self.model = nn.Sequential(
            nn.Linear(784, 512),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(512, 256),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(256, 1),
            # The discriminator is a binary classifier; Sigmoid maps the score to a
            # probability in (0, 1) that the input image is real
            nn.Sigmoid(),
        )

    def forward(self, x):
        batch_size = x.shape[0]
        x = x.view(batch_size, -1)  # flatten each image to 784 pixels before the linear layers
        validity = self.model(x)
        return validity
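
The same check for the discriminator (again an added sketch, not in the original): a batch of images maps to one validity score per image.

# Sanity check: images in, one probability in (0, 1) per image out
_d = Discriminator()
print(_d(torch.randn(16, 1, 28, 28)).shape)  # torch.Size([16, 1])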

# Instantiate the models
generator = Generator()
generator.to(device)
discriminator = Discriminator()
discriminator.to(device)

optimizer_G = torch.optim.Adam(generator.parameters(), lr=0.0002)
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=0.0002)
criterion = nn.BCELoss()
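
A note on how one BCELoss drives both players (an illustration added for this write-up, not in the original post): with target 1 the loss reduces to -log(p), with target 0 to -log(1 - p). So criterion(discriminator(gen_imgs), valid) below is the non-saturating generator loss -log D(G(z)), and the discriminator minimizes the average of -log D(x) on real images and -log(1 - D(G(z))) on fakes.

# Numeric illustration of the two BCE branches for a prediction p = 0.9
_p = torch.tensor([[0.9]])
print(criterion(_p, torch.ones_like(_p)))   # -log(0.9) ≈ 0.1054
print(criterion(_p, torch.zeros_like(_p)))  # -log(0.1) ≈ 2.3026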



def train(epoch):
    for i, (imgs, _) in enumerate(data_loader):
        imgs = imgs.to(device)
        # Target labels of shape (batch, 1): 1.0 for real, 0.0 for fake
        valid = torch.ones(imgs.size(0), 1, device=device)
        fake = torch.zeros(imgs.size(0), 1, device=device)

        # First approach: train the generator before the discriminator

        # Random noise z of shape (batch, 100)
        z = torch.randn(imgs.size(0), latent_dim, device=device)

        gen_imgs = generator(z)
        optimizer_G.zero_grad()  # clear stale gradients before the generator update
        # The generator wants the discriminator to label its fakes as real (target 1)
        g_loss = criterion(discriminator(gen_imgs), valid)

        g_loss.backward()
        optimizer_G.step()

        # Train the discriminator; its loss has a real term and a fake term
        optimizer_D.zero_grad()
        real_loss = criterion(discriminator(imgs), valid)
        # detach() cuts the computation graph so the discriminator's backward
        # pass does not propagate gradients into the generator
        fake_loss = criterion(discriminator(gen_imgs.detach()), fake)
        d_loss = (real_loss + fake_loss) / 2
        d_loss.backward()
        optimizer_D.step()

        if i % 50 == 0:
            print(
                "[Epoch %d/100] [Batch %d/%d] [D loss: %f] [G loss: %f]"
                % (epoch, i, len(data_loader), d_loss.item(), g_loss.item())
            )
    # Slice along the batch dimension: save the first 25 images of the last
    # generated batch as a 5x5 grid
    save_image(gen_imgs.detach()[:25], 'images{}.png'.format(epoch), nrow=5, normalize=True)

if __name__ == '__main__':
    epochs = 100
    for epoch in range(epochs):
        train(epoch)
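    # After training, new digits can be sampled without tracking gradients
    # (a usage sketch added for this write-up, not part of the original post;
    # 'final_samples.png' is an arbitrary filename)
    generator.eval()  # switch BatchNorm to its running statistics
    with torch.no_grad():
        samples = generator(torch.randn(25, latent_dim, device=device))
    save_image(samples, 'final_samples.png', nrow=5, normalize=True)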

