第G1周:生成对抗网络(GAN)入门

基础任务

  1. 了解什么是生成对抗网络
  2. 生成对抗网络结构是怎么样的
  3. 学习本文代码,并跑通代码

一、前期准备

  1. 定义超参数
import argparse
import os
import numpy as np
import torchvision.transforms as transforms
from torchvision.utils import save_image
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
import torch.nn as nn
import torch

## Create the output directories up front (no error if they already exist):
## generated sample images, model checkpoints, and the MNIST download cache.
for _out_dir in ('./images', './save/', './datasets/mnist'):
    os.makedirs(_out_dir, exist_ok=True)

## Hyperparameter configuration
n_epochs = 50           # number of full passes over the training set
batch_size = 64         # samples per mini-batch
lr = 0.0002             # Adam learning rate for both networks
b1 = 0.5                # Adam beta1 (gradient running-average coefficient)
b2 = 0.999              # Adam beta2 (squared-gradient running-average coefficient)
n_cpu = 2               # worker count (currently unused by the DataLoader below)
latent_dim = 100        # dimensionality of the generator's noise input z
img_size = 28           # height/width of the (square) MNIST images
channels = 1            # MNIST is grayscale, so a single channel
sample_interval = 400   # save a grid of generated samples every N batches

## Image shape (1, 28, 28) and the flattened pixel count (1*28*28 = 784)
img_shape = (channels, img_size, img_size)
img_area = np.prod(img_shape)

## Use CUDA when a GPU is available. torch.cuda.is_available() already
## returns a bool, so the redundant `True if ... else False` ternary of
## the original is dropped.
cuda = torch.cuda.is_available()
print(cuda)
  2. 下载数据
## Download the MNIST training split into ./datasets. Each image is resized
## to img_size, converted to a tensor, and normalized from [0, 1] to [-1, 1]
## (mean 0.5, std 0.5) to match the generator's Tanh output range.
mnist_transform = transforms.Compose([
    transforms.Resize(img_size),
    transforms.ToTensor(),
    transforms.Normalize([0.5], [0.5]),
])
minist = datasets.MNIST(root='./datasets', train=True, download=True,
                        transform=mnist_transform)
  3. 配置数据
dataloader=DataLoader(minist,batch_size=batch_size,shuffle=True)

二、定义模型

  1. 定义鉴别器
## #####定义判别器 Discriminator ######
## 将图片28*28展开成784,然后通过多层感知器,中间经过斜率设置为0.2的LeakyReLU激活函数,最后接sigmoid激活函数得到一个0到1之间的概率进行二分类
class Discriminator(nn.Module):
    """MLP discriminator for 28x28 MNIST images.

    Flattens each image to 784 values, passes it through
    784 -> 512 -> 256 -> 1 linear layers with LeakyReLU (slope 0.2)
    between them, and ends with a sigmoid that yields a real/fake
    probability in (0, 1) for binary classification.
    """

    def __init__(self):
        super(Discriminator, self).__init__()
        stack = [
            nn.Linear(img_area, 512),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(512, 256),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(256, 1),
            nn.Sigmoid(),
        ]
        self.model = nn.Sequential(*stack)

    def forward(self, img):
        ## Flatten (N, 1, 28, 28) -> (N, 784) before the linear stack.
        flattened = img.view(img.size(0), -1)
        return self.model(flattened)
  2. 定义生成器
## #### 定义生成器 Generator ######
## 输入一个100维的0~1之间的高斯分布,然后通过第一层线性变换将其映射到256维,
## 然后通过LeaKyReLU激活函数,接着进行一个线性变换,再经过一个LeaKyReLU激活函数,
## 然后经过线性变换将其变成784维,最后经过Tanh激活函数是希望生成的假的图片数据分布,能够在-1~1之间。
class Generator(nn.Module):
    """MLP generator: maps a latent_dim-dimensional Gaussian noise vector
    through 128 -> 256 -> 512 -> 1024 linear blocks (LeakyReLU activations,
    BatchNorm on every block except the first) to an img_area-dimensional
    output; Tanh squashes pixel values into [-1, 1] and the result is
    reshaped to (N, *img_shape), i.e. (N, 1, 28, 28).
    """

    def __init__(self):
        super(Generator, self).__init__()

        def block(in_feat, out_feat, normalize=True):
            ## One generator stage: Linear (+ optional BatchNorm) + LeakyReLU.
            layers = [nn.Linear(in_feat, out_feat)]
            if normalize:
                ## BUG FIX: the original `nn.BatchNorm1d(out_feat, 0.8)` passed
                ## 0.8 positionally as `eps` (whose default is 1e-5), which is
                ## almost certainly unintended; 0.8 is a typical GAN *momentum*
                ## value, so pass it as the momentum keyword instead.
                layers.append(nn.BatchNorm1d(out_feat, momentum=0.8))
            layers.append(nn.LeakyReLU(0.2, inplace=True))
            return layers

        ## img_area = prod(img_shape) = 1*28*28 = 784
        self.model = nn.Sequential(
            *block(latent_dim, 128, normalize=False),
            *block(128, 256),
            *block(256, 512),
            *block(512, 1024),
            nn.Linear(1024, img_area),
            nn.Tanh()
        )

    def forward(self, z):
        ## z: (N, latent_dim) noise batch.
        imgs = self.model(z)                      # (N, img_area) in [-1, 1]
        return imgs.view(imgs.size(0), *img_shape)  # reshape to (N, 1, 28, 28)

三、训练模型

  1. 创建实例
## Instantiate the generator and the discriminator.
generator = Generator()
discriminator = Discriminator()

## Loss: binary cross-entropy for the real/fake classification.
criterion = torch.nn.BCELoss()

## One Adam optimizer per network, both with learning rate `lr` (0.0002).
## `betas` are the coefficients used for the running averages of the
## gradient and of its square.
optimizer_G = torch.optim.Adam(generator.parameters(), lr=lr, betas=(b1, b2))
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=lr, betas=(b1, b2))

## Move the models and the loss module to the GPU when one is available.
if torch.cuda.is_available():
    generator = generator.cuda()
    discriminator = discriminator.cuda()
    criterion = criterion.cuda()
  2. 训练模型
## 进行多个epoch的训练
## BUG FIX: the original called .cuda() unconditionally on every tensor, which
## crashes on CPU-only machines even though a `cuda` availability flag was
## computed earlier. Tensors are now created directly on the right device.
## The deprecated torch.autograd.Variable wrappers (no-ops since PyTorch 0.4,
## where tensors gained autograd support) are also removed.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

for epoch in range(n_epochs):
    for i, (imgs, _) in enumerate(dataloader):

        ## ---------------- Train Discriminator ----------------
        ## Two parts: (1) real images should be classified as real;
        ## (2) generated images should be classified as fake.
        real_img = imgs.view(imgs.size(0), -1).to(device)          # flatten to (N, 784)
        real_label = torch.ones(imgs.size(0), 1, device=device)    # label 1 = real
        fake_label = torch.zeros(imgs.size(0), 1, device=device)   # label 0 = fake

        ## Loss on real images — outputs closer to 1 are better.
        real_out = discriminator(real_img)
        loss_real_D = criterion(real_out, real_label)
        real_scores = real_out

        ## Loss on fake images — outputs closer to 0 are better.
        ## detach() cuts the graph so no gradient flows into G here:
        ## G is not being updated in this step.
        z = torch.randn(imgs.size(0), latent_dim, device=device)   # noise (N, latent_dim)
        fake_img = generator(z).detach()
        fake_out = discriminator(fake_img)
        loss_fake_D = criterion(fake_out, fake_label)
        fake_scores = fake_out

        ## Combined discriminator loss and optimizer step.
        loss_D = loss_real_D + loss_fake_D
        optimizer_D.zero_grad()
        loss_D.backward()
        optimizer_D.step()

        ## ---------------- Train Generator ----------------
        ## The discriminator is held fixed; the generator is trained so
        ## that its fakes are scored against the REAL label, i.e. G learns
        ## to produce images the discriminator accepts as real — the
        ## adversarial objective.
        z = torch.randn(imgs.size(0), latent_dim, device=device)
        fake_img = generator(z)
        output = discriminator(fake_img)
        loss_G = criterion(output, real_label)
        optimizer_G.zero_grad()
        loss_G.backward()
        optimizer_G.step()

        ## Periodic progress log; item() extracts the Python scalar from a
        ## single-element tensor.
        if (i + 1) % 300 == 0:
            print('[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f] [D real: %f] [D fake: %f]' % (
                epoch, n_epochs, i, len(dataloader),
                loss_D.item(), loss_G.item(),
                real_scores.data.mean(), fake_scores.data.mean()))

        ## Periodically save a 5x5 grid of generated samples.
        batches_done = epoch * len(dataloader) + i
        if batches_done % sample_interval == 0:
            save_image(fake_img.data[:25], './images/%d.png' % batches_done, nrow=5, normalize=True)
[Epoch 0/50] [Batch 299/938] [D loss: 1.046568] [G loss: 1.103128] [D real: 0.542673] [D fake: 0.286822]
[Epoch 0/50] [Batch 599/938] [D loss: 0.826762] [G loss: 1.310948] [D real: 0.620206] [D fake: 0.234421]
[Epoch 0/50] [Batch 899/938] [D loss: 0.836223] [G loss: 1.263961] [D real: 0.606355] [D fake: 0.239081]
[Epoch 1/50] [Batch 299/938] [D loss: 1.449718] [G loss: 3.131555] [D real: 0.910527] [D fake: 0.730497]
[Epoch 1/50] [Batch 599/938] [D loss: 1.045807] [G loss: 1.874437] [D real: 0.738595] [D fake: 0.496402]
[Epoch 1/50] [Batch 899/938] [D loss: 1.043733] [G loss: 2.978690] [D real: 0.895167] [D fake: 0.585491]
[Epoch 2/50] [Batch 299/938] [D loss: 1.017330] [G loss: 0.996245] [D real: 0.576980] [D fake: 0.289604]
[Epoch 2/50] [Batch 599/938] [D loss: 0.729240] [G loss: 1.226739] [D real: 0.674941] [D fake: 0.223582]
[Epoch 2/50] [Batch 899/938] [D loss: 1.158968] [G loss: 0.560629] [D real: 0.478467] [D fake: 0.149279]
[Epoch 3/50] [Batch 299/938] [D loss: 0.717277] [G loss: 1.760776] [D real: 0.683348] [D fake: 0.217228]
[Epoch 3/50] [Batch 599/938] [D loss: 0.838745] [G loss: 2.910796] [D real: 0.850525] [D fake: 0.461453]
[Epoch 3/50] [Batch 899/938] [D loss: 0.898099] [G loss: 1.662527] [D real: 0.729085] [D fake: 0.377981]
[Epoch 4/50] [Batch 299/938] [D loss: 0.661619] [G loss: 1.563544] [D real: 0.696057] [D fake: 0.170760]
[Epoch 4/50] [Batch 599/938] [D loss: 0.727032] [G loss: 1.178997] [D real: 0.718513] [D fake: 0.228449]
[Epoch 4/50] [Batch 899/938] [D loss: 0.851134] [G loss: 2.420134] [D real: 0.859826] [D fake: 0.458913]
[Epoch 5/50] [Batch 299/938] [D loss: 1.005043] [G loss: 1.085370] [D real: 0.632908] [D fake: 0.239408]
[Epoch 5/50] [Batch 599/938] [D loss: 0.796756] [G loss: 3.301892] [D real: 0.850631] [D fake: 0.423853]
[Epoch 5/50] [Batch 899/938] [D loss: 0.502513] [G loss: 2.404667] [D real: 0.876271] [D fake: 0.261685]
[Epoch 6/50] [Batch 299/938] [D loss: 0.719842] [G loss: 1.413706] [D real: 0.688405] [D fake: 0.144973]
[Epoch 6/50] [Batch 599/938] [D loss: 1.122348] [G loss: 0.836194] [D real: 0.532562] [D fake: 0.121853]
[Epoch 6/50] [Batch 899/938] [D loss: 0.852489] [G loss: 3.282957] [D real: 0.869516] [D fake: 0.447280]
[Epoch 7/50] [Batch 299/938] [D loss: 0.845143] [G loss: 1.011752] [D real: 0.649581] [D fake: 0.183550]
[Epoch 7/50] [Batch 599/938] [D loss: 0.679232] [G loss: 1.766937] [D real: 0.763167] [D fake: 0.254355]
[Epoch 7/50] [Batch 899/938] [D loss: 0.731782] [G loss: 2.702012] [D real: 0.812933] [D fake: 0.347938]
[Epoch 8/50] [Batch 299/938] [D loss: 0.632154] [G loss: 1.806969] [D real: 0.778959] [D fake: 0.263381]
[Epoch 8/50] [Batch 599/938] [D loss: 0.861857] [G loss: 1.336450] [D real: 0.687024] [D fake: 0.303734]
[Epoch 8/50] [Batch 899/938] [D loss: 0.649472] [G loss: 2.649270] [D real: 0.815069] [D fake: 0.294691]
[Epoch 9/50] [Batch 299/938] [D loss: 0.919185] [G loss: 1.701303] [D real: 0.752959] [D fake: 0.389701]
[Epoch 9/50] [Batch 599/938] [D loss: 0.934856] [G loss: 0.925332] [D real: 0.542160] [D fake: 0.095689]
[Epoch 9/50] [Batch 899/938] [D loss: 0.861954] [G loss: 1.905135] [D real: 0.764802] [D fake: 0.361087]
[Epoch 10/50] [Batch 299/938] [D loss: 0.968508] [G loss: 1.355248] [D real: 0.668179] [D fake: 0.338849]
[Epoch 10/50] [Batch 599/938] [D loss: 0.902449] [G loss: 1.227040] [D real: 0.668905] [D fake: 0.300748]
[Epoch 10/50] [Batch 899/938] [D loss: 0.840927] [G loss: 1.396381] [D real: 0.756327] [D fake: 0.350345]
[Epoch 11/50] [Batch 299/938] [D loss: 1.097970] [G loss: 1.186465] [D real: 0.496688] [D fake: 0.083669]
[Epoch 11/50] [Batch 599/938] [D loss: 0.868512] [G loss: 1.579520] [D real: 0.685732] [D fake: 0.308981]
[Epoch 11/50] [Batch 899/938] [D loss: 0.858358] [G loss: 2.010076] [D real: 0.772618] [D fake: 0.384770]
[Epoch 12/50] [Batch 299/938] [D loss: 0.913319] [G loss: 1.575100] [D real: 0.632721] [D fake: 0.258393]
[Epoch 12/50] [Batch 599/938] [D loss: 0.806947] [G loss: 1.670362] [D real: 0.748267] [D fake: 0.345162]
[Epoch 12/50] [Batch 899/938] [D loss: 0.830845] [G loss: 1.857230] [D real: 0.748009] [D fake: 0.360119]
[Epoch 13/50] [Batch 299/938] [D loss: 0.967458] [G loss: 1.143945] [D real: 0.624886] [D fake: 0.226814]
[Epoch 13/50] [Batch 599/938] [D loss: 1.150156] [G loss: 2.440469] [D real: 0.794930] [D fake: 0.525456]
[Epoch 13/50] [Batch 899/938] [D loss: 0.900482] [G loss: 1.092703] [D real: 0.649093] [D fake: 0.298936]
[Epoch 14/50] [Batch 299/938] [D loss: 0.871572] [G loss: 1.335352] [D real: 0.664516] [D fake: 0.297240]
[Epoch 14/50] [Batch 599/938] [D loss: 1.243352] [G loss: 2.178689] [D real: 0.817467] [D fake: 0.594458]
[Epoch 14/50] [Batch 899/938] [D loss: 1.041839] [G loss: 1.725569] [D real: 0.719620] [D fake: 0.440364]
[Epoch 15/50] [Batch 299/938] [D loss: 0.995310] [G loss: 1.013738] [D real: 0.573860] [D fake: 0.204083]
[Epoch 15/50] [Batch 599/938] [D loss: 0.895731] [G loss: 1.367230] [D real: 0.599801] [D fake: 0.211210]
[Epoch 15/50] [Batch 899/938] [D loss: 0.945497] [G loss: 1.914380] [D real: 0.744094] [D fake: 0.395435]
[Epoch 16/50] [Batch 299/938] [D loss: 0.917124] [G loss: 1.280150] [D real: 0.651614] [D fake: 0.304220]
[Epoch 16/50] [Batch 599/938] [D loss: 0.878740] [G loss: 1.031482] [D real: 0.596464] [D fake: 0.169260]
[Epoch 16/50] [Batch 899/938] [D loss: 1.011810] [G loss: 1.052837] [D real: 0.578375] [D fake: 0.247759]
[Epoch 17/50] [Batch 299/938] [D loss: 0.904772] [G loss: 1.164673] [D real: 0.646880] [D fake: 0.268164]
[Epoch 17/50] [Batch 599/938] [D loss: 1.008403] [G loss: 2.240973] [D real: 0.879795] [D fake: 0.523620]
[Epoch 17/50] [Batch 899/938] [D loss: 1.067138] [G loss: 0.999997] [D real: 0.544808] [D fake: 0.196808]
[Epoch 18/50] [Batch 299/938] [D loss: 0.859106] [G loss: 1.407268] [D real: 0.634341] [D fake: 0.244096]
[Epoch 18/50] [Batch 599/938] [D loss: 0.905877] [G loss: 1.607759] [D real: 0.715036] [D fake: 0.343714]
[Epoch 18/50] [Batch 899/938] [D loss: 0.977705] [G loss: 0.774759] [D real: 0.524107] [D fake: 0.129010]
[Epoch 19/50] [Batch 299/938] [D loss: 1.306353] [G loss: 2.261279] [D real: 0.819169] [D fake: 0.618986]
[Epoch 19/50] [Batch 599/938] [D loss: 1.108243] [G loss: 1.870013] [D real: 0.756651] [D fake: 0.496533]
[Epoch 19/50] [Batch 899/938] [D loss: 0.900681] [G loss: 1.771692] [D real: 0.746035] [D fake: 0.384694]
[Epoch 20/50] [Batch 299/938] [D loss: 0.962306] [G loss: 0.889691] [D real: 0.600129] [D fake: 0.217700]
[Epoch 20/50] [Batch 599/938] [D loss: 1.028519] [G loss: 1.448493] [D real: 0.677114] [D fake: 0.362585]
[Epoch 20/50] [Batch 899/938] [D loss: 0.953573] [G loss: 1.261735] [D real: 0.614279] [D fake: 0.267262]
[Epoch 21/50] [Batch 299/938] [D loss: 0.789372] [G loss: 2.004514] [D real: 0.800915] [D fake: 0.387526]
[Epoch 21/50] [Batch 599/938] [D loss: 0.819890] [G loss: 1.455880] [D real: 0.714904] [D fake: 0.305132]
[Epoch 21/50] [Batch 899/938] [D loss: 0.833287] [G loss: 1.742973] [D real: 0.794366] [D fake: 0.392303]
[Epoch 22/50] [Batch 299/938] [D loss: 1.057050] [G loss: 0.727257] [D real: 0.536639] [D fake: 0.222147]
[Epoch 22/50] [Batch 599/938] [D loss: 0.988399] [G loss: 1.211153] [D real: 0.567325] [D fake: 0.205189]
[Epoch 22/50] [Batch 899/938] [D loss: 0.852323] [G loss: 1.637496] [D real: 0.724115] [D fake: 0.322187]
[Epoch 23/50] [Batch 299/938] [D loss: 0.861801] [G loss: 1.437241] [D real: 0.663714] [D fake: 0.247673]
[Epoch 23/50] [Batch 599/938] [D loss: 0.922084] [G loss: 0.858493] [D real: 0.647349] [D fake: 0.299271]
[Epoch 23/50] [Batch 899/938] [D loss: 0.948497] [G loss: 1.224860] [D real: 0.669189] [D fake: 0.343460]
[Epoch 24/50] [Batch 299/938] [D loss: 0.983612] [G loss: 1.133340] [D real: 0.592831] [D fake: 0.246862]
[Epoch 24/50] [Batch 599/938] [D loss: 0.929822] [G loss: 1.247641] [D real: 0.784473] [D fake: 0.421617]
[Epoch 24/50] [Batch 899/938] [D loss: 1.039779] [G loss: 1.318752] [D real: 0.629335] [D fake: 0.334155]
[Epoch 25/50] [Batch 299/938] [D loss: 1.417979] [G loss: 2.573467] [D real: 0.877945] [D fake: 0.649123]
[Epoch 25/50] [Batch 599/938] [D loss: 0.841562] [G loss: 1.335131] [D real: 0.626154] [D fake: 0.151297]
[Epoch 25/50] [Batch 899/938] [D loss: 0.938627] [G loss: 1.566378] [D real: 0.652206] [D fake: 0.311294]
[Epoch 26/50] [Batch 299/938] [D loss: 0.911969] [G loss: 1.126373] [D real: 0.621509] [D fake: 0.254093]
[Epoch 26/50] [Batch 599/938] [D loss: 0.965744] [G loss: 0.977243] [D real: 0.589069] [D fake: 0.239343]
[Epoch 26/50] [Batch 899/938] [D loss: 0.890101] [G loss: 1.310513] [D real: 0.695716] [D fake: 0.344233]
[Epoch 27/50] [Batch 299/938] [D loss: 0.801683] [G loss: 1.275008] [D real: 0.682456] [D fake: 0.230567]
[Epoch 27/50] [Batch 599/938] [D loss: 1.090332] [G loss: 0.648755] [D real: 0.497413] [D fake: 0.146134]
[Epoch 27/50] [Batch 899/938] [D loss: 0.899746] [G loss: 1.012564] [D real: 0.559295] [D fake: 0.118455]
[Epoch 28/50] [Batch 299/938] [D loss: 0.879863] [G loss: 1.114848] [D real: 0.645971] [D fake: 0.272615]
[Epoch 28/50] [Batch 599/938] [D loss: 0.789727] [G loss: 1.309272] [D real: 0.705426] [D fake: 0.280931]
[Epoch 28/50] [Batch 899/938] [D loss: 1.108062] [G loss: 0.748481] [D real: 0.487862] [D fake: 0.144973]
[Epoch 29/50] [Batch 299/938] [D loss: 0.853529] [G loss: 1.413333] [D real: 0.721246] [D fake: 0.330325]
[Epoch 29/50] [Batch 599/938] [D loss: 0.988594] [G loss: 1.564626] [D real: 0.769820] [D fake: 0.445743]
[Epoch 29/50] [Batch 899/938] [D loss: 0.944268] [G loss: 1.358335] [D real: 0.607762] [D fake: 0.197268]
[Epoch 30/50] [Batch 299/938] [D loss: 1.208817] [G loss: 0.746673] [D real: 0.453257] [D fake: 0.169427]
[Epoch 30/50] [Batch 599/938] [D loss: 0.806272] [G loss: 1.575590] [D real: 0.694818] [D fake: 0.281416]
[Epoch 30/50] [Batch 899/938] [D loss: 1.005735] [G loss: 1.596589] [D real: 0.655657] [D fake: 0.298263]
[Epoch 31/50] [Batch 299/938] [D loss: 0.837121] [G loss: 1.672701] [D real: 0.684358] [D fake: 0.274048]
[Epoch 31/50] [Batch 599/938] [D loss: 0.935975] [G loss: 1.092769] [D real: 0.614314] [D fake: 0.253071]
[Epoch 31/50] [Batch 899/938] [D loss: 1.016526] [G loss: 1.326324] [D real: 0.702250] [D fake: 0.392737]
[Epoch 32/50] [Batch 299/938] [D loss: 0.846551] [G loss: 1.887030] [D real: 0.719636] [D fake: 0.318009]
[Epoch 32/50] [Batch 599/938] [D loss: 0.898159] [G loss: 1.550411] [D real: 0.810980] [D fake: 0.439940]
[Epoch 32/50] [Batch 899/938] [D loss: 0.876728] [G loss: 1.128204] [D real: 0.719555] [D fake: 0.362638]
[Epoch 33/50] [Batch 299/938] [D loss: 1.060772] [G loss: 0.996792] [D real: 0.592764] [D fake: 0.277146]
[Epoch 33/50] [Batch 599/938] [D loss: 0.810244] [G loss: 1.520665] [D real: 0.736881] [D fake: 0.320585]
[Epoch 33/50] [Batch 899/938] [D loss: 0.838425] [G loss: 2.205842] [D real: 0.815923] [D fake: 0.395423]
[Epoch 34/50] [Batch 299/938] [D loss: 0.895625] [G loss: 1.522561] [D real: 0.708644] [D fake: 0.323247]
[Epoch 34/50] [Batch 599/938] [D loss: 1.119952] [G loss: 2.270184] [D real: 0.802474] [D fake: 0.468687]
[Epoch 34/50] [Batch 899/938] [D loss: 0.860041] [G loss: 1.912927] [D real: 0.735016] [D fake: 0.349865]
[Epoch 35/50] [Batch 299/938] [D loss: 1.021953] [G loss: 0.740029] [D real: 0.536219] [D fake: 0.190995]
[Epoch 35/50] [Batch 599/938] [D loss: 0.754408] [G loss: 1.391147] [D real: 0.699189] [D fake: 0.213192]
[Epoch 35/50] [Batch 899/938] [D loss: 1.034781] [G loss: 1.072571] [D real: 0.585401] [D fake: 0.238148]
[Epoch 36/50] [Batch 299/938] [D loss: 0.762516] [G loss: 1.911605] [D real: 0.810809] [D fake: 0.365880]
[Epoch 36/50] [Batch 599/938] [D loss: 0.814482] [G loss: 1.235842] [D real: 0.748348] [D fake: 0.326373]
[Epoch 36/50] [Batch 899/938] [D loss: 0.854656] [G loss: 1.594869] [D real: 0.751039] [D fake: 0.333676]
[Epoch 37/50] [Batch 299/938] [D loss: 0.945577] [G loss: 1.969516] [D real: 0.805742] [D fake: 0.433199]
[Epoch 37/50] [Batch 599/938] [D loss: 0.852353] [G loss: 1.851444] [D real: 0.769653] [D fake: 0.322426]
[Epoch 37/50] [Batch 899/938] [D loss: 1.055028] [G loss: 1.716629] [D real: 0.800618] [D fake: 0.475555]
[Epoch 38/50] [Batch 299/938] [D loss: 0.776765] [G loss: 1.919511] [D real: 0.757362] [D fake: 0.303830]
[Epoch 38/50] [Batch 599/938] [D loss: 0.848294] [G loss: 1.283069] [D real: 0.649607] [D fake: 0.199237]
[Epoch 38/50] [Batch 899/938] [D loss: 0.879554] [G loss: 1.741035] [D real: 0.678150] [D fake: 0.268430]
[Epoch 39/50] [Batch 299/938] [D loss: 0.987097] [G loss: 1.080307] [D real: 0.583081] [D fake: 0.165889]
[Epoch 39/50] [Batch 599/938] [D loss: 0.855287] [G loss: 1.514237] [D real: 0.669856] [D fake: 0.223223]
[Epoch 39/50] [Batch 899/938] [D loss: 0.979833] [G loss: 1.047158] [D real: 0.570096] [D fake: 0.165358]
[Epoch 40/50] [Batch 299/938] [D loss: 0.783161] [G loss: 1.554477] [D real: 0.773152] [D fake: 0.311928]
[Epoch 40/50] [Batch 599/938] [D loss: 0.899651] [G loss: 1.291173] [D real: 0.631423] [D fake: 0.212277]
[Epoch 40/50] [Batch 899/938] [D loss: 0.775918] [G loss: 1.158898] [D real: 0.651590] [D fake: 0.176949]
[Epoch 41/50] [Batch 299/938] [D loss: 0.954476] [G loss: 1.894536] [D real: 0.759034] [D fake: 0.389677]
[Epoch 41/50] [Batch 599/938] [D loss: 0.809456] [G loss: 1.578750] [D real: 0.721699] [D fake: 0.293431]
[Epoch 41/50] [Batch 899/938] [D loss: 0.871926] [G loss: 1.882774] [D real: 0.812887] [D fake: 0.429435]
[Epoch 42/50] [Batch 299/938] [D loss: 0.955984] [G loss: 1.333502] [D real: 0.659386] [D fake: 0.289309]
[Epoch 42/50] [Batch 599/938] [D loss: 0.801338] [G loss: 1.717158] [D real: 0.721607] [D fake: 0.281809]
[Epoch 42/50] [Batch 899/938] [D loss: 0.799964] [G loss: 1.845830] [D real: 0.760781] [D fake: 0.286227]
[Epoch 43/50] [Batch 299/938] [D loss: 0.865469] [G loss: 1.718773] [D real: 0.764228] [D fake: 0.343645]
[Epoch 43/50] [Batch 599/938] [D loss: 0.860103] [G loss: 1.732769] [D real: 0.780447] [D fake: 0.387257]
[Epoch 43/50] [Batch 899/938] [D loss: 0.862966] [G loss: 1.309596] [D real: 0.668367] [D fake: 0.265996]
[Epoch 44/50] [Batch 299/938] [D loss: 0.994418] [G loss: 1.292833] [D real: 0.548938] [D fake: 0.127663]
[Epoch 44/50] [Batch 599/938] [D loss: 0.848912] [G loss: 1.751954] [D real: 0.751652] [D fake: 0.340239]
[Epoch 44/50] [Batch 899/938] [D loss: 0.933925] [G loss: 1.647963] [D real: 0.704723] [D fake: 0.314274]
[Epoch 45/50] [Batch 299/938] [D loss: 0.929099] [G loss: 1.472452] [D real: 0.694997] [D fake: 0.325997]
[Epoch 45/50] [Batch 599/938] [D loss: 0.827760] [G loss: 1.567965] [D real: 0.766641] [D fake: 0.323597]
[Epoch 45/50] [Batch 899/938] [D loss: 0.810614] [G loss: 1.402845] [D real: 0.741866] [D fake: 0.313477]
[Epoch 46/50] [Batch 299/938] [D loss: 0.883345] [G loss: 1.508560] [D real: 0.700936] [D fake: 0.307757]
[Epoch 46/50] [Batch 599/938] [D loss: 0.780084] [G loss: 1.801454] [D real: 0.793954] [D fake: 0.351703]
[Epoch 46/50] [Batch 899/938] [D loss: 0.889939] [G loss: 1.770669] [D real: 0.768195] [D fake: 0.365825]
[Epoch 47/50] [Batch 299/938] [D loss: 0.900580] [G loss: 1.583696] [D real: 0.807549] [D fake: 0.421236]
[Epoch 47/50] [Batch 599/938] [D loss: 0.982368] [G loss: 0.953073] [D real: 0.603222] [D fake: 0.194074]
[Epoch 47/50] [Batch 899/938] [D loss: 0.969824] [G loss: 2.342012] [D real: 0.820915] [D fake: 0.423516]
[Epoch 48/50] [Batch 299/938] [D loss: 0.844523] [G loss: 1.338581] [D real: 0.646710] [D fake: 0.206845]
[Epoch 48/50] [Batch 599/938] [D loss: 0.722861] [G loss: 1.433848] [D real: 0.751154] [D fake: 0.248125]
[Epoch 48/50] [Batch 899/938] [D loss: 0.803689] [G loss: 1.892171] [D real: 0.807257] [D fake: 0.345935]
[Epoch 49/50] [Batch 299/938] [D loss: 0.888761] [G loss: 1.694622] [D real: 0.766204] [D fake: 0.352355]
[Epoch 49/50] [Batch 599/938] [D loss: 0.901187] [G loss: 1.721451] [D real: 0.710513] [D fake: 0.286541]
[Epoch 49/50] [Batch 899/938] [D loss: 0.898905] [G loss: 1.417452] [D real: 0.702455] [D fake: 0.306893]
  3. 保存模型
## Persist both trained networks' weights so they can be reloaded later.
for _name, _net in (('generator', generator), ('discriminator', discriminator)):
    torch.save(_net.state_dict(), './save/%s.pth' % _name)

四、总结

  • 生成对抗网络就是由两部分组成的,生成器负责生成图像,判别器对生成的图像进行判断
  • 简单来说,当判别器将生成的图像判断为真时,就表明生成器的效果不错
  • 8
    点赞
  • 4
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值