A Simple GAN Example

I spent a whole day reading this code, and honestly I still don't fully understand it. I've marked the parts I do understand with comments and will fill in the rest later.

```python
import argparse
import numpy as np
from scipy.stats import norm
import tensorflow as tf
import matplotlib.pyplot as plt
from matplotlib import animation
import seaborn as sns  # seaborn: a free plotting library, higher-level than matplotlib
import cProfile  # unused

Count = 0  # unused
seed = 42
np.random.seed(seed)
tf.set_random_seed(seed)


class DataDistribution(object):  # the real data distribution
    def __init__(self):  # runs once
        self.mu = 4       # mean of the target Gaussian
        self.sigma = 0.5  # standard deviation of the target Gaussian

    def sample(self, N):  # called on every iteration
        samples = np.random.normal(self.mu, self.sigma, N)  # draw N points from N(mu, sigma^2)
        samples.sort()  # sorted so the real distribution plots cleanly
        return samples


class GeneratorDistribution(object):  # the noise distribution fed to G as input
    def __init__(self, range):
        self.range = range

    def sample(self, N):
        # evenly spaced points over [-range, range] plus a little jitter
        return np.linspace(-self.range, self.range, N) + \
            np.random.random(N) * 0.01
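# Note: linspace-plus-jitter is stratified sampling: every batch covers the
# whole input range, which tends to make this 1-D toy train more stably than
# i.i.d. uniform noise would.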


def static_vars(**kwargs):
    # decorator that attaches "static" attributes to a function;
    # only used by the commented-out counter experiment in optimizer() below
    def decorate(func):
        for k in kwargs:
            setattr(func, k, kwargs[k])
        return func
    return decorate


def linear(input, output_dim, scope=None, stddev=1.0):
    # a fully connected layer: y = input @ w + b
    # (called 14 times while building the graph: 4 for D_pre, 2 for G, 4 + 4 for the two D copies)
    norm = tf.random_normal_initializer(stddev=stddev)  # random init for w
    const = tf.constant_initializer(0.0)                # zero init for b

    with tf.variable_scope(scope or 'linear'):
        w = tf.get_variable('w', [input.get_shape()[1], output_dim], initializer=norm)
        b = tf.get_variable('b', [output_dim], initializer=const)
        return tf.matmul(input, w) + b


def generator(input, h_dim):
    # G: a tiny MLP mapping 1-D noise to a 1-D sample
    h0 = tf.nn.softplus(linear(input, h_dim, 'g0'))
    h1 = linear(h0, 1, 'g1')
    return h1


def discriminator(input, h_dim):
    # D: a deeper MLP; outputs the probability that the input is real
    h0 = tf.tanh(linear(input, h_dim * 2, 'd0'))
    h1 = tf.tanh(linear(h0, h_dim * 2, 'd1'))
    h2 = tf.tanh(linear(h1, h_dim * 2, scope='d2'))
    h3 = tf.sigmoid(linear(h2, 1, scope='d3'))
    return h3

#@static_vars(counter=0)
def optimizer(loss, var_list, initial_learning_rate):
    decay = 0.95
    num_decay_steps = 150   # decay the learning rate once every 150 steps
    batch = tf.Variable(0)  # global step counter, incremented by minimize()

    learning_rate = tf.train.exponential_decay(
        initial_learning_rate,
        batch,
        num_decay_steps,
        decay,
        staircase=True
    )
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss,
        global_step=batch,
        var_list=var_list
    )
    #optimizer.counter += 1
    #print("counter:", optimizer.counter)
    return optimizer
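# Note: with staircase=True the effective learning rate is
#     lr = initial_learning_rate * decay ** (global_step // num_decay_steps)
# so with the values used below (0.03, 0.95, 150): steps 0-149 train at 0.03,
# steps 150-299 at 0.0285, steps 300-449 at about 0.0271, and so on.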


class GAN(object):
    def __init__(self, data, gen, num_steps, batch_size, log_every):
        self.data = data    # the real data distribution (DataDistribution)
        self.gen = gen      # the noise distribution (GeneratorDistribution)
        self.num_steps = num_steps
        self.batch_size = batch_size
        self.log_every = log_every
        self.mlp_hidden_size = 4   # hidden units in the G/D MLPs
        self.count = 0
        self.learning_rate = 0.03  # initial learning rate

        self._create_model()


    def _create_model(self):
        # D_pre: pretrain the discriminator so it starts with sensible weights
        with tf.variable_scope('D_pre'):
            self.pre_input = tf.placeholder(tf.float32, shape=(self.batch_size, 1))
            self.pre_labels = tf.placeholder(tf.float32, shape=(self.batch_size, 1))
            D_pre = discriminator(self.pre_input, self.mlp_hidden_size)
            self.pre_loss = tf.reduce_mean(tf.square(D_pre - self.pre_labels))  # regression against the true pdf values
            self.pre_opt = optimizer(self.pre_loss, None, self.learning_rate)

        # This defines the generator network - it takes samples from a noise
        # distribution as input, and passes them through an MLP.
        with tf.variable_scope('Gen'):
            self.z = tf.placeholder(tf.float32, shape=(self.batch_size, 1))
            self.G = generator(self.z, self.mlp_hidden_size)

        # The discriminator tries to tell the difference between samples from the
        # true data distribution (self.x) and the generated samples (self.z).
        #
        # Here we create two copies of the discriminator network (that share parameters),
        # as you cannot use the same network with different inputs in TensorFlow.
        with tf.variable_scope('Disc') as scope:
            self.x = tf.placeholder(tf.float32, shape=(self.batch_size, 1))
            self.D1 = discriminator(self.x, self.mlp_hidden_size)  # D1: D applied to real data
            scope.reuse_variables()  # share weights between the two copies
            self.D2 = discriminator(self.G, self.mlp_hidden_size)  # D2: the same D applied to G's output

        # Define the loss for discriminator and generator networks (see the original
        # paper for details), and create optimizers for both
        self.loss_d = tf.reduce_mean(-tf.log(self.D1) - tf.log(1 - self.D2))  # push D1 toward 1 and D2 toward 0 (batch mean)
        self.loss_g = tf.reduce_mean(-tf.log(self.D2))  # push D2 toward 1
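        # These are the GAN losses from Goodfellow et al. (2014), written as
        # quantities to minimize:
        #     loss_d = -E[log D(x)] - E[log(1 - D(G(z)))]
        #     loss_g = -E[log D(G(z))]   (the non-saturating generator loss,
        #                                 rather than minimizing E[log(1 - D(G(z)))])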

        self.d_pre_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='D_pre')
        self.d_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Disc')
        self.g_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Gen')

        self.opt_d = optimizer(self.loss_d, self.d_params, self.learning_rate)
        self.opt_g = optimizer(self.loss_g, self.g_params, self.learning_rate)

    def train(self):

        with tf.Session() as session:
            tf.global_variables_initializer().run()

            # pretraining discriminator
            num_pretrain_steps = 1000  
            for step in range(num_pretrain_steps):
                d = (np.random.random(self.batch_size) - 0.5) * 10.0  # uniform points on [-5, 5]
                labels = norm.pdf(d, loc=self.data.mu, scale=self.data.sigma)  # true Gaussian pdf values as targets
                pretrain_loss, _ = session.run([self.pre_loss, self.pre_opt], {
                    self.pre_input: np.reshape(d, (self.batch_size, 1)),
                    self.pre_labels: np.reshape(labels, (self.batch_size, 1))
                })
            self.weightsD = session.run(self.d_pre_params)
            # copy weights from pre-training over to new D network
            for i, v in enumerate(self.d_params):
                session.run(v.assign(self.weightsD[i]))
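            # At this point D has been pretrained by plain regression against the
            # true Gaussian pdf, and its weights copied into the 'Disc' scope, so
            # adversarial training starts from a D that already roughly knows the
            # real density.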

            for step in range(self.num_steps):
                # update discriminator
                x = self.data.sample(self.batch_size)  # a batch from the real distribution
                z = self.gen.sample(self.batch_size)   # a batch of noise
                loss_d, _ = session.run([self.loss_d, self.opt_d], {  # one gradient step on D's weights
                    self.x: np.reshape(x, (self.batch_size, 1)),
                    self.z: np.reshape(z, (self.batch_size, 1))
                })

                # update generator
                z = self.gen.sample(self.batch_size)
                loss_g, _ = session.run([self.loss_g, self.opt_g], {
                    self.z: np.reshape(z, (self.batch_size, 1))
                })

                if step % self.log_every == 0:
                    print('{}: {}\t{}'.format(step, loss_d, loss_g))
                if step % 100 == 0 or step == self.num_steps - 1:
                    self._plot_distributions(session)

    def _samples(self, session, num_points=10000, num_bins=100):
        # histogram both the real data and G's output over the same bins
        bins = np.linspace(-self.gen.range, self.gen.range, num_bins)

        # data distribution
        d = self.data.sample(num_points)
        pd, _ = np.histogram(d, bins=bins, density=True)

        # generated samples
        zs = np.linspace(-self.gen.range, self.gen.range, num_points)
        g = np.zeros((num_points, 1))
        for i in range(num_points // self.batch_size):
            g[self.batch_size * i:self.batch_size * (i + 1)] = session.run(self.G, {
                self.z: np.reshape(
                    zs[self.batch_size * i:self.batch_size * (i + 1)],
                    (self.batch_size, 1)
                )
            })
        pg, _ = np.histogram(g, bins=bins, density=True)

        return pd, pg

    def _plot_distributions(self, session):
        # plot the real vs. generated densities (called every 100 training steps)
        pd, pg = self._samples(session)
        p_x = np.linspace(-self.gen.range, self.gen.range, len(pd))
        f, ax = plt.subplots(1)
        ax.set_ylim(0, 1)
        plt.plot(p_x, pd, label='real data')
        plt.plot(p_x, pg, label='generated data')
        plt.title('1D Generative Adversarial Network')
        plt.xlabel('Data values')
        plt.ylabel('Probability density')
        plt.legend()
        plt.show()


def main(args):

    model = GAN(
        DataDistribution(),              # the real data distribution
        GeneratorDistribution(range=8),  # the noise distribution fed to G
        args.num_steps,                  # number of training steps
        args.batch_size,                 # samples per step
        args.log_every,                  # print losses every this many steps
    )
    model.train()


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--num-steps', type=int, default=1200,
                        help='the number of training steps to take')
    parser.add_argument('--batch-size', type=int, default=12,
                        help='the batch size')
    parser.add_argument('--log-every', type=int, default=10,
                        help='print loss after this many steps')
    return parser.parse_args()


if __name__ == '__main__':
    main(parse_args())
```
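To run it, save the script (for example as `gan_toy.py`, a name I'm choosing here) and call `python gan_toy.py`; it needs TensorFlow 1.x.

One fragile spot worth flagging: `loss_d` and `loss_g` take `-tf.log` of sigmoid outputs directly, which can overflow to NaN once the discriminator saturates. A common, more numerically stable rewrite (a sketch of an alternative, not what the script above does) keeps the discriminator's last layer as raw logits and uses the fused cross-entropy op:

```python
import tensorflow as tf

# Hypothetical logits produced by the discriminator *before* the sigmoid; in
# the script above this would mean having discriminator() return
# linear(h2, 1, scope='d3') without wrapping it in tf.sigmoid.
d1_logits = tf.placeholder(tf.float32, shape=(None, 1))  # D(x), raw logits
d2_logits = tf.placeholder(tf.float32, shape=(None, 1))  # D(G(z)), raw logits

# Same objectives as loss_d / loss_g above, but computed from logits in one
# fused, numerically safe op instead of -tf.log(sigmoid(...)).
loss_d = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.ones_like(d1_logits), logits=d1_logits)
    + tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.zeros_like(d2_logits), logits=d2_logits))
loss_g = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.ones_like(d2_logits), logits=d2_logits))
```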

Below is a simple GAN code example for generating handwritten digit images:

```python
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
from torchvision.utils import save_image
from torch.utils.data import DataLoader
import numpy as np
import matplotlib.pyplot as plt
import os


# define the generator
class Generator(nn.Module):
    def __init__(self):
        super(Generator, self).__init__()
        self.main = nn.Sequential(
            nn.Linear(100, 256),
            nn.ReLU(),
            nn.Linear(256, 512),
            nn.ReLU(),
            nn.Linear(512, 784),
            nn.Tanh()
        )

    def forward(self, x):
        img = self.main(x)
        img = img.view(-1, 28, 28)  # reshape to image form
        return img


# define the discriminator
class Discriminator(nn.Module):
    def __init__(self):
        super(Discriminator, self).__init__()
        self.main = nn.Sequential(
            nn.Linear(784, 512),
            nn.ReLU(),
            nn.Linear(512, 256),
            nn.ReLU(),
            nn.Linear(256, 1),
            nn.Sigmoid()
        )

    def forward(self, x):
        x = x.view(-1, 784)
        validity = self.main(x)
        return validity


# define the training function
def train(generator, discriminator, dataloader, optimizer_G, optimizer_D, criterion, device):
    for epoch in range(200):
        for i, (imgs, _) in enumerate(dataloader):
            # train the discriminator
            optimizer_D.zero_grad()
            real_imgs = imgs.to(device)
            real_labels = torch.ones((real_imgs.size(0), 1)).to(device)
            fake_labels = torch.zeros((real_imgs.size(0), 1)).to(device)

            # generate fake images
            z = torch.randn((real_imgs.size(0), 100)).to(device)
            fake_imgs = generator(z)

            # score real and fake images
            real_loss = criterion(discriminator(real_imgs), real_labels)
            fake_loss = criterion(discriminator(fake_imgs.detach()), fake_labels)
            d_loss = (real_loss + fake_loss) / 2
            d_loss.backward()
            optimizer_D.step()

            # train the generator
            optimizer_G.zero_grad()
            z = torch.randn((real_imgs.size(0), 100)).to(device)
            fake_imgs = generator(z)
            g_loss = criterion(discriminator(fake_imgs), real_labels)
            g_loss.backward()
            optimizer_G.step()

        # print the losses
        print("[Epoch %d/%d] [D loss: %f] [G loss: %f]" % (epoch, 200, d_loss.item(), g_loss.item()))

        # save generated images every 10 epochs
        if epoch % 10 == 0:
            z = torch.randn((25, 100)).to(device)
            gen_imgs = generator(z).detach().cpu()
            # unsqueeze adds the channel dim save_image expects: (25, 1, 28, 28)
            save_image(gen_imgs.unsqueeze(1), "images/%d.png" % epoch, nrow=5, normalize=True)


# load the dataset
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,))
])
mnist_dataset = datasets.MNIST(root='./data', train=True, transform=transform, download=True)
dataloader = DataLoader(mnist_dataset, batch_size=128, shuffle=True)

# choose the device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# define the models, optimizers and loss function
generator = Generator().to(device)
discriminator = Discriminator().to(device)
optimizer_G = optim.Adam(generator.parameters(), lr=0.0002, betas=(0.5, 0.999))
optimizer_D = optim.Adam(discriminator.parameters(), lr=0.0002, betas=(0.5, 0.999))
criterion = nn.BCELoss()

# make sure the image output directory exists
os.makedirs("images", exist_ok=True)

# train the model
train(generator, discriminator, dataloader, optimizer_G, optimizer_D, criterion, device)
```
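`numpy` and `matplotlib` are imported above but never used. After training you could put `matplotlib` to work by sampling the generator and displaying a grid of digits (a minimal sketch; the 5x5 grid and styling are my own choices, and it assumes the script above has already run):

```python
# continues the script above: `generator`, `device`, torch and plt are in scope
z = torch.randn((25, 100)).to(device)
samples = generator(z).detach().cpu().numpy()  # shape (25, 28, 28), values in [-1, 1]

fig, axes = plt.subplots(5, 5, figsize=(5, 5))
for img, ax in zip(samples, axes.flat):
    ax.imshow(img, cmap='gray')  # imshow rescales the [-1, 1] range automatically
    ax.axis('off')
plt.tight_layout()
plt.show()
```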