GAN in Practice (1)

1. In the original code, both the D and G networks use the Adam optimizer; after a few tens of thousands of training steps the loss tends to become NaN.
2. With SGD for both D and G, a relatively large learning rate such as 0.01 easily causes mode collapse: the G network only ever outputs the digit 1, and this does not change even after much longer training.
3. Using Adam for the G network and SGD for the D network gives good results, shown in the figure below; the images generated by G are saved every 1,000 steps.

[Figure: generator samples after 50,000 training steps]

# title: Simple GAN with a two-layer neural network

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
from tensorflow.examples.tutorials.mnist import input_data

tf.reset_default_graph()


batch_size = 128
Z_dim = 100

MOVING_AVERAGE_DECAY=0.9   ### moving-average decay

LEARNING_RATE_BASE=0.005   ### base learning rate
LEARNING_RATE_DECAY=0.98   ### learning-rate decay rate



def xavier_init(size):
    input_dim = size[0]
    xavier_variance = 1. / tf.sqrt(input_dim/2.)
    return tf.random_normal(shape=size, stddev=xavier_variance)
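
# Note (added): stddev = 1/sqrt(fan_in/2) = sqrt(2/fan_in), i.e. the He-style
# scaling usually paired with ReLU units, even though the function is named xavier_init.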

def plot(samples): ### plot a 4x4 grid of generated samples
    fig = plt.figure(figsize=(4, 4))
    gs = gridspec.GridSpec(4, 4)
    gs.update(wspace=0.05, hspace=0.05)

    for i, sample in enumerate(samples):
        ax = plt.subplot(gs[i])
        plt.axis('off')
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_aspect('equal')
        plt.imshow(sample.reshape(28, 28), cmap='Greys_r')

    return fig





# Random noise setting for Generator
Z = tf.placeholder(tf.float32, shape=[None, Z_dim], name='Z')

#Generator parameter settings
G_W1 = tf.Variable(xavier_init([Z_dim, 128]), name='G_W1')
G_b1 = tf.Variable(tf.zeros(shape=[128]), name='G_b1')
G_W2 = tf.Variable(xavier_init([128, 784]), name='G_W2')
G_b2 = tf.Variable(tf.zeros(shape=[784]), name='G_b2')
theta_G = [G_W1, G_W2, G_b1, G_b2]

#Input Image MNIST setting for Discriminator [28x28=784]
X = tf.placeholder(tf.float32, shape=[None, 784], name='X')

#Discriminator parameter settings
D_W1 = tf.Variable(xavier_init([784, 128]), name='D_W1')
D_b1 = tf.Variable(tf.zeros(shape=[128]), name='D_b1')
D_W2 = tf.Variable(xavier_init([128, 1]), name='D_W2')
D_b2 = tf.Variable(tf.zeros(shape=[1]), name='D_b2')

theta_D = [D_W1, D_W2, D_b1, D_b2]

# Generator Network
def generator(z):
    G_h1 = tf.nn.relu(tf.matmul(z, G_W1) + G_b1)
    G_log_prob = tf.matmul(G_h1, G_W2) + G_b2
    G_prob = tf.nn.sigmoid(G_log_prob)

    return G_prob

# Discriminator Network
def discriminator(x):
    D_h1 = tf.nn.relu(tf.matmul(x, D_W1) + D_b1)
    D_logit = tf.matmul(D_h1, D_W2) + D_b2
    D_prob = tf.nn.sigmoid(D_logit)

    return D_prob, D_logit





G_sample = generator(Z)

D_real, D_logit_real = discriminator(X)
D_fake, D_logit_fake = discriminator(G_sample)

# Loss functions from the paper
D_loss = -tf.reduce_mean(tf.log(D_real) + tf.log(1. - D_fake))
G_loss = -tf.reduce_mean(tf.log(D_fake))
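
# A numerically more stable alternative (a sketch, not part of the original code):
# tf.log(D_real) and tf.log(1. - D_fake) blow up to -inf/NaN once the sigmoid
# saturates, which is one likely cause of the NaN losses noted in the introduction.
# Working directly on the logits with the built-in cross-entropy avoids that:
#D_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
#        logits=D_logit_real, labels=tf.ones_like(D_logit_real)))
#D_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
#        logits=D_logit_fake, labels=tf.zeros_like(D_logit_fake)))
#D_loss_alt = D_loss_real + D_loss_fake
#G_loss_alt = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
#        logits=D_logit_fake, labels=tf.ones_like(D_logit_fake)))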

'''
The book uses the Adam optimizer for both networks; after training for some time the loss tends to become NaN.
'''
## Update D(X)'s parameters
#D_solver = tf.train.AdamOptimizer().minimize(D_loss, var_list=theta_D)
#
## Update G(Z)'s parameters
#G_solver = tf.train.AdamOptimizer().minimize(G_loss, var_list=theta_G)


#    global training-step counter (not trainable)
global_step=tf.Variable(0,trainable=False)
#    set up the exponential moving average
variable_averages=tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY,global_step)
# tf.trainable_variables() returns every variable that was not created with trainable=False
#    apply the moving average to all trainable network parameters
variables_averages_op=variable_averages.apply(tf.trainable_variables())
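# Note (added): this op only maintains shadow (averaged) copies of the weights;
# nothing below reads the averaged values back, so the moving average does not
# change the weights actually used by G and D in this script.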

learning_rate=tf.train.exponential_decay(LEARNING_RATE_BASE,global_step,10000,LEARNING_RATE_DECAY,staircase=True)
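# Added note: with staircase=True the schedule is
#     learning_rate = LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (global_step // 10000),
# i.e. 0.005 for the first 10,000 counted steps, then 0.0049, and so on.
# Both solvers below pass global_step, so the counter advances by 2 per training iteration.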


# Option 2 from the notes above: SGD for both networks (prone to mode collapse at larger learning rates).
#D_solver = tf.train.GradientDescentOptimizer(learning_rate).minimize(D_loss, var_list=theta_D,global_step=global_step)
#G_solver = tf.train.GradientDescentOptimizer(learning_rate).minimize(G_loss, var_list=theta_G,global_step=global_step)



# Another variant: Adam for the discriminator, SGD for the generator.
#D_solver = tf.train.AdamOptimizer().minimize(D_loss, var_list=theta_D,global_step=global_step)
#G_solver = tf.train.GradientDescentOptimizer(learning_rate).minimize(G_loss, var_list=theta_G,global_step=global_step)




# Option 3 from the notes above: SGD for the discriminator, Adam for the generator.
D_solver = tf.train.GradientDescentOptimizer(learning_rate).minimize(D_loss, var_list=theta_D,global_step=global_step)
G_solver = tf.train.AdamOptimizer().minimize(G_loss, var_list=theta_G,global_step=global_step)



with tf.control_dependencies([D_solver,variables_averages_op]):
    D_solver_train_op=tf.no_op(name='D_solver')

with tf.control_dependencies([G_solver,variables_averages_op]):
    G_solver_train_op=tf.no_op(name='G_solver')

# Sample the generator's input noise uniformly from [-1, 1]
def sample_Z(m, n):
    return np.random.uniform(-1., 1., size=[m, n])



sess = tf.Session()
sess.run(tf.global_variables_initializer())

mnist = input_data.read_data_sets('MNIST/', one_hot=True)

if not os.path.exists('output/'):
    os.makedirs('output/')

i = 0

for itr in range(1000000):
    if itr % 1000 == 0:
        samples = sess.run(G_sample, feed_dict={Z: sample_Z(16, Z_dim)})

        fig = plot(samples)
        plt.savefig('output/{}.png'.format(str(i).zfill(3)), bbox_inches='tight')   ### zfill pads the file index with leading zeros
        i += 1
        plt.show()
        plt.close(fig)

    X_mb, _ = mnist.train.next_batch(batch_size)

    _, D_loss_curr,__ = sess.run([D_solver_train_op, D_loss,global_step], feed_dict={X: X_mb, Z: sample_Z(batch_size, Z_dim)})
    _, G_loss_curr,__ = sess.run([G_solver_train_op, G_loss,global_step], feed_dict={Z: sample_Z(batch_size, Z_dim)})
#    _, G_loss_curr,__ = sess.run([G_solver_train_op, G_loss,global_step], feed_dict={Z: sample_Z(batch_size, Z_dim)})  
    
    
    
#    while D_loss_curr > 1.5:
#        X_mb, _ = mnist.train.next_batch(batch_size)
#        _, D_loss_curr,__ = sess.run([D_solver_train_op, D_loss,global_step], feed_dict={X: X_mb, Z: sample_Z(batch_size, Z_dim)})
#        print('D_loss_curr: {:.4}'.format(D_loss_curr))
        
        
    
#    while G_loss_curr > 2:
#        _, G_loss_curr,__ = sess.run([G_solver_train_op, G_loss,global_step], feed_dict={Z: sample_Z(batch_size, Z_dim)})
#        print('G_loss_curr: {:.4}'.format(G_loss_curr))
        
        
        
    if itr % 1000 == 0:
        print('Iter: {}'.format(itr))
        print('D loss: {:.4}'.format(D_loss_curr))
        print('G_loss: {:.4}'.format(G_loss_curr))
        print()


