Generative Adversarial Network (GAN)

import os

import numpy as np
np.random.seed(123)
print("NumPy:{}".format(np.__version__))

import pandas as pd
print("Pandas:{}".format(pd.__version__))

import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.pylab import rcParams
rcParams['figure.figsize'] = 15, 10
print("Matplotlib:{}".format(mpl.__version__))

import tensorflow as tf
tf.set_random_seed(123)
print("TensorFlow:{}".format(tf.__version__))

import keras
print("Keras:{}".format(keras.__version__))

DATASETSLIB_HOME = '../datasetslib'
import sys
if not DATASETSLIB_HOME in sys.path:
    sys.path.append(DATASETSLIB_HOME)
%reload_ext autoreload
%autoreload 2
import datasetslib

from datasetslib import util as dsu
datasetslib.datasets_root = os.path.join(os.path.expanduser('~'), 'datasets')

from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets(os.path.join(datasetslib.datasets_root, 'mnist'), one_hot=False)

x_train = mnist.train.images
x_test = mnist.test.images
y_train = mnist.train.labels
y_test = mnist.test.labels

pixel_size = 28

def norm(x):
    # scale pixels from [0, 1] to [-1, 1] so real images match the generator's tanh output range
    return (x - 0.5) / 0.5

n_z = 256
z_test = np.random.uniform(-1.0,1.0,size=[8,n_z])

Function to display the images

def display_images(images):
    for i in range(images.shape[0]):
        plt.subplot(1, 8, i + 1)
        plt.imshow(images[i])
        plt.axis('off')
    plt.tight_layout()
    plt.show()
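As a quick sanity check (an illustrative call, not part of the original walkthrough), the helper can be pointed at a few real training digits before any GAN training:

# preview the first 8 real MNIST digits (hypothetical usage of display_images)
display_images(x_train[0:8].reshape(-1, pixel_size, pixel_size))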

tf.reset_default_graph()
keras.backend.clear_session()

########## Simple GAN in TensorFlow ##################

graph hyperparameters

g_learning_rate = 0.00001
d_learning_rate = 0.01

n_x = 784 # number of pixels in the MNIST image as number of inputs

number of hidden layers for generator and discriminator

g_n_layers = 3
d_n_layers = 1

neurons in each hidden layer

g_n_neurons = [256, 512, 1024]
d_n_neurons = [256]

define parameter dictionaries

d_params = {}
g_params = {}

activation = tf.nn.leaky_relu
w_initializer = tf.glorot_uniform_initializer
b_initializer = tf.zeros_initializer

define generator

z_p = tf.placeholder(dtype=tf.float32, name='z_p', shape=[None, n_z])

layer = z_p

add generator network weights, biases and layers

with tf.variable_scope('g'):
    for i in range(0, g_n_layers):
        w_name = 'w_{0:04d}'.format(i)
        g_params[w_name] = tf.get_variable(
            name=w_name,
            shape=[n_z if i == 0 else g_n_neurons[i - 1], g_n_neurons[i]],
            initializer=w_initializer())

        b_name = 'b_{0:04d}'.format(i)
        g_params[b_name] = tf.get_variable(
            name=b_name, shape=[g_n_neurons[i]], initializer=b_initializer())

        layer = activation(
            tf.matmul(layer, g_params[w_name]) + g_params[b_name])

    # output (logit) layer
    i = g_n_layers
    w_name = 'w_{0:04d}'.format(i)
    g_params[w_name] = tf.get_variable(
        name=w_name,
        shape=[g_n_neurons[i - 1], n_x],
        initializer=w_initializer())

    b_name = 'b_{0:04d}'.format(i)
    g_params[b_name] = tf.get_variable(
        name=b_name, shape=[n_x], initializer=b_initializer())

    g_logit = tf.matmul(layer, g_params[w_name]) + g_params[b_name]
    g_model = tf.nn.tanh(g_logit)
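Because the final activation is tanh, g_model emits values in [-1, 1], the same range that norm() maps the real pixels into. A one-line static shape check (optional, not in the original code) confirms the generator turns each 256-dimensional noise vector into a flattened 28x28 image:

print(g_model.shape)  # expected: (?, 784)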

define discriminator(s)

add discriminator network weights, biases

with tf.variable_scope('d'):
    for i in range(0, d_n_layers):
        w_name = 'w_{0:04d}'.format(i)
        d_params[w_name] = tf.get_variable(
            name=w_name,
            shape=[n_x if i == 0 else d_n_neurons[i - 1], d_n_neurons[i]],
            initializer=w_initializer())

        b_name = 'b_{0:04d}'.format(i)
        d_params[b_name] = tf.get_variable(
            name=b_name, shape=[d_n_neurons[i]], initializer=b_initializer())

    # output (logit) layer
    i = d_n_layers
    w_name = 'w_{0:04d}'.format(i)
    d_params[w_name] = tf.get_variable(
        name=w_name, shape=[d_n_neurons[i - 1], 1], initializer=w_initializer())

    b_name = 'b_{0:04d}'.format(i)
    d_params[b_name] = tf.get_variable(
        name=b_name, shape=[1], initializer=b_initializer())

###################################################################################

define discriminator_real

input real images

x_p = tf.placeholder(dtype=tf.float32, name='x_p', shape=[None, n_x])

layer = x_p

with tf.variable_scope('d'):
    for i in range(0, d_n_layers):
        w_name = 'w_{0:04d}'.format(i)
        b_name = 'b_{0:04d}'.format(i)

        layer = activation(
            tf.matmul(layer, d_params[w_name]) + d_params[b_name])
        layer = tf.nn.dropout(layer, 0.7)  # in TF 1.x the second argument is keep_prob, so 70% of units are kept

    # output (logit) layer
    i = d_n_layers
    w_name = 'w_{0:04d}'.format(i)
    b_name = 'b_{0:04d}'.format(i)
    d_logit_real = tf.matmul(layer, d_params[w_name]) + d_params[b_name]
    d_model_real = tf.nn.sigmoid(d_logit_real)

define discriminator_fake

input generated fake images

z = g_model

layer = z

with tf.variable_scope('d'):
    for i in range(0, d_n_layers):
        w_name = 'w_{0:04d}'.format(i)
        b_name = 'b_{0:04d}'.format(i)

        layer = activation(
            tf.matmul(layer, d_params[w_name]) + d_params[b_name])
        layer = tf.nn.dropout(layer, 0.7)

    # output (logit) layer
    i = d_n_layers
    w_name = 'w_{0:04d}'.format(i)
    b_name = 'b_{0:04d}'.format(i)

    d_logit_fake = tf.matmul(layer, d_params[w_name]) + d_params[b_name]
    d_model_fake = tf.nn.sigmoid(d_logit_fake)

g_loss = -tf.reduce_mean(tf.log(d_model_fake))
d_loss = -tf.reduce_mean(tf.log(d_model_real) + tf.log(1 - d_model_fake))
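These two lines are the non-saturating form of the GAN objective: the discriminator learns to assign high probability to real images and low probability to generated ones, while the generator maximizes log D(G(z)) instead of minimizing log(1 - D(G(z))), which gives it stronger gradients early in training. In expectation:

L_D = -E_{x~p_data}[ log D(x) ] - E_{z~p_z}[ log(1 - D(G(z))) ]
L_G = -E_{z~p_z}[ log D(G(z)) ]

A numerically safer variant (not used here) computes the same losses from the logits with tf.nn.sigmoid_cross_entropy_with_logits, which avoids log(0) when the sigmoids saturate.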

g_optimizer = tf.train.AdamOptimizer(g_learning_rate)
d_optimizer = tf.train.GradientDescentOptimizer(d_learning_rate)

g_train_op = g_optimizer.minimize(g_loss, var_list=list(g_params.values()))
d_train_op = d_optimizer.minimize(d_loss, var_list=list(d_params.values()))
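Passing var_list keeps each optimizer from touching the other network's weights: d_train_op only updates the discriminator parameters, and g_train_op only the generator's (gradients still flow through the frozen discriminator to reach the generator). Since every variable was created under the 'g' or 'd' variable scope, an equivalent way to collect them (a sketch, assuming a TF 1.x release where tf.trainable_variables accepts a scope argument) would be:

g_vars = tf.trainable_variables(scope='g')  # same set as list(g_params.values())
d_vars = tf.trainable_variables(scope='d')  # same set as list(d_params.values())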

training hyperparameters

n_epochs = 400
batch_size = 100
n_batches = int(mnist.train.num_examples / batch_size)

n_epochs_print = 50

with tf.Session() as tfs:
    tfs.run(tf.global_variables_initializer())
    for epoch in range(n_epochs+1):
        epoch_d_loss = 0.0
        epoch_g_loss = 0.0
        for batch in range(n_batches):
            x_batch, _ = mnist.train.next_batch(batch_size)
            x_batch = norm(x_batch)
            z_batch = np.random.uniform(-1.0, 1.0, size=[batch_size, n_z])
            feed_dict = {x_p: x_batch, z_p: z_batch}
            _, batch_d_loss = tfs.run([d_train_op, d_loss], feed_dict=feed_dict)

            z_batch = np.random.uniform(-1.0, 1.0, size=[batch_size, n_z])
            feed_dict = {z_p: z_batch}
            _, batch_g_loss = tfs.run([g_train_op, g_loss], feed_dict=feed_dict)

            epoch_d_loss += batch_d_loss
            epoch_g_loss += batch_g_loss

        if epoch % n_epochs_print == 0:
            average_d_loss = epoch_d_loss / n_batches
            average_g_loss = epoch_g_loss / n_batches
            print('epoch: {0:04d}   d_loss = {1:0.6f}  g_loss = {2:0.6f}'
                  .format(epoch, average_d_loss, average_g_loss))
            # predict images using the generator model trained so far
            x_pred = tfs.run(g_model, feed_dict={z_p: z_test})
            display_images(x_pred.reshape(-1, pixel_size, pixel_size))
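The trained weights are discarded when the session closes, so every run retrains from scratch. A minimal sketch of how the generator could be kept and reused (the Saver, the checkpoint name 'gan_mnist_generator.ckpt', and the restore session below are assumptions, not part of the original code):

g_saver = tf.train.Saver(var_list=list(g_params.values()))
# inside the training session above, after the epoch loop:
#     g_saver.save(tfs, 'gan_mnist_generator.ckpt')

# later, sample fresh digits from the restored generator:
with tf.Session() as tfs:
    g_saver.restore(tfs, 'gan_mnist_generator.ckpt')
    z_new = np.random.uniform(-1.0, 1.0, size=[8, n_z])
    x_new = tfs.run(g_model, feed_dict={z_p: z_new})
    display_images(x_new.reshape(-1, pixel_size, pixel_size))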