https://github.com/hindupuravinash/the-gan-zoo
Generative Models & Generative Adversarial Networks: A Survey of Resources (Interviews + Paper Classification)
Generative Adversarial Nets
Research Progress on Generative Adversarial Networks (Part 4): Laplacian Pyramid of Adversarial Networks (LAPGAN)
Research Progress on Generative Adversarial Networks (Part 5): Deep Convolutional Generative Adversarial Networks (DCGAN)
Generative Adversarial Net
Implementing Generative Adversarial Nets in Caffe (Part 1)
Implementing Generative Adversarial Nets in Caffe (Part 2)
A Step-by-Step Guide to Writing a Generative Adversarial Network: Full Code Walkthrough with Detailed Explanations (TensorFlow, numpy, matplotlib, scipy)
GAN: Generative Adversarial Nets
http://lijiancheng0614.github.io/all-categories/
http://blog.evjang.com/2016/06/generative-adversarial-nets-in.html
https://plus.google.com/+SoumithChintala/posts/MCtDVqsef6f
http://blog.csdn.net/zhangjunhit/article/category/6869538
http://blog.csdn.net/AMDS123/article/category/6687276
What are the latest developments in generative adversarial networks (GANs), and in which practical scenarios can they be applied?
CNN-based Image Inpainting
Image De-raining Using a Conditional Generative Adversarial Network
Companion code: https://github.com/hezhangsprinter/ID-CGAN
WGAN
TensorFlow implementation of Wasserstein GAN
PyTorch implementation of the WGAN model
https://gist.github.com/soumith/71995cecc5b99cda38106ad64503cee3
Live coverage of the GAN inventor's NIPS 2016 talk: a comprehensive look at the principles and future of generative adversarial networks (slides included)
Research Progress on GANs (Generative Adversarial Nets)
How should we assess Google's recent BEGAN model, which achieved striking results on face datasets?
'''Deep Convolutional Generative Adversarial Network (DCGAN) Tutorial
This tutorial walks through an implementation of DCGAN as described in Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks.
To learn more about generative adversarial networks, see my Medium post on them.
'''
#Import the libraries we will need.
import tensorflow as tf
import numpy as np
import input_data
import matplotlib.pyplot as plt
import tensorflow.contrib.slim as slim
import os
import scipy.misc
import scipy
'''We will be using the MNIST dataset. input_data is a library that downloads the dataset and unzips it automatically. It can be acquired from GitHub here: https://gist.github.com/awjuliani/1d21151bc17362bf6738c3dc02f37906
'''
mnist = input_data.read_data_sets("MNIST_data/", one_hot=False)
#Helper Functions
#This function performs a leaky ReLU activation, which is needed for the discriminator network.
def lrelu(x, leak=0.2, name="lrelu"):
    with tf.variable_scope(name):
        f1 = 0.5 * (1 + leak)
        f2 = 0.5 * (1 - leak)
        return f1 * x + f2 * abs(x)
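#Note: this weighted-absolute-value form is algebraically equivalent to max(x, leak*x) for 0 <= leak <= 1;
#an equivalent one-liner (a sketch, not used below) would be: tf.maximum(x, leak * x).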
#The functions below are taken from carpedm20's implementation: https://github.com/carpedm20/DCGAN-tensorflow
#They allow saving sample images from the generator to follow training progress.
def save_images(images, size, image_path):
    return imsave(inverse_transform(images), size, image_path)
def imsave(images, size, path):
    return scipy.misc.imsave(path, merge(images, size))
def inverse_transform(images):
    return (images+1.)/2.
def merge(images, size):
    h, w = images.shape[1], images.shape[2]
    img = np.zeros((h * size[0], w * size[1]))
    for idx, image in enumerate(images):
        i = idx % size[1]
        j = idx // size[1]  #Integer division so the slice indices below stay ints under Python 3 as well.
        img[j*h:j*h+h, i*w:i*w+w] = image
    return img
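#For reference, save_images expects a batch of shape [N, height, width] with pixel values in [-1, 1]
#(inverse_transform rescales them to [0, 1] before writing). A typical call (a sketch; 'samples' is a
#hypothetical array of generator outputs) would be:
#    save_images(np.reshape(samples[0:36], [36, 32, 32]), [6, 6], './figs/sample.png')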
'''Defining the Adversarial Networks
Generator Network
The generator takes a vector of random numbers and transforms it into a 32x32 image. Each layer in the network involves a strided transpose convolution, batch normalization, and rectified nonlinearity. Tensorflow's slim library allows us to easily define each of these layers.
'''
def generator(z):
    zP = slim.fully_connected(z,4*4*256,normalizer_fn=slim.batch_norm,\
        activation_fn=tf.nn.relu,scope='g_project',weights_initializer=initializer)
    zCon = tf.reshape(zP,[-1,4,4,256])
    gen1 = slim.convolution2d_transpose(\
        zCon,num_outputs=64,kernel_size=[5,5],stride=[2,2],\
        padding="SAME",normalizer_fn=slim.batch_norm,\
        activation_fn=tf.nn.relu,scope='g_conv1', weights_initializer=initializer)
    gen2 = slim.convolution2d_transpose(\
        gen1,num_outputs=32,kernel_size=[5,5],stride=[2,2],\
        padding="SAME",normalizer_fn=slim.batch_norm,\
        activation_fn=tf.nn.relu,scope='g_conv2', weights_initializer=initializer)
    gen3 = slim.convolution2d_transpose(\
        gen2,num_outputs=16,kernel_size=[5,5],stride=[2,2],\
        padding="SAME",normalizer_fn=slim.batch_norm,\
        activation_fn=tf.nn.relu,scope='g_conv3', weights_initializer=initializer)
    g_out = slim.convolution2d_transpose(\
        gen3,num_outputs=1,kernel_size=[32,32],padding="SAME",\
        biases_initializer=None,activation_fn=tf.nn.tanh,\
        scope='g_out', weights_initializer=initializer)
    return g_out
'''Discriminator Network
The discriminator network takes a 32x32 image as input and transforms it into a single probability that the image came from real-world data. Again we use tf.slim to define the convolutional layers, batch normalization, and weight initialization.
'''
def discriminator(bottom, reuse=False):
    dis1 = slim.convolution2d(bottom,16,[4,4],stride=[2,2],padding="SAME",\
        biases_initializer=None,activation_fn=lrelu,\
        reuse=reuse,scope='d_conv1',weights_initializer=initializer)
    dis2 = slim.convolution2d(dis1,32,[4,4],stride=[2,2],padding="SAME",\
        normalizer_fn=slim.batch_norm,activation_fn=lrelu,\
        reuse=reuse,scope='d_conv2', weights_initializer=initializer)
    dis3 = slim.convolution2d(dis2,64,[4,4],stride=[2,2],padding="SAME",\
        normalizer_fn=slim.batch_norm,activation_fn=lrelu,\
        reuse=reuse,scope='d_conv3',weights_initializer=initializer)
    d_out = slim.fully_connected(slim.flatten(dis3),1,activation_fn=tf.nn.sigmoid,\
        reuse=reuse,scope='d_out', weights_initializer=initializer)
    return d_out
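#For orientation (a sketch of the expected shapes, assuming the placeholders and initializer defined
#below): generator(z_in) produces a (?, 32, 32, 1) tensor, since the 4x4x256 projection is upsampled by
#three stride-2 transposed convolutions (4 -> 8 -> 16 -> 32), and discriminator(real_in) produces a
#(?, 1) tensor of probabilities after three stride-2 convolutions and a sigmoid output layer.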
#Connecting them together
tf.reset_default_graph()
z_size = 100 #Size of z vector used for generator.
#This initializer is used to initialize all the weights of the network.
initializer = tf.truncated_normal_initializer(stddev=0.02)
#These two placeholders are used for input into the generator and discriminator, respectively.
z_in = tf.placeholder(shape=[None,z_size],dtype=tf.float32) #Random vector
real_in = tf.placeholder(shape=[None,32,32,1],dtype=tf.float32) #Real images
Gz = generator(z_in) #Generates images from random z vectors
Dx = discriminator(real_in) #Produces probabilities for real images
Dg = discriminator(Gz,reuse=True) #Produces probabilities for generator images
#These functions together define the optimization objective of the GAN.
d_loss = -tf.reduce_mean(tf.log(Dx) + tf.log(1.-Dg)) #This optimizes the discriminator.
g_loss = -tf.reduce_mean(tf.log(Dg)) #This optimizes the generator.
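#Note: these are the standard non-saturating GAN losses, with the generator minimizing -log(D(G(z))).
#If training ever produces NaNs from log(0), one optional tweak (an assumption, not part of the original
#tutorial) is to add a small epsilon inside the logs, e.g.:
#    d_loss = -tf.reduce_mean(tf.log(Dx + 1e-8) + tf.log(1. - Dg + 1e-8))
#    g_loss = -tf.reduce_mean(tf.log(Dg + 1e-8))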
tvars = tf.trainable_variables()
#The below code is responsible for applying gradient descent to update the GAN.
trainerD = tf.train.AdamOptimizer(learning_rate=0.0002,beta1=0.5)
trainerG = tf.train.AdamOptimizer(learning_rate=0.0002,beta1=0.5)
d_grads = trainerD.compute_gradients(d_loss,tvars[9:]) #Only update the weights for the discriminator network.
g_grads = trainerG.compute_gradients(g_loss,tvars[0:9]) #Only update the weights for the generator network.
update_D = trainerD.apply_gradients(d_grads)
update_G = trainerG.apply_gradients(g_grads)
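#Note: the index slicing above (tvars[0:9] for the generator, tvars[9:] for the discriminator) relies on
#the generator variables being created before the discriminator variables. A more robust alternative
#(a sketch, not part of the original tutorial) is to filter by the scope-name prefixes used above:
#    g_vars = [v for v in tvars if v.name.startswith('g_')]
#    d_vars = [v for v in tvars if v.name.startswith('d_')]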
'''Training the network
Now that we have fully defined our network, it is time to train it!
'''
batch_size = 128 #Size of image batch to apply at each iteration.
iterations = 500000 #Total number of iterations to use.
sample_directory = './figs' #Directory to save sample images from generator in.
model_directory = './models' #Directory to save trained model to.
init = tf.initialize_all_variables()
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(init)
    for i in range(iterations):
        zs = np.random.uniform(-1.0,1.0,size=[batch_size,z_size]).astype(np.float32) #Generate a random z batch.
        xs,_ = mnist.train.next_batch(batch_size) #Draw a sample batch from the MNIST dataset.
        xs = (np.reshape(xs,[batch_size,28,28,1]) - 0.5) * 2.0 #Transform it to be between -1 and 1.
        xs = np.lib.pad(xs, ((0,0),(2,2),(2,2),(0,0)),'constant', constant_values=(-1, -1)) #Pad the images so they are 32x32.
        _,dLoss = sess.run([update_D,d_loss],feed_dict={z_in:zs,real_in:xs}) #Update the discriminator.
        _,gLoss = sess.run([update_G,g_loss],feed_dict={z_in:zs}) #Update the generator, twice for good measure.
        _,gLoss = sess.run([update_G,g_loss],feed_dict={z_in:zs})
        if i % 10 == 0:
            print("Gen Loss: " + str(gLoss) + " Disc Loss: " + str(dLoss))
            z2 = np.random.uniform(-1.0,1.0,size=[batch_size,z_size]).astype(np.float32) #Generate another z batch.
            newZ = sess.run(Gz,feed_dict={z_in:z2}) #Use new z to get sample images from the generator.
            if not os.path.exists(sample_directory):
                os.makedirs(sample_directory)
            #Save sample generator images for viewing training progress.
            save_images(np.reshape(newZ[0:36],[36,32,32]),[6,6],sample_directory+'/fig'+str(i)+'.png')
        if i % 1000 == 0 and i != 0:
            if not os.path.exists(model_directory):
                os.makedirs(model_directory)
            saver.save(sess,model_directory+'/model-'+str(i)+'.cptk')
            print("Saved Model")
'''Using a trained network
Once we have a trained model saved, we may want to use it to generate new images, and explore the representation it has learned.
'''
sample_directory = './figs' #Directory to save sample images from generator in.
model_directory = './models' #Directory to load trained model from.
batch_size_sample = 36
init = tf.initialize_all_variables()
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(init)
    #Reload the trained model.
    print('Loading Model...')
    ckpt = tf.train.get_checkpoint_state(model_directory)
    saver.restore(sess,ckpt.model_checkpoint_path)
    zs = np.random.uniform(-1.0,1.0,size=[batch_size_sample,z_size]).astype(np.float32) #Generate a random z batch.
    newZ = sess.run(Gz,feed_dict={z_in:zs}) #Use the z batch to get sample images from the generator.
    if not os.path.exists(sample_directory):
        os.makedirs(sample_directory)
    save_images(np.reshape(newZ[0:batch_size_sample],[36,32,32]),[6,6],sample_directory+'/fig_sample.png')