Differences and connections among GAN, WGAN, and WGAN-GP

Reposted from: https://blog.csdn.net/qq_38826019/article/details/80786061
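A quick recap of how the three objectives differ, matching the implementation below: the vanilla GAN trains the discriminator with a sigmoid cross-entropy loss on its logits and trains the generator to make fake samples score as "real"; WGAN drops the sigmoid and uses the raw critic output directly, clips the critic weights to [-0.01, 0.01] after every critic update, and switches the optimizer to RMSProp; WGAN-GP keeps the Wasserstein loss but replaces weight clipping with a gradient penalty on samples interpolated between real and fake data, which allows Adam again. A minimal NumPy sketch of just the loss values (an illustration, not the training code; d_real/d_fake stand for discriminator/critic outputs on real and fake batches):

import numpy as np

def sigmoid_ce(logits, labels):
    # numerically stable sigmoid cross-entropy, same formula as tf.nn.sigmoid_cross_entropy_with_logits
    return np.mean(np.maximum(logits, 0) - logits * labels + np.log1p(np.exp(-np.abs(logits))))

def gan_losses(d_real, d_fake):
    # vanilla GAN: real labeled 1, fake labeled 0; the generator wants fake scored as 1
    d_loss = (sigmoid_ce(d_real, np.ones_like(d_real)) + sigmoid_ce(d_fake, np.zeros_like(d_fake))) / 2.0
    g_loss = sigmoid_ce(d_fake, np.ones_like(d_fake))
    return d_loss, g_loss

def wgan_losses(d_real, d_fake):
    # WGAN: raw critic scores, no sigmoid; weight clipping is applied separately after each critic step
    d_loss = np.mean(d_fake) - np.mean(d_real)
    g_loss = -np.mean(d_fake)
    return d_loss, g_loss

def wgan_gp_penalty(critic_grads, lamda=10.0):
    # WGAN-GP: penalty added to the critic loss; critic_grads is d(critic)/d(interpolated input)
    slopes = np.sqrt(np.sum(np.square(critic_grads), axis=1))
    return lamda * np.mean((slopes - 1.0) ** 2)

# tiny usage example with dummy critic outputs
d_real = np.random.randn(4, 1)
d_fake = np.random.randn(4, 1)
print(gan_losses(d_real, d_fake), wgan_losses(d_real, d_fake))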

The following is a quick test on MNIST, mainly to verify that the models build and train; only a few epochs were run. Results:

WGAN, 2 epochs


WGAN-GP, 6 epochs


GAN, 10 epochs


With more time you could train for more epochs; those results are not shown here.

The code is as follows:


   
   
from datetime import datetime
import os
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from six.moves import xrange
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
data = mnist.train.images  # (55000, 784)

# Save a 10x10 grid of generated digits for the given epoch
def Save_genImages(gen, epoch):
    r, c = 10, 10
    fig, axs = plt.subplots(r, c)
    cnt = 0
    print(gen.shape)
    for i in range(r):
        for j in range(c):
            axs[i, j].imshow(gen[cnt][:, :], cmap='Greys_r')
            axs[i, j].axis('off')
            cnt += 1
    if not os.path.exists('gen_mnist'):
        os.makedirs('gen_mnist')
    fig.savefig('gen_mnist/%d.jpg' % epoch)
    plt.close()

# Append the current losses to a text file
def Save_lossValue(epoch, iters, d_loss, g_loss):
    with open('loss2.txt', 'a') as f:
        f.write("epoch %d, batch %d, d_loss: %.8f, g_loss: %.8f" % (epoch, iters, d_loss, g_loss) + '\n')

def plot_loss(loss):
    fig, ax = plt.subplots(figsize=(20, 7))
    losses = np.array(loss)
    plt.plot(losses.T[0], label="Discriminator Loss")
    plt.plot(losses.T[1], label="Generator Loss")
    plt.title("Training Losses")
    plt.legend()
    plt.savefig('loss2.jpg')
    plt.show()

# ReLU activation
def Relu(name, tensor):
    return tf.nn.relu(tensor, name)

# LeakyReLU activation
def LeakyRelu(x, alpha=0.2):
    return tf.maximum(x, alpha * x)

# Fully connected layer
def Fully_connected(name, value, output_shape):
    with tf.variable_scope(name, reuse=None) as scope:
        shape = value.get_shape().as_list()
        w = tf.get_variable('w', [shape[1], output_shape], dtype=tf.float32,
                            initializer=tf.random_normal_initializer(stddev=0.01))
        b = tf.get_variable('b', [output_shape], dtype=tf.float32, initializer=tf.constant_initializer(0.0))
        return tf.matmul(value, w) + b

# 1-D convolution
def Conv1d(name, tensor, ksize, out_dim, stride, padding, stddev=0.01):
    with tf.variable_scope(name):
        w = tf.get_variable('w', [ksize, tensor.get_shape()[-1], out_dim], dtype=tf.float32,
                            initializer=tf.random_normal_initializer(stddev=stddev))
        var = tf.nn.conv1d(tensor, w, stride, padding=padding)
        b = tf.get_variable('b', [out_dim], 'float32', initializer=tf.constant_initializer(0.01))
        return tf.nn.bias_add(var, b)

# 2-D convolution
def Conv2d(name, tensor, filter_size1, filter_size2, out_dim, stride1, stride2, padding, stddev=0.01):
    with tf.variable_scope(name):
        w = tf.get_variable('w', [filter_size1, filter_size2, tensor.get_shape()[-1], out_dim], dtype=tf.float32,
                            initializer=tf.random_normal_initializer(stddev=stddev))
        var = tf.nn.conv2d(tensor, w, [1, stride1, stride2, 1], padding=padding)
        b = tf.get_variable('b', [out_dim], 'float32', initializer=tf.constant_initializer(0.01))
        return tf.nn.bias_add(var, b)

# 2-D transposed convolution
def Deconv2d(name, tensor, filter_size1, filter_size2, outshape, stride1, stride2, padding, stddev=0.01):
    with tf.variable_scope(name):
        w = tf.get_variable('w', [filter_size1, filter_size2, outshape[-1], tensor.get_shape()[-1]], dtype=tf.float32,
                            initializer=tf.random_normal_initializer(stddev=stddev))
        var = tf.nn.conv2d_transpose(tensor, w, outshape, strides=[1, stride1, stride2, 1], padding=padding)
        b = tf.get_variable('b', [outshape[-1]], 'float32', initializer=tf.constant_initializer(0.01))
        return tf.nn.bias_add(var, b)

def Get_inputs(real_size, noise_size):
    real_img = tf.placeholder(tf.float32, [None, real_size], name='real_img')
    noise_img = tf.placeholder(tf.float32, [None, noise_size], name='noise_img')
    return real_img, noise_img

def Discriminator(img, reuse=False, name='discriminator'):
    with tf.variable_scope(name, reuse=reuse):
        output = Fully_connected('df1', img, 2048)
        output = LeakyRelu(output)
        output = Fully_connected('df2', output, 1024)
        output = LeakyRelu(output)
        output = Fully_connected('df3', output, 512)
        output = LeakyRelu(output)
        output = Fully_connected('df4', output, 256)
        output = LeakyRelu(output)
        output = Fully_connected('df5', output, 1)
        return output

def Generator(noise_img, reuse=False, name='generator'):
    with tf.variable_scope(name, reuse=reuse):
        output = Fully_connected('gf1', noise_img, 2048)
        output = tf.layers.batch_normalization(output, momentum=0.8, training=True)
        output = tf.nn.relu(output)
        output = Fully_connected('gf2', output, 1024)
        output = tf.layers.batch_normalization(output, momentum=0.8, training=True)
        output = tf.nn.relu(output)
        output = Fully_connected('gf3', output, 512)
        output = tf.layers.batch_normalization(output, momentum=0.8, training=True)
        output = tf.nn.relu(output)
        output = Fully_connected('gf4', output, 256)
        output = tf.layers.batch_normalization(output, momentum=0.8, training=True)
        output = tf.nn.relu(output)
        output = Fully_connected('gf5', output, 784)
        output = tf.nn.tanh(output)
        return output

mode = 'wgan'     # gan, wgan, wgan-gp
noise = 'normal'  # normal0_1, normal, uniform
batch_size = 100
epochs = 10
n_sample = 100
lamda = 10
img_size = 784
noise_size = 100

tf.reset_default_graph()
real_img, noise_img = Get_inputs(img_size, noise_size)  # placeholders fed at training time
real_data = real_img
fake_data = Generator(noise_img)
disc_real = Discriminator(real_data, reuse=False)
disc_fake = Discriminator(fake_data, reuse=True)

# Generator and discriminator variables
train_vars = tf.trainable_variables()
g_vars = [var for var in train_vars if var.name.startswith("generator")]
d_vars = [var for var in train_vars if var.name.startswith("discriminator")]

# Vanilla GAN
if mode == 'gan':
    gen_cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_fake, labels=tf.ones_like(disc_fake)))  # generator loss
    disc_cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_fake, labels=tf.zeros_like(disc_fake)))
    disc_cost += tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_real, labels=tf.ones_like(disc_real)))
    disc_cost /= 2.  # discriminator loss
    # Optimizers
    gen_train_op = tf.train.AdamOptimizer(learning_rate=2e-4, beta1=0.5).minimize(gen_cost, var_list=g_vars)
    disc_train_op = tf.train.AdamOptimizer(learning_rate=2e-4, beta1=0.5).minimize(disc_cost, var_list=d_vars)
    clip_disc_weights = None
# WGAN
elif mode == 'wgan':
    gen_cost = -tf.reduce_mean(disc_fake)  # generator loss
    disc_cost = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real)  # critic loss
    # Optimizers
    gen_train_op = tf.train.RMSPropOptimizer(learning_rate=5e-5).minimize(gen_cost, var_list=g_vars)
    disc_train_op = tf.train.RMSPropOptimizer(learning_rate=5e-5).minimize(disc_cost, var_list=d_vars)
    clip_ops = []
    # Clip the critic weights to [-0.01, 0.01]
    for var in train_vars:
        if var.name.startswith("discriminator"):
            clip_bounds = [-0.01, 0.01]
            clip_ops.append(tf.assign(var, tf.clip_by_value(var, clip_bounds[0], clip_bounds[1])))
    clip_disc_weights = tf.group(*clip_ops)
# WGAN-GP
elif mode == 'wgan-gp':
    gen_cost = -tf.reduce_mean(disc_fake)  # generator loss
    disc_cost = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real)  # critic loss
    # Gradient penalty on samples interpolated between real and fake data
    alpha = tf.random_uniform(shape=[batch_size, 1], minval=0., maxval=1.)
    interpolates = alpha * fake_data + (1 - alpha) * real_data
    gradients = tf.gradients(Discriminator(interpolates, reuse=True), [interpolates])[0]
    slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))
    gradient_penalty = tf.reduce_mean((slopes - 1.) ** 2)
    disc_cost += lamda * gradient_penalty
    clip_disc_weights = None
    # Optimizers
    gen_train_op = tf.train.AdamOptimizer(learning_rate=1e-4, beta1=0.5, beta2=0.9).minimize(gen_cost, var_list=g_vars)
    disc_train_op = tf.train.AdamOptimizer(learning_rate=1e-4, beta1=0.5, beta2=0.9).minimize(disc_cost, var_list=d_vars)

saver = tf.train.Saver()

def Train():
    losses = []
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for e in range(epochs):
            for i in xrange(len(data) // batch_size):
                batch_images = data[i * batch_size:(i + 1) * batch_size]
                batch_images = batch_images.reshape(batch_size, 784)
                # batch = mnist.train.next_batch(batch_size)
                # batch_images = batch[0].reshape((batch_size, 784))
                if noise != 'normal0_1':
                    batch_images = batch_images * 2 - 1  # rescale to [-1, 1] to match the tanh output
                if noise == 'uniform':
                    batch_noise = np.random.uniform(-1, 1, size=(batch_size, noise_size))
                elif noise == 'normal':
                    batch_noise = np.random.normal(-1, 1, size=(batch_size, noise_size))
                elif noise == 'normal0_1':
                    batch_noise = np.random.normal(0, 1, size=(batch_size, noise_size))
                if mode == 'gan':  # vanilla GAN: one discriminator update per generator update
                    disc_iters = 1
                else:              # WGAN and WGAN-GP: several critic updates per generator update
                    disc_iters = 6
                for x in range(0, disc_iters):
                    _, d_loss = sess.run([disc_train_op, disc_cost], feed_dict={real_data: batch_images, noise_img: batch_noise})
                    if clip_disc_weights is not None:
                        _ = sess.run(clip_disc_weights)
                _, g_loss = sess.run([gen_train_op, gen_cost], feed_dict={noise_img: batch_noise})
                Save_lossValue(e, i, d_loss, g_loss)
                print("epoch %d, batch %d, d_loss: %.8f, g_loss: %.8f" % (e, i, d_loss, g_loss))
                losses.append((d_loss, g_loss))
            if noise == 'uniform':
                sample_noise = np.random.uniform(-1, 1, size=(batch_size, noise_size))
            elif noise == 'normal':
                sample_noise = np.random.normal(-1, 1, size=(batch_size, noise_size))
            elif noise == 'normal0_1':
                sample_noise = np.random.normal(0, 1, size=(batch_size, noise_size))
            gen_samples = sess.run(Generator(noise_img, reuse=True), feed_dict={noise_img: sample_noise})
            print(gen_samples.shape)
            saver.save(sess, 'checkpoints/test2.ckpt')
            if e % 1 == 0:
                gen = gen_samples.reshape(100, 28, 28)
                if noise != 'normal0_1':
                    gen = (gen + 1) / 2
                Save_genImages(gen, e)
        plot_loss(losses)

def Test():
    saver = tf.train.Saver(var_list=g_vars)
    with tf.Session() as sess:
        saver.restore(sess, tf.train.latest_checkpoint("checkpoints"))
        # saver.restore(sess, 'checkpoints/test2.ckpt')
        if noise == 'uniform':
            sample_noise = np.random.uniform(-1, 1, size=(batch_size, noise_size))
        elif noise == 'normal':
            sample_noise = np.random.normal(-1, 1, size=(batch_size, noise_size))
        elif noise == 'normal0_1':
            sample_noise = np.random.normal(0, 1, size=(batch_size, noise_size))
        gen_samples = sess.run(Generator(noise_img, reuse=True), feed_dict={noise_img: sample_noise})
        if noise != 'normal0_1':
            gen_images = (gen_samples + 1) / 2

if __name__ == '__main__':
    Train()
    # Test()
The following tests my own dataset:

   
   
from datetime import datetime
import os
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from six.moves import xrange

# Custom dataset: each sample holds 3 curves; x coordinates are stored in [0:30] and y in [30:60]
data = np.load('final37.npy')
data = data[:, :, 0:60]

# Plot a 10x10 grid of the original data
def Show_images(data, show_nums, save=False):
    index = 0
    for n in range(show_nums):
        show_images = data[index:index + 100]
        show_images = show_images.reshape(100, 3, 60, 1)
        r, c = 10, 10
        fig, axs = plt.subplots(r, c)
        cnt = 0
        for i in range(r):
            for j in range(c):
                xy = show_images[cnt]
                for k in range(len(xy)):
                    x = xy[k][0:30]
                    y = xy[k][30:60]
                    if k == 0:
                        axs[i, j].plot(x, y, color='blue', linewidth=2)
                    if k == 1:
                        axs[i, j].plot(x, y, color='red', linewidth=2)
                    if k == 2:
                        axs[i, j].plot(x, y, color='green', linewidth=2)
                axs[i, j].axis('off')
                cnt += 1
        index += 100
        if save:
            if not os.path.exists('This_epoch'):
                os.makedirs('This_epoch')
            fig.savefig('This_epoch/%d.jpg' % n)
            plt.close()
        else:
            plt.show()

# Save a 10x10 grid of generated samples for the given epoch
def Save_genImages(gen, epoch):
    r, c = 10, 10
    fig, axs = plt.subplots(r, c)
    cnt = 0
    for i in range(r):
        for j in range(c):
            xy = gen[cnt]
            for k in range(len(xy)):
                x = xy[k][0:30]
                y = xy[k][30:60]
                if k == 0:
                    axs[i, j].plot(x, y, color='blue')
                if k == 1:
                    axs[i, j].plot(x, y, color='red')
                if k == 2:
                    axs[i, j].plot(x, y, color='green')
            axs[i, j].axis('off')
            cnt += 1
    if not os.path.exists('gen_img1'):
        os.makedirs('gen_img1')
    fig.savefig('gen_img1/%d.jpg' % epoch)
    plt.close()

# Append the current losses to a text file
def Save_lossValue(epoch, iters, d_loss, g_loss):
    with open('losst.txt', 'a') as f:
        f.write("epoch %d, batch %d, d_loss: %.8f, g_loss: %.8f" % (epoch, iters, d_loss, g_loss) + '\n')

def plot_loss(loss):
    fig, ax = plt.subplots(figsize=(20, 7))
    losses = np.array(loss)
    plt.plot(losses.T[0], label="Discriminator Loss")
    plt.plot(losses.T[1], label="Generator Loss")
    plt.title("Training Losses")
    plt.legend()
    plt.savefig('loss.jpg')
    plt.show()

# ReLU activation
def Relu(name, tensor):
    return tf.nn.relu(tensor, name)

# LeakyReLU activation
def LeakyRelu(name, x, leak=0.2):
    return tf.maximum(x, leak * x, name=name)

# Fully connected layer
def Fully_connected(name, value, output_shape):
    with tf.variable_scope(name, reuse=None) as scope:
        shape = value.get_shape().as_list()
        w = tf.get_variable('w', [shape[1], output_shape], dtype=tf.float32,
                            initializer=tf.random_normal_initializer(stddev=0.01))
        b = tf.get_variable('b', [output_shape], dtype=tf.float32, initializer=tf.constant_initializer(0.0))
        return tf.matmul(value, w) + b

# 1-D convolution
def Conv1d(name, tensor, ksize, out_dim, stride, padding, stddev=0.01):
    with tf.variable_scope(name):
        w = tf.get_variable('w', [ksize, tensor.get_shape()[-1], out_dim], dtype=tf.float32,
                            initializer=tf.random_normal_initializer(stddev=stddev))
        var = tf.nn.conv1d(tensor, w, stride, padding=padding)
        b = tf.get_variable('b', [out_dim], 'float32', initializer=tf.constant_initializer(0.01))
        return tf.nn.bias_add(var, b)

# 2-D convolution
def Conv2d(name, tensor, filter_size1, filter_size2, out_dim, stride1, stride2, padding, stddev=0.01):
    with tf.variable_scope(name):
        w = tf.get_variable('w', [filter_size1, filter_size2, tensor.get_shape()[-1], out_dim], dtype=tf.float32,
                            initializer=tf.random_normal_initializer(stddev=stddev))
        var = tf.nn.conv2d(tensor, w, [1, stride1, stride2, 1], padding=padding)
        b = tf.get_variable('b', [out_dim], 'float32', initializer=tf.constant_initializer(0.01))
        return tf.nn.bias_add(var, b)

# 2-D transposed convolution
def Deconv2d(name, tensor, filter_size1, filter_size2, outshape, stride1, stride2, padding, stddev=0.01):
    with tf.variable_scope(name):
        w = tf.get_variable('w', [filter_size1, filter_size2, outshape[-1], tensor.get_shape()[-1]], dtype=tf.float32,
                            initializer=tf.random_normal_initializer(stddev=stddev))
        var = tf.nn.conv2d_transpose(tensor, w, outshape, strides=[1, stride1, stride2, 1], padding=padding)
        b = tf.get_variable('b', [outshape[-1]], 'float32', initializer=tf.constant_initializer(0.01))
        return tf.nn.bias_add(var, b)

def Get_inputs(real_size, noise_size):
    real_img = tf.placeholder(tf.float32, [None, real_size], name='real_img')
    noise_img = tf.placeholder(tf.float32, [None, noise_size], name='noise_img')
    return real_img, noise_img

def Generator(noise_img, reuse=False, alpha=0.01):
    with tf.variable_scope('generator', reuse=reuse):
        # print(noise_img.shape)
        output = tf.layers.dense(noise_img, 128)
        # print(output.shape)
        output = tf.maximum(alpha * output, output)
        output = tf.layers.batch_normalization(output, momentum=0.8, training=True)
        output = tf.layers.dropout(output, rate=0.25)
        output = tf.layers.dense(output, 512)
        output = tf.maximum(alpha * output, output)
        output = tf.layers.batch_normalization(output, momentum=0.8, training=True)
        output = tf.layers.dropout(output, rate=0.25)
        output = tf.layers.dense(output, 180)
        output = tf.tanh(output)
        return output

def Discriminator(img, reuse=False, alpha=0.01):
    with tf.variable_scope("discriminator", reuse=reuse):
        print(img.shape)
        output = tf.layers.dense(img, 512)
        output = tf.maximum(alpha * output, output)
        output = tf.layers.dense(output, 128)
        output = tf.maximum(alpha * output, output)
        output = tf.layers.dense(output, 1)
        return output

mode = 'gan'  # gan, wgan, wgan-gp
batch_size = 100
epochs = 1
n_sample = 100
learning_rate = 0.0002
lamda = 10
img_size = 180
noise_size = 100

tf.reset_default_graph()
real_img, noise_img = Get_inputs(img_size, noise_size)  # placeholders fed at training time
real_data = real_img
fake_data = Generator(noise_img)
disc_real = Discriminator(real_data, reuse=False)
disc_fake = Discriminator(fake_data, reuse=True)

# Generator and discriminator variables
train_vars = tf.trainable_variables()
g_vars = [var for var in train_vars if var.name.startswith("generator")]
d_vars = [var for var in train_vars if var.name.startswith("discriminator")]

# Vanilla GAN
if mode == 'gan':
    gen_cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_fake, labels=tf.ones_like(disc_fake)))  # generator loss
    disc_cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_fake, labels=tf.zeros_like(disc_fake)))
    disc_cost += tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_real, labels=tf.ones_like(disc_real)))
    disc_cost /= 2.  # discriminator loss
    # Optimizers
    gen_train_op = tf.train.AdamOptimizer(learning_rate=2e-4, beta1=0.5).minimize(gen_cost, var_list=g_vars)
    disc_train_op = tf.train.AdamOptimizer(learning_rate=2e-4, beta1=0.5).minimize(disc_cost, var_list=d_vars)
    clip_disc_weights = None
# WGAN
elif mode == 'wgan':
    gen_cost = -tf.reduce_mean(disc_fake)  # generator loss
    disc_cost = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real)  # critic loss
    # Optimizers
    gen_train_op = tf.train.RMSPropOptimizer(learning_rate=5e-5).minimize(gen_cost, var_list=g_vars)
    disc_train_op = tf.train.RMSPropOptimizer(learning_rate=5e-5).minimize(disc_cost, var_list=d_vars)
    clip_ops = []
    # Clip the critic weights to [-0.01, 0.01]
    for var in train_vars:
        if var.name.startswith("discriminator"):
            clip_bounds = [-0.01, 0.01]
            clip_ops.append(tf.assign(var, tf.clip_by_value(var, clip_bounds[0], clip_bounds[1])))
    clip_disc_weights = tf.group(*clip_ops)
# WGAN-GP
elif mode == 'wgan-gp':
    gen_cost = -tf.reduce_mean(disc_fake)  # generator loss
    disc_cost = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real)  # critic loss
    # Gradient penalty on samples interpolated between real and fake data
    alpha = tf.random_uniform(shape=[batch_size, 1], minval=0., maxval=1.)
    interpolates = alpha * fake_data + (1 - alpha) * real_data
    gradients = tf.gradients(Discriminator(interpolates, reuse=True), [interpolates])[0]
    slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))
    gradient_penalty = tf.reduce_mean((slopes - 1.) ** 2)
    disc_cost += lamda * gradient_penalty
    clip_disc_weights = None
    # Optimizers
    gen_train_op = tf.train.AdamOptimizer(learning_rate=1e-4, beta1=0.5, beta2=0.9).minimize(gen_cost, var_list=g_vars)
    disc_train_op = tf.train.AdamOptimizer(learning_rate=1e-4, beta1=0.5, beta2=0.9).minimize(disc_cost, var_list=d_vars)

saver = tf.train.Saver()

def Train():
    losses = []
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for e in range(epochs):
            for i in xrange(len(data) // batch_size):
                batch_images = data[i * batch_size:(i + 1) * batch_size]
                batch_images = batch_images.reshape(batch_size, 180)
                batch_images = batch_images * 2 - 1  # rescale to [-1, 1] to match the tanh output
                batch_noise = np.random.uniform(-1, 1, size=(batch_size, noise_size))
                if mode == 'gan':
                    disc_iters = 2  # discriminator updates per generator update
                else:               # wgan and wgan-gp
                    disc_iters = 2
                for x in range(0, disc_iters):
                    _, d_loss = sess.run([disc_train_op, disc_cost], feed_dict={real_data: batch_images, noise_img: batch_noise})
                    if clip_disc_weights is not None:
                        _ = sess.run(clip_disc_weights)
                _, g_loss = sess.run([gen_train_op, gen_cost], feed_dict={noise_img: batch_noise})
                Save_lossValue(e, i, d_loss, g_loss)
                print("epoch %d, batch %d, d_loss: %.8f, g_loss: %.8f" % (e, i, d_loss, g_loss))
                losses.append((d_loss, g_loss))
            sample_noise = np.random.uniform(-1, 1, size=(100, 100))
            gen_samples = sess.run(Generator(noise_img, reuse=True), feed_dict={noise_img: sample_noise})
            print(gen_samples.shape)
            saver.save(sess, 'checkpoints/test.ckpt')
            if e % 1 == 0:
                gen = gen_samples.reshape(100, 3, 60, 1)
                Save_genImages(gen, e)
        plot_loss(losses)

def Test():
    saver = tf.train.Saver(var_list=g_vars)
    with tf.Session() as sess:
        saver.restore(sess, tf.train.latest_checkpoint("checkpoints"))
        # saver.restore(sess, 'checkpoints/b.ckpt')
        sample_noise = np.random.uniform(-1, 1, size=(10000, noise_size))
        gen_samples = sess.run(Generator(noise_img, reuse=True), feed_dict={noise_img: sample_noise})
        gen_images = (gen_samples + 1) / 2
        show_num = len(gen_images) // 100
        Show_images(gen_images, show_num, save=True)

if __name__ == '__main__':
    Train()
    # Test()

   
   
from datetime import datetime
import os
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from six.moves import xrange

# Same custom dataset, this time trained with the fully connected
# Generator/Discriminator from the MNIST script (output size 180) in wgan-gp mode.
data = np.load('data/final37.npy')
data = data[:, :, 0:60]

# Plot a 10x10 grid of the original data
def Show_images(data, show_nums, save=False):
    index = 0
    for n in range(show_nums):
        show_images = data[index:index + 100]
        show_images = show_images.reshape(100, 3, 60, 1)
        r, c = 10, 10
        fig, axs = plt.subplots(r, c)
        cnt = 0
        for i in range(r):
            for j in range(c):
                xy = show_images[cnt]
                for k in range(len(xy)):
                    x = xy[k][0:30]
                    y = xy[k][30:60]
                    if k == 0:
                        axs[i, j].plot(x, y, color='blue', linewidth=2)
                    if k == 1:
                        axs[i, j].plot(x, y, color='red', linewidth=2)
                    if k == 2:
                        axs[i, j].plot(x, y, color='green', linewidth=2)
                axs[i, j].axis('off')
                cnt += 1
        index += 100
        if save:
            if not os.path.exists('This_epoch2'):
                os.makedirs('This_epoch2')
            fig.savefig('This_epoch2/%d.jpg' % n)
            plt.close()
        else:
            plt.show()

# Save a 10x10 grid of generated samples for the given epoch
def Save_genImages(gen, epoch):
    r, c = 10, 10
    fig, axs = plt.subplots(r, c)
    cnt = 0
    for i in range(r):
        for j in range(c):
            xy = gen[cnt]
            for k in range(len(xy)):
                x = xy[k][0:30]
                y = xy[k][30:60]
                if k == 0:
                    axs[i, j].plot(x, y, color='blue')
                if k == 1:
                    axs[i, j].plot(x, y, color='red')
                if k == 2:
                    axs[i, j].plot(x, y, color='green')
            axs[i, j].axis('off')
            cnt += 1
    if not os.path.exists('gen_img2'):
        os.makedirs('gen_img2')
    fig.savefig('gen_img2/%d.jpg' % epoch)
    plt.close()

# Append the current losses to a text file
def Save_lossValue(epoch, iters, d_loss, g_loss):
    with open('loss2.txt', 'a') as f:
        f.write("epoch %d, batch %d, d_loss: %.8f, g_loss: %.8f" % (epoch, iters, d_loss, g_loss) + '\n')

def plot_loss(loss):
    fig, ax = plt.subplots(figsize=(20, 7))
    losses = np.array(loss)
    plt.plot(losses.T[0], label="Discriminator Loss")
    plt.plot(losses.T[1], label="Generator Loss")
    plt.title("Training Losses")
    plt.legend()
    plt.savefig('loss2.jpg')
    plt.show()

# ReLU activation
def Relu(name, tensor):
    return tf.nn.relu(tensor, name)

# LeakyReLU activation
def LeakyRelu(x, alpha=0.2):
    return tf.maximum(x, alpha * x)

# Fully connected layer
def Fully_connected(name, value, output_shape):
    with tf.variable_scope(name, reuse=None) as scope:
        shape = value.get_shape().as_list()
        w = tf.get_variable('w', [shape[1], output_shape], dtype=tf.float32,
                            initializer=tf.random_normal_initializer(stddev=0.01))
        b = tf.get_variable('b', [output_shape], dtype=tf.float32, initializer=tf.constant_initializer(0.0))
        return tf.matmul(value, w) + b

# 1-D convolution
def Conv1d(name, tensor, ksize, out_dim, stride, padding, stddev=0.01):
    with tf.variable_scope(name):
        w = tf.get_variable('w', [ksize, tensor.get_shape()[-1], out_dim], dtype=tf.float32,
                            initializer=tf.random_normal_initializer(stddev=stddev))
        var = tf.nn.conv1d(tensor, w, stride, padding=padding)
        b = tf.get_variable('b', [out_dim], 'float32', initializer=tf.constant_initializer(0.01))
        return tf.nn.bias_add(var, b)

# 2-D convolution
def Conv2d(name, tensor, filter_size1, filter_size2, out_dim, stride1, stride2, padding, stddev=0.01):
    with tf.variable_scope(name):
        w = tf.get_variable('w', [filter_size1, filter_size2, tensor.get_shape()[-1], out_dim], dtype=tf.float32,
                            initializer=tf.random_normal_initializer(stddev=stddev))
        var = tf.nn.conv2d(tensor, w, [1, stride1, stride2, 1], padding=padding)
        b = tf.get_variable('b', [out_dim], 'float32', initializer=tf.constant_initializer(0.01))
        return tf.nn.bias_add(var, b)

# 2-D transposed convolution
def Deconv2d(name, tensor, filter_size1, filter_size2, outshape, stride1, stride2, padding, stddev=0.01):
    with tf.variable_scope(name):
        w = tf.get_variable('w', [filter_size1, filter_size2, outshape[-1], tensor.get_shape()[-1]], dtype=tf.float32,
                            initializer=tf.random_normal_initializer(stddev=stddev))
        var = tf.nn.conv2d_transpose(tensor, w, outshape, strides=[1, stride1, stride2, 1], padding=padding)
        b = tf.get_variable('b', [outshape[-1]], 'float32', initializer=tf.constant_initializer(0.01))
        return tf.nn.bias_add(var, b)

def Get_inputs(real_size, noise_size):
    real_img = tf.placeholder(tf.float32, [None, real_size], name='real_img')
    noise_img = tf.placeholder(tf.float32, [None, noise_size], name='noise_img')
    return real_img, noise_img

# Sample a noise batch from the chosen distribution
def Get_noise(noise, batch_size):
    if noise == 'uniform':
        batch_noise = np.random.uniform(-1, 1, size=(batch_size, noise_size))
    elif noise == 'normal':
        batch_noise = np.random.normal(-1, 1, size=(batch_size, noise_size))
    elif noise == 'normal0_1':
        batch_noise = np.random.normal(0, 1, size=(batch_size, noise_size))
    return batch_noise

def Discriminator(img, reuse=False, name='discriminator'):
    with tf.variable_scope(name, reuse=reuse):
        output = Fully_connected('df1', img, 2048)
        output = LeakyRelu(output)
        output = Fully_connected('df2', output, 1024)
        output = LeakyRelu(output)
        output = Fully_connected('df3', output, 512)
        output = LeakyRelu(output)
        output = Fully_connected('df4', output, 256)
        output = LeakyRelu(output)
        output = Fully_connected('df5', output, 1)
        return output

def Generator(noise_img, reuse=False, name='generator'):
    with tf.variable_scope(name, reuse=reuse):
        output = Fully_connected('gf1', noise_img, 2048)
        output = tf.layers.batch_normalization(output, momentum=0.8, training=True)
        output = tf.nn.relu(output)
        output = Fully_connected('gf2', output, 1024)
        output = tf.layers.batch_normalization(output, momentum=0.8, training=True)
        output = tf.nn.relu(output)
        output = Fully_connected('gf3', output, 512)
        output = tf.layers.batch_normalization(output, momentum=0.8, training=True)
        output = tf.nn.relu(output)
        output = Fully_connected('gf4', output, 256)
        output = tf.layers.batch_normalization(output, momentum=0.8, training=True)
        output = tf.nn.relu(output)
        output = Fully_connected('gf5', output, 180)
        output = tf.nn.tanh(output)
        return output

mode = 'wgan-gp'   # gan, wgan, wgan-gp
noise = 'uniform'  # normal0_1, normal, uniform
batch_size = 100
epochs = 100
n_sample = 100
lamda = 10
img_size = 180
noise_size = 100

tf.reset_default_graph()
real_img, noise_img = Get_inputs(img_size, noise_size)  # placeholders fed at training time
real_data = real_img
fake_data = Generator(noise_img)
disc_real = Discriminator(real_data, reuse=False)
disc_fake = Discriminator(fake_data, reuse=True)

# Generator and discriminator variables
train_vars = tf.trainable_variables()
g_vars = [var for var in train_vars if var.name.startswith("generator")]
d_vars = [var for var in train_vars if var.name.startswith("discriminator")]

# Vanilla GAN
if mode == 'gan':
    gen_cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_fake, labels=tf.ones_like(disc_fake)))  # generator loss
    disc_cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_fake, labels=tf.zeros_like(disc_fake)))
    disc_cost += tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_real, labels=tf.ones_like(disc_real)))
    disc_cost /= 2.  # discriminator loss
    # Optimizers
    gen_train_op = tf.train.AdamOptimizer(learning_rate=2e-4, beta1=0.5).minimize(gen_cost, var_list=g_vars)
    disc_train_op = tf.train.AdamOptimizer(learning_rate=2e-4, beta1=0.5).minimize(disc_cost, var_list=d_vars)
    clip_disc_weights = None
# WGAN
elif mode == 'wgan':
    gen_cost = -tf.reduce_mean(disc_fake)  # generator loss
    disc_cost = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real)  # critic loss
    # Optimizers
    gen_train_op = tf.train.RMSPropOptimizer(learning_rate=5e-5).minimize(gen_cost, var_list=g_vars)
    disc_train_op = tf.train.RMSPropOptimizer(learning_rate=5e-5).minimize(disc_cost, var_list=d_vars)
    clip_ops = []
    # Clip the critic weights to [-0.01, 0.01]
    for var in train_vars:
        if var.name.startswith("discriminator"):
            clip_bounds = [-0.01, 0.01]
            clip_ops.append(tf.assign(var, tf.clip_by_value(var, clip_bounds[0], clip_bounds[1])))
    clip_disc_weights = tf.group(*clip_ops)
# WGAN-GP
elif mode == 'wgan-gp':
    gen_cost = -tf.reduce_mean(disc_fake)  # generator loss
    disc_cost = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real)  # critic loss
    # Gradient penalty on samples interpolated between real and fake data
    alpha = tf.random_uniform(shape=[batch_size, 1], minval=0., maxval=1.)
    interpolates = alpha * fake_data + (1 - alpha) * real_data
    gradients = tf.gradients(Discriminator(interpolates, reuse=True), [interpolates])[0]
    slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))
    gradient_penalty = tf.reduce_mean((slopes - 1.) ** 2)
    disc_cost += lamda * gradient_penalty
    clip_disc_weights = None
    # Optimizers
    gen_train_op = tf.train.AdamOptimizer(learning_rate=1e-4, beta1=0.5, beta2=0.9).minimize(gen_cost, var_list=g_vars)
    disc_train_op = tf.train.AdamOptimizer(learning_rate=1e-4, beta1=0.5, beta2=0.9).minimize(disc_cost, var_list=d_vars)

saver = tf.train.Saver()

def Train():
    losses = []
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for e in range(epochs):
            for i in xrange(len(data) // batch_size):
                batch_images = data[i * batch_size:(i + 1) * batch_size]
                batch_images = batch_images.reshape(batch_size, 180)
                if noise != 'normal0_1':
                    batch_images = batch_images * 2 - 1  # rescale to [-1, 1] to match the tanh output
                batch_noise = Get_noise(noise, 100)
                if mode == 'gan':  # vanilla GAN: one discriminator update per generator update
                    disc_iters = 1
                else:              # WGAN and WGAN-GP: several critic updates per generator update
                    disc_iters = 6
                for x in range(0, disc_iters):
                    _, d_loss = sess.run([disc_train_op, disc_cost], feed_dict={real_data: batch_images, noise_img: batch_noise})
                    if clip_disc_weights is not None:
                        _ = sess.run(clip_disc_weights)
                _, g_loss = sess.run([gen_train_op, gen_cost], feed_dict={noise_img: batch_noise})
                Save_lossValue(e, i, d_loss, g_loss)
                print("epoch %d, batch %d, d_loss: %.8f, g_loss: %.8f" % (e, i, d_loss, g_loss))
                losses.append((d_loss, g_loss))
            sample_noise = Get_noise(noise, 100)
            gen_samples = sess.run(Generator(noise_img, reuse=True), feed_dict={noise_img: sample_noise})
            saver.save(sess, 'checkpoints/test2.ckpt')
            if e % 1 == 0:
                gen = gen_samples.reshape(100, 3, 60, 1)
                if noise != 'normal0_1':
                    gen = (gen + 1) / 2
                Save_genImages(gen, e)
        plot_loss(losses)

def Test():
    saver = tf.train.Saver(var_list=g_vars)
    with tf.Session() as sess:
        saver.restore(sess, tf.train.latest_checkpoint("checkpoints"))
        # saver.restore(sess, 'checkpoints/test2.ckpt')
        sample_noise = Get_noise(noise, 10000)
        gen_samples = sess.run(Generator(noise_img, reuse=True), feed_dict={noise_img: sample_noise})
        if noise != 'normal0_1':
            gen_images = (gen_samples + 1) / 2
        else:
            gen_images = gen_samples  # make sure gen_images is defined for every noise type
        show_num = len(gen_images) // 100
        Show_images(gen_images, show_num, save=True)

if __name__ == '__main__':
    Train()
    # Test()


