from datetime import datetime
import os
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from six.moves import xrange
np.set_printoptions(suppress=True)
np.set_printoptions(threshold=np.inf)
data = np.load('data/final37在端点处插值.npy')
data = data[:,:,0:60] # keep the first 60 values per branch (30 x-coords + 30 y-coords)
###########################################
# Save a 10x10 grid of generated bifurcation curves as a single image per epoch.
def Save_genImages(gen, epoch):
r,c = 10,10
fig,axs = plt.subplots(r,c)
cnt = 0
for i in range(r):
for j in range(c):
            # gen[cnt] selects one of the 100 generated bifurcations;
            # index 0 is the trunk (zhu), 1 the left (zuo), 2 the right (you) branch.
zhu_len = len(gen[cnt][0])
zuo_len = len(gen[cnt][1])
you_len = len(gen[cnt][2])
            # Each 60-value row stores x-coordinates in its first half and
            # y-coordinates in its second half.
            axs[i,j].plot(gen[cnt][0][0:zhu_len//2],gen[cnt][0][zhu_len//2:zhu_len],color='red')
            # Prepend the trunk's endpoint so both child branches connect to it.
            # Use np.concatenate, not list +: gen is an ndarray, so
            # [scalar] + ndarray would broadcast an element-wise addition.
            axs[i,j].plot(np.concatenate(([gen[cnt][0][zhu_len//2-1]],gen[cnt][1][0:zuo_len//2])),
                          np.concatenate(([gen[cnt][0][zhu_len-1]],gen[cnt][1][zuo_len//2:zuo_len])),color='blue')
            axs[i,j].plot(np.concatenate(([gen[cnt][0][zhu_len//2-1]],gen[cnt][2][0:you_len//2])),
                          np.concatenate(([gen[cnt][0][zhu_len-1]],gen[cnt][2][you_len//2:you_len])),color='green')
axs[i,j].axis('off')
cnt += 1
if not os.path.exists('testwgan-gp5'):
os.makedirs('testwgan-gp5')
fig.savefig('testwgan-gp5/%d.jpg' % epoch)
plt.close()
def plot_loss(loss):
fig,ax = plt.subplots(figsize=(20,7))
losses = np.array(loss)
plt.plot(losses.T[0], label="Discriminator Loss")
plt.plot(losses.T[1], label="Generator Loss")
plt.title("Training Losses")
plt.legend()
plt.savefig('loss2.jpg')
plt.show()
# ReLU activation (defined but not used below)
def Relu(name, tensor):
return tf.nn.relu(tensor,name)
# Leaky ReLU activation: max(x, alpha * x)
def LeakyRelu(x, alpha=0.25):
return tf.maximum(x, alpha * x)
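# A minimal sanity-check sketch (illustration only; never called during
# training): for 0 <= alpha <= 1, max(x, alpha*x) equals the usual piecewise
# leaky-ReLU definition, as this NumPy reference implementation makes explicit.
def _leaky_relu_reference(x, alpha=0.25):
    # x where x >= 0, alpha * x where x < 0 -- same as tf.maximum(x, alpha * x).
    return np.where(x >= 0, x, alpha * x)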
# Fully connected layer: y = xW + b, inside its own variable scope
def Fully_connected(name, value, output_shape):
with tf.variable_scope(name, reuse=None) as scope:
shape = value.get_shape().as_list()
w = tf.get_variable('w', [shape[1], output_shape], dtype=tf.float32,
initializer=tf.random_normal_initializer(stddev=0.01))
b = tf.get_variable('b', [output_shape], dtype=tf.float32, initializer=tf.constant_initializer(0.0))
return tf.matmul(value, w) + b
def Get_inputs(real_size,noise_size):
real_img = tf.placeholder(tf.float32, [None, real_size], name='real_img')
noise_img = tf.placeholder(tf.float32, [None, noise_size], name='noise_img')
return real_img, noise_img
def Get_noise(noise,batch_size):
    # Note: the sample width comes from the global noise_size defined below.
    # Optionally fix the random seed for reproducibility:
# np.random.seed(1234)
if noise == 'uniform':
batch_noise = np.random.uniform(-1, 1, size=(batch_size, noise_size))
    elif noise == 'normal':
        # np.random.normal(loc, scale): this draws from N(mean=-1, std=1).
        batch_noise = np.random.normal(-1, 1, size=(batch_size, noise_size))
elif noise == 'normal0_1':
batch_noise = np.random.normal(0, 1, size=(batch_size, noise_size))
return batch_noise
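# Illustrative usage (Get_noise reads the global noise_size defined further
# below, so it must only be called after that assignment):
#   Get_noise('uniform', 100) -> ndarray of shape (100, noise_size), i.i.d. U(-1, 1)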
def Discriminator(img, reuse=False, name='discriminator'):
with tf.variable_scope(name, reuse=reuse):
output = Fully_connected('df1',img,4096)
output = LeakyRelu(output)
output = Fully_connected('df2',output,2048)
output = LeakyRelu(output)
output = Fully_connected('df3',output,1024)
output = LeakyRelu(output)
#
output = Fully_connected('df4',output,512)
output = LeakyRelu(output)
#
output = Fully_connected('df6',output,256)
output = LeakyRelu(output)
#
output = Fully_connected('df7',output,128)
output = LeakyRelu(output)
output = Fully_connected('df5',output,1)
return output
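# Note: the critic returns raw, unbounded scores (no sigmoid). The WGAN
# variants below use these scores directly; the vanilla-GAN branch applies
# sigmoid_cross_entropy_with_logits to the same logits.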
def Generator(noise_img, reuse=False, name='generator'):
with tf.variable_scope(name,reuse=reuse):
output = Fully_connected('gf1',noise_img,4096)
output = tf.layers.batch_normalization(output,momentum=0.9,training=True)
output = LeakyRelu(output)
output = Fully_connected('gf2',output,2048)
output = tf.layers.batch_normalization(output,momentum=0.9,training=True)
output = LeakyRelu(output)
output = Fully_connected('gf3',output,1024)
output = tf.layers.batch_normalization(output,momentum=0.9,training=True)
output = LeakyRelu(output)
#
output = Fully_connected('gf4',output,512)
output = tf.layers.batch_normalization(output,momentum=0.9,training=True)
output = LeakyRelu(output)
#
output = Fully_connected('gf6',output,256)
output = tf.layers.batch_normalization(output,momentum=0.9,training=True)
output = LeakyRelu(output)
#
output = Fully_connected('gf7',output,128)
output = tf.layers.batch_normalization(output,momentum=0.9,training=True)
output = LeakyRelu(output)
output = Fully_connected('gf5',output,180)
output = tf.nn.tanh(output)
return output
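# The generator thus maps (batch, noise_size) noise to (batch, 180) outputs in
# [-1, 1] via tanh, where 180 = 3 branches x (30 x-coords + 30 y-coords).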
mode = 'wgan-gp' # gan, wgan, wgan-gp
noise = 'uniform' # normal0_1, normal, uniform
batch_size = 100
epochs = 250
n_sample = 100
lamda = 10 # gradient-penalty weight ("lambda" is a reserved word in Python)
img_size = 180
noise_size = 100
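# Shape bookkeeping: each training sample is one bifurcation flattened to
# img_size = 180 values (3 branches x 60), generated from a noise_size = 100
# dimensional noise vector.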
tf.reset_default_graph()
real_img, noise_img = Get_inputs(img_size,noise_size) # fed via feed_dict
real_data = real_img
fake_data = Generator(noise_img)
disc_real = Discriminator(real_data,reuse=False)
disc_fake = Discriminator(fake_data,reuse=True)
# Split the trainable variables between generator and discriminator
train_vars = tf.trainable_variables()
g_vars = [var for var in train_vars if var.name.startswith("generator")]
d_vars = [var for var in train_vars if var.name.startswith("discriminator")]
# Vanilla GAN
if mode == 'gan':
    gen_cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_fake,labels=tf.ones_like(disc_fake))) # generator loss
    disc_cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_fake,labels=tf.zeros_like(disc_fake)))
    disc_cost += tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_real,labels=tf.ones_like(disc_real)))
    disc_cost /= 2. # discriminator loss
    # Optimizers
gen_train_op = tf.train.AdamOptimizer(learning_rate=2e-4, beta1=0.5).minimize(gen_cost,var_list=g_vars)
disc_train_op = tf.train.AdamOptimizer(learning_rate=2e-4,beta1=0.5).minimize(disc_cost,var_list=d_vars)
clip_disc_weights = None
# WGAN
elif mode == 'wgan':
    gen_cost = -tf.reduce_mean(disc_fake) # generator loss
    disc_cost = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real) # discriminator loss
    # Optimizers
gen_train_op = tf.train.RMSPropOptimizer(learning_rate=5e-5).minimize(gen_cost,var_list=g_vars)
disc_train_op = tf.train.RMSPropOptimizer(learning_rate=5e-5).minimize(disc_cost,var_list=d_vars)
clip_ops = []
    # Clip the discriminator weights to [-0.01, 0.01]
for var in train_vars:
if var.name.startswith("discriminator"):
clip_bounds = [-0.01, 0.01]
clip_ops.append(tf.assign(var,tf.clip_by_value(var,clip_bounds[0],clip_bounds[1])))
clip_disc_weights = tf.group(*clip_ops)
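    # Weight clipping is the original WGAN recipe (Arjovsky et al., 2017) for
    # crudely enforcing the critic's 1-Lipschitz constraint; the 'wgan-gp'
    # branch below replaces it with a gradient penalty.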
elif mode == 'wgan-gp':
    gen_cost = -tf.reduce_mean(disc_fake) # generator loss
    disc_cost1 = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real) # discriminator loss (negative Wasserstein estimate)
    # Gradient penalty, evaluated at random interpolates between real and fake samples
    alpha = tf.random_uniform(shape=[batch_size,1],minval=0.,maxval=1.)
    interpolates = alpha*fake_data + (1-alpha)*real_data
    gradients = tf.gradients(Discriminator(interpolates,reuse=True),[interpolates])[0]
    slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients),axis=[1]))
    gradient_penalty = tf.reduce_mean((slopes-1.)**2)
    disc_cost = disc_cost1 + lamda * gradient_penalty
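    # This is the WGAN-GP critic objective of Gulrajani et al. (2017):
    #   L_D = E[D(G(z))] - E[D(x)] + lamda * E[(||grad D(x_hat)||_2 - 1)^2],
    # where x_hat = alpha * G(z) + (1 - alpha) * x with alpha ~ U(0, 1).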
clip_disc_weights = None
    # Optimizers
gen_train_op = tf.train.AdamOptimizer(learning_rate=0.00005,beta1=0.5,beta2=0.9).minimize(gen_cost,var_list=g_vars)
disc_train_op = tf.train.AdamOptimizer(learning_rate=0.00005,beta1=0.5,beta2=0.9).minimize(disc_cost,var_list=d_vars)
saver = tf.train.Saver()
def Train():
losses = []
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for i in xrange(len(data)//batch_size):
batch_images = data[i*batch_size:(i+1)*batch_size]
batch_images = batch_images.reshape(batch_size,180)
                if noise != 'normal0_1' :
                    batch_images = batch_images*2 - 1 # rescale [0, 1] -> [-1, 1] to match the tanh output
batch_noise = Get_noise(noise,100)
                if mode == 'gan': # vanilla GAN: train discriminator and generator once each
                    disc_iters = 1
                else: # WGAN and WGAN-GP: several discriminator updates per generator update
                    disc_iters = 5
for x in range(0, disc_iters):
                    # Note: disc_cost1 is defined only for mode == 'wgan-gp';
                    # Train() as written therefore assumes that mode.
                    _,d_loss,w_distance = sess.run([disc_train_op,disc_cost,disc_cost1],feed_dict={real_data:batch_images,noise_img:batch_noise})
if clip_disc_weights is not None:
_ = sess.run(clip_disc_weights)
_,g_loss = sess.run([gen_train_op,gen_cost],feed_dict={noise_img:batch_noise})
                print(datetime.now().strftime('%c'),"epoch %d, batch %d, d_loss: %.8f, g_loss: %.8f, w_distance: %.8f"%(e, i, d_loss, g_loss, -w_distance))
losses.append((d_loss,g_loss))
sample_noise = Get_noise(noise,100)
            # Note: this adds fresh Generator ops to the graph on every epoch;
            # building the sampling op once before the loop would be cheaper.
            gen_samples = sess.run(Generator(noise_img,reuse=True),feed_dict={noise_img:sample_noise})
            if e in (50, 100, 150, 200, 249):
saver.save(sess,'test_wgan_gp/wganGP新的插值法%d.ckpt' % e )
            if e % 1 == 0: # i.e. every epoch
gen = gen_samples.reshape(100,3,60)
gen = (gen+1)/2
Save_genImages(gen, e)
plot_loss(losses)
def Save_single(arr,mydir):
    # Create the output directory if it does not exist yet.
    if not os.path.exists(mydir):
        os.makedirs(mydir)
    data_images = arr
    data_images = data_images.reshape(-1,3,60)
for i in range(0,len(arr)):
        plt.figure(figsize=(128,128),dpi=1) # 128 x 128 inches at 1 dpi -> a 128 x 128 pixel image
plt.plot(data_images[i][0][0:30],data_images[i][0][30:60],color='blue',linewidth=300)
plt.plot(data_images[i][1][0:30],data_images[i][1][30:60],color='red',linewidth=300)
plt.plot(data_images[i][2][0:30],data_images[i][2][30:60],color='green',linewidth=300)
plt.axis('off')
plt.savefig(mydir+os.sep+str(i)+'.jpg',dpi=1)
plt.close()
def Manage_gen(gen_imgs):
    # gen_imgs is an array of shape (-1, 3, 60) in which the tail of the trunk
    # branch is separated from the heads of the left and right branches.
    # Goal: attach the trunk's endpoint to the head of each child branch while
    # keeping the dimensions unchanged.
gen_imgs = gen_imgs.reshape(-1,3,60)
finaldata = gen_imgs.tolist()
final = []
for i in range(len(finaldata)):
zhu = finaldata[i][0]
zuo = finaldata[i][1]
you = finaldata[i][2]
        # Split each branch into separate x and y coordinate lists
zhu_x = zhu[0:30]
zhu_y = zhu[30:60]
zuo_x = zuo[0:30]
zuo_y = zuo[30:60]
you_x = you[0:30]
you_y = you[30:60]
        ############################################
        # In the real data the last two trunk points are nearly identical, so the
        # generated data behaves the same way; when computing angles one should
        # therefore use the last point and the third-to-last point.
        # To prepend the trunk's last point to both child branches, first drop the
        # last point of each child branch; the interpolated endpoint values are all
        # equal, so dropping one changes little.
        # Then insert the trunk's tail point at the head of each child branch,
        # which keeps the dimensions unchanged.
        # Drop one value from the tail of the left and right branches:
del zuo_x[-1]
del zuo_y[-1]
del you_x[-1]
del you_y[-1]
        # Insert the trunk's tail point at the head of the left and right branches:
zuo_x.insert(0,zhu_x[-1])
zuo_y.insert(0,zhu_y[-1])
you_x.insert(0,zhu_x[-1])
you_y.insert(0,zhu_y[-1])
        # Re-flatten each branch back to [x..., y...] (60 values per branch).
        zhu_x.extend(zhu_y)
        zuo_x.extend(zuo_y)
        you_x.extend(you_y)
        fencha = [zhu_x] + [zuo_x] + [you_x] # one bifurcation: [trunk, left, right]
final.append(fencha)
    final = np.array(final) # array of shape (-1, 3, 60)
return final
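# Illustrative usage (assumes gen_imgs came from the generator and was rescaled
# to [0, 1]): the stitched result keeps the (-1, 3, 60) layout, e.g.
#   stitched = Manage_gen(gen_imgs)  # stitched.shape == (N, 3, 60)
#   Plt_images(stitched)             # trunk endpoint now joins both child branches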
def Plt_images(imgs):
for i in range(len(imgs)):
plt.plot(imgs[i][0][0:30],imgs[i][0][30:60],color='red')
plt.plot(imgs[i][1][0:30],imgs[i][1][30:60],color='blue')
plt.plot(imgs[i][2][0:30],imgs[i][2][30:60],color='green')
plt.show()
def Test():
saver = tf.train.Saver()
with tf.Session() as sess:
# saver.restore(sess,tf.train.latest_checkpoint("Save_wgan"))
saver.restore(sess,'test_wgan_gp/wganGP新的插值法50.ckpt')
        sample_noise = Get_noise(noise,2000) # as many noise samples as real samples
gen_imgs = sess.run(Generator(noise_img,reuse=True),feed_dict={noise_img:sample_noise})
gen_imgs = (gen_imgs+1)/2
##############################################################################################
real_img = data.reshape((-1,180))
real_img = real_img[0:2000,:]
print(sample_noise.shape)
print(real_img.shape)
#################################################################################################
        # Generate everything, good and bad alike:
# gen_samples = sess.run(Generator(noise_img,reuse=True),feed_dict={noise_img:sample_noise})
# gen_samples = Manage_gen(gen_imgs)
# Save_single(gen_samples,mydir='bad')
#################################################################################################
        # Keep only generated batches whose estimated Wasserstein distance is small
gen = []
for i in range(len(sample_noise)//100):
            batch_noise = sample_noise[i*100:(i+1)*100,:].reshape(-1,100) # use the training batch size: the shared graph was built for batch_size samples
batch_real = real_img[i*100:(i+1)*100,:].reshape(-1,180)
            wasserstein = sess.run(disc_cost,feed_dict = {noise_img:batch_noise,real_data:batch_real})
            # abs() because for wgan-gp disc_cost also contains the gradient-penalty
            # term; disc_cost1 would be the pure Wasserstein estimate.
            wasserstein = abs(wasserstein)
# print(wasserstein)
if wasserstein<0.50:
gen_images = sess.run(Generator(noise_img,reuse=True),feed_dict={noise_img:batch_noise})
gen_images = Manage_gen(gen_images)
gen.append(gen_images)
gen = np.array(gen)
gen = gen.reshape(-1,180)
print(gen.shape)
Save_single(gen,mydir='bad')
# ##################################################################################
# np.save("wgan-gp200最小距离.npy",gen)
##################################################################################
if __name__ == '__main__':
# Train()
Test()