GAN regeneration of the reworked final37 dataset, with equal (repeated) values inserted at branch endpoints

In final37 the last two coordinates of the main branch are mostly equal, so the last coordinate was dropped.
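A minimal sketch of the assumed per-sample layout (hypothetical values, for illustration only): each sample is (3, 60), i.e. three branches (main, left, right), each stored as 30 x values followed by 30 y values, and the endpoint padding simply repeats a branch's last real point.

import numpy as np

# Hypothetical example of the assumed (3, 60) per-sample layout
sample = np.zeros((3, 60))
main_x, main_y = sample[0, :30], sample[0, 30:]   # main branch
left_x, left_y = sample[1, :30], sample[1, 30:]   # left branch
# Endpoint padding: repeat the branch's last real point
left_x[25:] = left_x[24]
left_y[25:] = left_y[24]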

Method 1: discriminator output reshaped to (-1, 1024)

import os  
import matplotlib.pyplot as plt  
import numpy as np  
import tensorflow as tf  
from six.moves import xrange  
data = np.load('data/final37端点补充相等值.npy')
print(data.shape)
data = data[:,:,0:60]  
print(data.shape)
np.set_printoptions(threshold=np.inf)
# Plot the original (real) data samples
def Show_images(data,show_nums,save=False):  
    index = 0  
    for n in range(show_nums):  
        show_images = data[index:index+100]  
        show_images = show_images.reshape(100,3,60,1)  
        r,c = 10,10  
        fig,axs = plt.subplots(r,c)  
        cnt = 0  
        for i in range(r):  
            for j in range(c):  
                xy = show_images[cnt]  
                for k in range(len(xy)):  
                    x = xy[k][0:30]  
                    y = xy[k][30:60]  
                    if k == 0 :  
                        axs[i,j].plot(x,y,color='blue',linewidth=2)  
                    if k == 1:  
                        axs[i,j].plot(x,y,color='red',linewidth=2)  
                    if k == 2:  
                        axs[i,j].plot(x,y,color='green',linewidth=2)  
                        axs[i,j].axis('off')  
                cnt += 1  
        index += 100  
        if save:  
            if not os.path.exists('gen4'):  
                os.makedirs('gen4')  
            fig.savefig('gen4/%d.jpg' % n)  
            plt.close()  
        else:  
            plt.show()  
              
def Save_genImages(gen, epoch):  
    r,c = 10,10  
    fig,axs = plt.subplots(r,c)  
    cnt = 0  
    for i in range(r):  
        for j in range(c):  
            xy = gen[cnt]  
            for k in range(len(xy)):  
                x = xy[k][0:30]  
                y = xy[k][30:60]  
                if k == 0:  
                    axs[i,j].plot(x,y,color='blue')  
                if k == 1:  
                    axs[i,j].plot(x,y,color='red')  
                if k == 2:  
                    axs[i,j].plot(x,y,color='green')  
                    axs[i,j].axis('off')  
            cnt += 1  
    if not os.path.exists('Manage0'):  
        os.makedirs('Manage0')  
    fig.savefig('Manage0/%d.jpg' % epoch)  
    plt.close()  
def Manage_gen(gen_imgs):
    # gen_imgs is an array of shape (-1, 3, 60); the tail of the main branch is separated from the heads of the left/right branches
    # Goal: attach the main branch's tail point to the heads of the left/right branches while keeping the shape unchanged
    gen_imgs = gen_imgs.reshape(-1,3,60)
    finaldata = gen_imgs.tolist()
    final = []
    for i in range(len(finaldata)):
        zhu = finaldata[i][0]
        zuo = finaldata[i][1]
        you = finaldata[i][2]
        # Split each branch into separate x and y lists
        zhu_x = zhu[0:30]
        zhu_y = zhu[30:60]
        zuo_x = zuo[0:30]
        zuo_y = zuo[30:60]
        you_x = you[0:30]
        you_y = you[30:60]
        ############################################
        # In the real data the last two points of the main branch are almost equal, so the generated data is too;
        # when computing angles one should therefore use the last point and the third-to-last point.
        # To prepend the main branch's last point to the left/right branches, first drop the last point of each of them;
        # since the padded endpoint values are all equal, dropping one has little effect.
        # Then append the main branch's tail to the heads of the left/right branches, which keeps the shape unchanged.
        # Drop one point from the tail of the left and right branches
        del zuo_x[-1]
        del zuo_y[-1]
        del you_x[-1]
        del you_y[-1]
        # Insert the main branch's tail point at the head of the left and right branches
        zuo_x.insert(0,zhu_x[-1])
        zuo_y.insert(0,zhu_y[-1])
        you_x.insert(0,zhu_x[-1])
        you_y.insert(0,zhu_y[-1])
        zhu_x.extend(zhu_y)
        zuo_x.extend(zuo_y)
        you_x.extend(you_y)
        fencha = [zhu_x] +[zuo_x] + [you_x]
        final.append(fencha)
    final = np.array(final)  # array of shape (-1, 3, 60)
    return final
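# A quick, hypothetical sanity check: Manage_gen preserves the (-1, 3, 60) shape.
# dummy = np.random.rand(5, 3, 60)
# assert Manage_gen(dummy).shape == (5, 3, 60)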

def Save_lossValue(epoch,iters,d_loss,g_loss):  
    with open('loss3.txt','a') as f:  
        f.write("第%d个epoch,第%d个batch , d_loss: %.8f, g_loss: %.8f"%(epoch, iters, d_loss, g_loss)+'\n')  
        
def plot_loss(loss):  
    fig,ax = plt.subplots(figsize=(20,7))  
    losses = np.array(loss)  
    plt.plot(losses.T[0], label="Discriminator Loss")  
    plt.plot(losses.T[1], label="Generator Loss")  
    plt.title("Training Losses")  
    plt.legend()  
    plt.savefig('loss4.jpg')  
    plt.show()  
    
# ReLU activation
def Relu(name, tensor):  
    return tf.nn.relu(tensor,name)  
  
# LeakyReLU activation
def LeakyRelu(x, alpha=0.25):  
    return tf.maximum(x, alpha * x)  
  
# Fully connected layer
def Fully_connected(name, value, output_shape):  
    with tf.variable_scope(name, reuse=None) as scope:  
        shape = value.get_shape().as_list()  
        w = tf.get_variable('w', [shape[1], output_shape], dtype=tf.float32,  
                                    initializer=tf.random_normal_initializer(stddev=0.01))  
        b = tf.get_variable('b', [output_shape], dtype=tf.float32, initializer=tf.constant_initializer(0.0))  
  
        return tf.matmul(value, w) + b  
      
def Get_inputs(real_size,noise_size):  
        real_img = tf.placeholder(tf.float32, [None, real_size], name='real_img')  
        noise_img = tf.placeholder(tf.float32, [None, noise_size], name='noise_img')  
          
        return real_img, noise_img  
 
def Get_noise(noise,batch_size):
    if noise == 'uniform':
            batch_noise = np.random.uniform(-1, 1, size=(batch_size, noise_size))
    elif noise == 'normal':
            batch_noise = np.random.normal(-1, 1, size=(batch_size, noise_size))
    elif noise == 'normal0_1':
            batch_noise = np.random.normal(0, 1, size=(batch_size, noise_size))
            
    return batch_noise
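# Note: Get_noise uses the module-level noise_size for the second dimension,
# e.g. Get_noise('uniform', 100) returns an array of shape (100, noise_size).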
 
def Discriminator(img, reuse=False, name='discriminator'):  
    with tf.variable_scope(name, reuse=reuse):
  
        output = Fully_connected('df1',img,1024)
        output = LeakyRelu(output)
          
#         output = Fully_connected('df2',output,512)
#         output = LeakyRelu(output)
            
        output = Fully_connected('df3',output,1024)
        output = LeakyRelu(output)
   
#         output = Fully_connected('df4',output,256)
#         output = LeakyRelu(output)
           
#         output = Fully_connected('df6',output,1024)
#         output = LeakyRelu(output)
#             
#         output = Fully_connected('df5',output,1)
        
        prob = tf.sigmoid(output)
        
        return  output,prob
    
def Generator(noise_img, reuse=False, name='generator'):
    with tf.variable_scope(name,reuse=reuse):
             
        output = Fully_connected('gf1',noise_img,1024)
        output = tf.layers.batch_normalization(output,momentum=0.9,training=True)
        output = LeakyRelu(output)
             
        output = Fully_connected('gf2',output,1024)
        output = tf.layers.batch_normalization(output,momentum=0.9,training=True)
        output = LeakyRelu(output)
          
#         output = Fully_connected('gf3',output,512)
#         output = tf.layers.batch_normalization(output,momentum=0.9,training=True)
#         output = LeakyRelu(output)
           
        output = Fully_connected('gf4',output,512)
        output = tf.layers.batch_normalization(output,momentum=0.9,training=True)
        output = tf.nn.relu(output)
          
#         output = Fully_connected('gf6',output,256)
#         output = tf.layers.batch_normalization(output,momentum=0.9,training=True)
#         output = tf.nn.relu(output)
          
            
        output = Fully_connected('gf5',output,180)
        
        output = tf.nn.tanh(output)
        return output
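# Note: tanh keeps the generator output in [-1, 1]; Train() rescales the real data to the same range
# (batch_images*2 - 1), and (gen + 1) / 2 maps generated samples back to [0, 1] before plotting.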
mode = 'gan' # gan, wgan, wgan-gp
noise = 'uniform' # normal0_1, normal, uniform  
batch_size = 100  
epochs = 10
n_sample = 100  
lamda = 10  
img_size  = 180  
noise_size = 100  
  
tf.reset_default_graph()  
  
real_img, noise_img = Get_inputs(img_size,noise_size)  # placeholders fed via feed_dict in Train()
real_data = real_img  
fake_data = Generator(noise_img)  
  
disc_real, disc_prob_real = Discriminator(real_data,reuse=False)  
disc_fake, disc_prob_fake = Discriminator(fake_data,reuse=True)  
 
  
# Trainable variables of the generator and the discriminator
train_vars = tf.trainable_variables()  
g_vars = [var for var in train_vars if var.name.startswith("generator")]  
d_vars = [var for var in train_vars if var.name.startswith("discriminator")]  
 
# Vanilla GAN
if mode == 'gan':
    # First form of the generator loss: the better the discriminator, the more easily the gradient vanishes;
    # at the optimum the discriminator assigns probability 0.5 to both real and fake samples
    gen_cost = tf.reduce_mean(tf.log(1-disc_prob_fake))
    disc_cost = -tf.reduce_mean(tf.log(disc_prob_real)+tf.log(1-disc_prob_fake))
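    # In math form (as coded above): L_D = -E_x[log D(x)] - E_z[log(1 - D(G(z)))],
    # L_G = E_z[log(1 - D(G(z)))]  (the saturating minimax loss from the original GAN paper)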

    # Second form of the generator loss (-log D(G(z))): prone to mode collapse and unstable gradients
#     disc_cost = tf.reduce_mean(-tf.log(disc_prob_real)-tf.log(1-disc_prob_fake))
#     gen_cost = tf.reduce_mean(-tf.log((disc_prob_fake)))
    
#     gen_cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_fake,labels=tf.ones_like(disc_fake))) # generator loss
#     disc_cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_fake,labels=tf.zeros_like(disc_fake)))
#     disc_cost += tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_real,labels=tf.ones_like(disc_real)))
#     disc_cost /= 2. # discriminator loss
    # Optimizers
    gen_train_op = tf.train.AdamOptimizer(learning_rate=1e-4, beta1=0.5).minimize(gen_cost,var_list=g_vars)
    disc_train_op = tf.train.AdamOptimizer(learning_rate=1e-4,beta1=0.5).minimize(disc_cost,var_list=d_vars)
    clip_disc_weights = None
    
#wgan
elif mode == 'wgan':
    gen_cost = -tf.reduce_mean(disc_fake) # generator loss
    disc_cost = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real) # critic (discriminator) loss
    
    # Optimizers
    gen_train_op = tf.train.RMSPropOptimizer(learning_rate=5e-5).minimize(gen_cost,var_list=g_vars)
    disc_train_op = tf.train.RMSPropOptimizer(learning_rate=5e-5).minimize(disc_cost,var_list=d_vars)
    clip_ops = []
    # Clip the discriminator weights to [-0.01, 0.01]
    for var in train_vars:
        if var.name.startswith("discriminator"):
            clip_bounds = [-0.01, 0.01]
            clip_ops.append(tf.assign(var,tf.clip_by_value(var,clip_bounds[0],clip_bounds[1])))
    clip_disc_weights = tf.group(*clip_ops)
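    # Weight clipping is the original WGAN heuristic for keeping the critic roughly 1-Lipschitz;
    # the 'wgan-gp' branch below replaces it with a gradient penalty.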
    
elif mode == 'wgan-gp':
    gen_cost = -tf.reduce_mean(disc_fake) # generator loss  
    disc_cost = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real) # critic (discriminator) loss 
       
    # Gradient penalty
    alpha = tf.random_uniform(shape=[batch_size,1],minval=0.,maxval=1.)  
    interpolates = alpha*fake_data + (1-alpha)*real_data  
    # Discriminator returns (logits, prob); the penalty is taken on the logits
    gradients = tf.gradients(Discriminator(interpolates,reuse=True)[0],[interpolates])[0]  
    slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients),reduction_indices=[1]))  
    gradient_penalty = tf.reduce_mean((slopes-1.)**2)  
    disc_cost += lamda * gradient_penalty  
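    # Full critic objective as coded: E[D(G(z))] - E[D(x)] + lamda * E[(||grad D(x_hat)||_2 - 1)^2],
    # with x_hat = alpha*G(z) + (1-alpha)*x a random interpolate between fake and real samples.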
    clip_disc_weights = None
  
    # Optimizers
    gen_train_op = tf.train.AdamOptimizer(learning_rate=2e-4,beta1=0.5,beta2=0.9).minimize(gen_cost,var_list=g_vars)  
    disc_train_op = tf.train.AdamOptimizer(learning_rate=2e-4,beta1=0.5,beta2=0.9).minimize(disc_cost,var_list=d_vars)  
  
saver = tf.train.Saver()  
    
def Train():  
    losses = []  
    with tf.Session() as sess:  
        sess.run(tf.global_variables_initializer())  
        for e in range(epochs):  
            for i in xrange(len(data)//batch_size):  
                batch_images = data[i*batch_size:(i+1)*batch_size]  
                batch_images = batch_images.reshape(batch_size,180) 
                if noise != 'normal0_1' :
                    batch_images = batch_images*2 -1 
                batch_noise = Get_noise(noise,100)
                if mode == 'gan': # vanilla GAN: train discriminator and generator once each
                    disc_iters = 1
                else:             # wgan / wgan-gp: train the discriminator several times per generator step
                    disc_iters = 5 
                for x in range(0, disc_iters):  
                    _,d_loss,d_acc,d_fake = sess.run([disc_train_op,disc_cost,disc_prob_real,disc_prob_fake],feed_dict={real_data:batch_images,noise_img:batch_noise}) 
                    if clip_disc_weights is not None:
                        _ = sess.run(clip_disc_weights) 
                for k in range(1):
                    _,g_loss = sess.run([gen_train_op,gen_cost],feed_dict={noise_img:batch_noise})  
                Save_lossValue(e,i,d_loss,g_loss)  
                if i % 50 == 0:
                    print("第%d个epoch,第%d个batch , d_loss: %.8f, g_loss: %.8f, d_real_acc: %.8f, d_fake_acc: %.8f"%(e, i, d_loss, g_loss, d_acc.mean(),d_fake.mean()))  
            losses.append((d_loss,g_loss))
            
            if e == 2 or e == 4 or e == 6 or e == 8:
                saver.save(sess,'Manamge/gan%d.ckpt' % e )
                
            sample_noise = Get_noise(noise,100)
            gen_samples,gen_samples_prob = sess.run([fake_data,disc_prob_fake],feed_dict={noise_img:sample_noise})
            real_samples_prob = sess.run(disc_prob_real,feed_dict={real_data:batch_images,noise_img:sample_noise}) 
            print("判别真的概率:\n",real_samples_prob.mean()) 
            print("判别假的概率: \n",gen_samples_prob.mean())

            if e % 1 == 0:  
                gen = gen_samples.reshape(100,3,60,1)
                gen = (gen+1)/2
                gen_images = Manage_gen(gen)
                Save_genImages(gen_images, e)  
        plot_loss(losses)               
def Save_single(arr,mydir):
    data_images = arr
    data_images = data_images.reshape(-1,3,60)
    if not os.path.exists(mydir):  # make sure the output directory exists before savefig
        os.makedirs(mydir)
    for i in range(1,len(arr)):
        plt.figure(figsize=(128,128),dpi=1)
        plt.plot(data_images[i][0][0:30],data_images[i][0][30:60],color='blue',linewidth=300)
        plt.plot(data_images[i][1][0:30],data_images[i][1][30:60],color='red',linewidth=300)
        plt.plot(data_images[i][2][0:30],data_images[i][2][30:60],color='green',linewidth=300)
        plt.axis('off')
        plt.savefig(mydir+os.sep+str(i)+'.jpg',dpi=1)
        plt.close()
def Test():  
    saver = tf.train.Saver()  
    with tf.Session() as sess:  
#         saver.restore(sess,tf.train.latest_checkpoint("gan_checkpoints"))  
        saver.restore(sess,'Manamge/gan8.ckpt')  
        sample_noise = Get_noise(noise,2000)
        gen_samples,gen_samples_prob = sess.run([fake_data,disc_prob_fake],feed_dict={noise_img:sample_noise}) 
        prob = gen_samples_prob.mean(axis=1)  # per-sample mean over the 1024 discriminator outputs
        gen_images = (gen_samples+1)/2 
        gen_images = Manage_gen(gen_images)
        good = [] 
        bad = []
        for i in range(len(prob)):
            #bad 0<p<1.5 good 0.49<p<0.51 better 0.95<p<1
#             if prob[i]>0.9:
            if 0.49<prob[i]<0.51:  
                good.append(gen_images[i])
#             elif prob[i]<0.51:
#                 bad.append(gen_images[i])
        good = np.array(good)
        bad = np.array(bad)
        print(good.shape)
#         print(bad.shape)
#         np.save("mygood100.npy",good)
#         Save_single(bad,mydir='bad')
        Save_single(good,mydir='a_good')
if __name__ == '__main__':  
#     Train()  
    Test()  

Method 2: discriminator output reshaped to (-1, 1)

import os  
import matplotlib.pyplot as plt  
import numpy as np  
import tensorflow as tf  
from six.moves import xrange  
data = np.load('data/final37端点补充相等值.npy')  
print(data.shape)
data = data[:,:,0:60]  
print(data.shape)
# Plot the original (real) data samples
def Show_images(data,show_nums,save=False):  
    index = 0  
    for n in range(show_nums):  
        show_images = data[index:index+100]  
        show_images = show_images.reshape(100,3,60,1)  
        r,c = 10,10  
        fig,axs = plt.subplots(r,c)  
        cnt = 0  
        for i in range(r):  
            for j in range(c):  
                xy = show_images[cnt]  
                for k in range(len(xy)):  
                    x = xy[k][0:30]  
                    y = xy[k][30:60]  
                    if k == 0 :  
                        axs[i,j].plot(x,y,color='blue',linewidth=2)  
                    if k == 1:  
                        axs[i,j].plot(x,y,color='red',linewidth=2)  
                    if k == 2:  
                        axs[i,j].plot(x,y,color='green',linewidth=2)  
                        axs[i,j].axis('off')  
                cnt += 1  
        index += 100  
        if save:  
            if not os.path.exists('gan0'):  
                os.makedirs('gan0')  
            fig.savefig('gan0/%d.jpg' % n)  
            plt.close()  
        else:  
            plt.show()  
              
def Save_genImages(gen, epoch):  
    r,c = 10,10  
    fig,axs = plt.subplots(r,c)  
    cnt = 0  
    for i in range(r):  
        for j in range(c):  
            xy = gen[cnt]  
            for k in range(len(xy)):  
                x = xy[k][0:30]  
                y = xy[k][30:60]  
                if k == 0:  
                    axs[i,j].plot(x,y,color='blue')  
                if k == 1:  
                    axs[i,j].plot(x,y,color='red')  
                if k == 2:  
                    axs[i,j].plot(x,y,color='green')  
                    axs[i,j].axis('off')  
            cnt += 1  
    if not os.path.exists('gen0'):  
        os.makedirs('gen0')  
    fig.savefig('gen0/%d.jpg' % epoch)  
    plt.close() 
def Manage_gen(gen_imgs):
    # gen_imgs is an array of shape (-1, 3, 60); the tail of the main branch is separated from the heads of the left/right branches
    # Goal: attach the main branch's tail point to the heads of the left/right branches while keeping the shape unchanged
    gen_imgs = gen_imgs.reshape(-1,3,60)
    finaldata = gen_imgs.tolist()
    final = []
    for i in range(len(finaldata)):
        zhu = finaldata[i][0]
        zuo = finaldata[i][1]
        you = finaldata[i][2]
        # Split each branch into separate x and y lists
        zhu_x = zhu[0:30]
        zhu_y = zhu[30:60]
        zuo_x = zuo[0:30]
        zuo_y = zuo[30:60]
        you_x = you[0:30]
        you_y = you[30:60]
        ############################################
        # In the real data the last two points of the main branch are almost equal, so the generated data is too;
        # when computing angles one should therefore use the last point and the third-to-last point.
        # To prepend the main branch's last point to the left/right branches, first drop the last point of each of them;
        # since the padded endpoint values are all equal, dropping one has little effect.
        # Then append the main branch's tail to the heads of the left/right branches, which keeps the shape unchanged.
        # Drop one point from the tail of the left and right branches
        del zuo_x[-1]
        del zuo_y[-1]
        del you_x[-1]
        del you_y[-1]
        # Insert the main branch's tail point at the head of the left and right branches
        zuo_x.insert(0,zhu_x[-1])
        zuo_y.insert(0,zhu_y[-1])
        you_x.insert(0,zhu_x[-1])
        you_y.insert(0,zhu_y[-1])
        zhu_x.extend(zhu_y)
        zuo_x.extend(zuo_y)
        you_x.extend(you_y)
        fencha = [zhu_x] +[zuo_x] + [you_x]
        final.append(fencha)
    final = np.array(final)  # array of shape (-1, 3, 60)
    return final
 
def Save_lossValue(epoch,iters,d_loss,g_loss):  
    with open('loss2.txt','a') as f:  
        f.write("第%d个epoch,第%d个batch , d_loss: %.8f, g_loss: %.8f"%(epoch, iters, d_loss, g_loss)+'\n')  
def plot_loss(loss):  
    fig,ax = plt.subplots(figsize=(20,7))  
    losses = np.array(loss)  
    plt.plot(losses.T[0], label="Discriminator Loss")  
    plt.plot(losses.T[1], label="Generator Loss")  
    plt.title("Training Losses")  
    plt.legend()  
    plt.savefig('loss3.jpg')  
    plt.show()  
# ReLU activation
def Relu(name, tensor):  
    return tf.nn.relu(tensor,name)  
  
# LeakyReLU activation
def LeakyRelu(x, alpha=0.2):  
    return tf.maximum(x, alpha * x)  
  
# Fully connected layer
def Fully_connected(name, value, output_shape):  
    with tf.variable_scope(name, reuse=None) as scope:  
        shape = value.get_shape().as_list()  
        w = tf.get_variable('w', [shape[1], output_shape], dtype=tf.float32,  
                                    initializer=tf.random_normal_initializer(stddev=0.01))  
        b = tf.get_variable('b', [output_shape], dtype=tf.float32, initializer=tf.constant_initializer(0.0))  
  
        return tf.matmul(value, w) + b  
      
def Get_inputs(real_size,noise_size):  
        real_img = tf.placeholder(tf.float32, [None, real_size], name='real_img')  
        noise_img = tf.placeholder(tf.float32, [None, noise_size], name='noise_img')  
          
        return real_img, noise_img  
 
def Get_noise(noise,batch_size):
    if noise == 'uniform':
            batch_noise = np.random.uniform(-1, 1, size=(batch_size, noise_size))
    elif noise == 'normal':
            batch_noise = np.random.normal(-1, 1, size=(batch_size, noise_size))
    elif noise == 'normal0_1':
            batch_noise = np.random.normal(0, 1, size=(batch_size, noise_size))
            
    return batch_noise
 
def Discriminator(img, reuse=False, name='discriminator'):  
    with tf.variable_scope(name, reuse=reuse):
  
        output = Fully_connected('df1',img,2048)
        output = LeakyRelu(output)
          
        output = Fully_connected('df2',output,1024)
        output = LeakyRelu(output)
            
        output = Fully_connected('df3',output,512)
        output = LeakyRelu(output)
  
        output = Fully_connected('df4',output,256)
        output = LeakyRelu(output)
        
#         output = Fully_connected('df6',output,128)
#         output = LeakyRelu(output)
#         
#         output = Fully_connected('df7',output,64)
#         output = LeakyRelu(output)
        
        output = Fully_connected('df5',output,1)
        prob = tf.sigmoid(output)
        
        return  output,prob
    
def Generator(noise_img, reuse=False, name='generator'):
    with tf.variable_scope(name,reuse=reuse):
             
        output = Fully_connected('gf1',noise_img,2048)
        output = tf.layers.batch_normalization(output,momentum=0.8,training=True)
        output = LeakyRelu(output)
             
        output = Fully_connected('gf2',output,1024)
        output = tf.layers.batch_normalization(output,momentum=0.8,training=True)
        output = LeakyRelu(output)
          
        output = Fully_connected('gf3',output,512)
        output = tf.layers.batch_normalization(output,momentum=0.8,training=True)
        output = LeakyRelu(output)
          
        output = Fully_connected('gf4',output,256)
        output = tf.layers.batch_normalization(output,momentum=0.8,training=True)
        output = LeakyRelu(output)
        
#         output = Fully_connected('gf6',output,128)
#         output = tf.layers.batch_normalization(output,momentum=0.8,training=True)
#         output = LeakyRelu(output)
#         
#         output = Fully_connected('gf7',output,64)
#         output = tf.layers.batch_normalization(output,momentum=0.8,training=True)
#         output = LeakyRelu(output)
            
        output = Fully_connected('gf5',output,180)
        output = tf.nn.tanh(output)
        return output
    
mode = 'gan' # gan, wgan, wgan-gp
noise = 'uniform' # normal0_1, normal, uniform  
batch_size = 100  
epochs = 50
n_sample = 100  
lamda = 10  
img_size  = 180  
noise_size = 100  
  
tf.reset_default_graph()  
  
real_img, noise_img = Get_inputs(img_size,noise_size)  # placeholders fed via feed_dict in Train()
real_data = real_img  
fake_data = Generator(noise_img)  
  
disc_real, disc_prob_real = Discriminator(real_data,reuse=False)  
disc_fake, disc_prob_fake = Discriminator(fake_data,reuse=True)  
 
  
# Trainable variables of the generator and the discriminator
train_vars = tf.trainable_variables()  
g_vars = [var for var in train_vars if var.name.startswith("generator")]  
d_vars = [var for var in train_vars if var.name.startswith("discriminator")]  
 
# Vanilla GAN
if mode == 'gan':
    
    # First form of the generator loss: the better the discriminator, the more easily the gradient vanishes;
    # at the optimum the discriminator assigns probability 0.5 to both real and fake samples
#     gen_cost = tf.reduce_mean(tf.log(1-disc_prob_fake))
#     disc_cost = -tf.reduce_mean(tf.log(disc_prob_real)+tf.log(1-disc_prob_fake))

    # Second form of the generator loss (-log D(G(z))): prone to mode collapse and unstable gradients
#     disc_cost = tf.reduce_mean(-tf.log(disc_prob_real)-tf.log(1-disc_prob_fake))
#     gen_cost = tf.reduce_mean(-tf.log((disc_prob_fake)))

    gen_cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_fake,labels=tf.ones_like(disc_fake))) # generator loss
    disc_cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_fake,labels=tf.zeros_like(disc_fake)))
    disc_cost += tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_real,labels=tf.ones_like(disc_real)))
    disc_cost /= 2. # discriminator loss
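    # Equivalent to the non-saturating form, computed on logits for numerical stability:
    # L_G = -E_z[log D(G(z))],   L_D = -( E_x[log D(x)] + E_z[log(1 - D(G(z)))] ) / 2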
    # Optimizers
    gen_train_op = tf.train.AdamOptimizer(learning_rate=0.0002, beta1=0.5).minimize(gen_cost,var_list=g_vars)
    disc_train_op = tf.train.AdamOptimizer(learning_rate=0.0002,beta1=0.5).minimize(disc_cost,var_list=d_vars)
    clip_disc_weights = None
    
#wgan
elif mode == 'wgan':
    gen_cost = -tf.reduce_mean(disc_fake) # generator loss
    disc_cost = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real) # critic (discriminator) loss
    
    # Optimizers
    gen_train_op = tf.train.RMSPropOptimizer(learning_rate=5e-5).minimize(gen_cost,var_list=g_vars)
    disc_train_op = tf.train.RMSPropOptimizer(learning_rate=5e-5).minimize(disc_cost,var_list=d_vars)
    clip_ops = []
    # Clip the discriminator weights to [-0.01, 0.01]
    for var in train_vars:
        if var.name.startswith("discriminator"):
            clip_bounds = [-0.01, 0.01]
            clip_ops.append(tf.assign(var,tf.clip_by_value(var,clip_bounds[0],clip_bounds[1])))
    clip_disc_weights = tf.group(*clip_ops)
    
elif mode == 'wgan-gp':
    gen_cost = -tf.reduce_mean(disc_fake) # generator loss  
    disc_cost = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real) # critic (discriminator) loss 
       
    # Gradient penalty
    alpha = tf.random_uniform(shape=[batch_size,1],minval=0.,maxval=1.)  
    interpolates = alpha*fake_data + (1-alpha)*real_data  
    # use local names so the disc_fake / disc_prob_fake defined above are not overwritten
    disc_interp, _ = Discriminator(interpolates,reuse=True)
    gradients = tf.gradients(disc_interp,[interpolates])[0]  
    slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients),reduction_indices=[1]))  
    gradient_penalty = tf.reduce_mean((slopes-1.)**2)  
    disc_cost += lamda * gradient_penalty  
    clip_disc_weights = None
  
    # Optimizers
    gen_train_op = tf.train.AdamOptimizer(learning_rate=2e-4,beta1=0.5,beta2=0.9).minimize(gen_cost,var_list=g_vars)  
    disc_train_op = tf.train.AdamOptimizer(learning_rate=2e-4,beta1=0.5,beta2=0.9).minimize(disc_cost,var_list=d_vars)  
  
saver = tf.train.Saver()  
    
def Train():  
    losses = []  
    with tf.Session() as sess:  
        sess.run(tf.global_variables_initializer())  
        for e in range(epochs):  
            for i in xrange(len(data)//batch_size):  
                batch_images = data[i*batch_size:(i+1)*batch_size]  
                batch_images = batch_images.reshape(batch_size,180) 
                batch_images = batch_images*2 -1 
                batch_noise = Get_noise(noise,100)
                if mode == 'gan': # vanilla GAN: train discriminator and generator once each
                    disc_iters = 1
                else:             # wgan / wgan-gp: train the discriminator several times per generator step
                    disc_iters = 5 
                for x in range(0, disc_iters):  
                    _,d_loss = sess.run([disc_train_op,disc_cost],feed_dict={real_data:batch_images,noise_img:batch_noise}) 
                    if clip_disc_weights is not None:
                        _ = sess.run(clip_disc_weights) 
                for k in range(1):
                    _,g_loss = sess.run([gen_train_op,gen_cost],feed_dict={noise_img:batch_noise})  
                Save_lossValue(e,i,d_loss,g_loss) 
                if i % 50 == 0:
                    print("第%d个epoch,第%d个batch , d_loss: %.8f, g_loss: %.8f" % (e, i, d_loss, g_loss))  
            losses.append((d_loss,g_loss))
                  
            sample_noise = Get_noise(noise,100)
            gen_samples,gen_samples_prob = sess.run([fake_data,disc_prob_fake],feed_dict={noise_img:sample_noise})
            real_samples_prob = sess.run(disc_prob_real,feed_dict={real_data:batch_images,noise_img:sample_noise}) 
            print("判别真的概率:\n",real_samples_prob) 
            print("判别假的概率: \n",gen_samples_prob)
            if e == 10 or  e == 20 or e == 30 or e ==  40 or e == 49:
                saver.save(sess,'checkpoints/a%d.ckpt' % e)  
            if e % 1 == 0:  
                gen = gen_samples.reshape(100,3,60,1)
                gen = (gen+1)/2
                gen_images = Manage_gen(gen)
                Save_genImages(gen_images, e)  
        plot_loss(losses)               
def Save_single(arr,mydir):
    data_images = arr
    data_images = data_images.reshape(-1,3,60)
    if not os.path.exists(mydir):  # make sure the output directory exists before savefig
        os.makedirs(mydir)
    for i in range(1,len(arr)):
        plt.figure(figsize=(128,128),dpi=1)
        plt.plot(data_images[i][0][0:30],data_images[i][0][30:60],color='blue',linewidth=300)
        plt.plot(data_images[i][1][0:30],data_images[i][1][30:60],color='red',linewidth=300)
        plt.plot(data_images[i][2][0:30],data_images[i][2][30:60],color='green',linewidth=300)
        plt.axis('off')
        plt.savefig(mydir+os.sep+str(i)+'.jpg',dpi=1)
        plt.close()
def Test():  
    saver = tf.train.Saver()  
    with tf.Session() as sess:  
        saver.restore(sess,tf.train.latest_checkpoint("checkpoints"))  
#         saver.restore(sess,'checkpoints/test2.ckpt')  
        sample_noise = Get_noise(noise,1000)
        gen_samples,gen_samples_prob = sess.run([fake_data,disc_prob_fake],feed_dict={noise_img:sample_noise}) 
        print(gen_samples_prob)
        gen_images = (gen_samples+1)/2  
        # Bucket the samples by discriminator probability, then save each bucket once
        bad, good, better = [], [], []
        for i in range(len(gen_samples)):
            if gen_samples_prob[i]<0.3:
                bad.append(gen_images[i])
            elif 0.5<gen_samples_prob[i]<0.6:
                good.append(gen_images[i])
            elif gen_samples_prob[i]>0.8:
                better.append(gen_images[i])
        Save_single(np.array(bad),mydir='bad')
        Save_single(np.array(good),mydir='good')
        Save_single(np.array(better),mydir='better_good')
  
if __name__ == '__main__':  
    Train()  
#     Test()  

Method 3: produces fairly good results

import os  
import matplotlib.pyplot as plt  
import numpy as np  
import tensorflow as tf  
from six.moves import xrange  
data = np.load('data/final37端点补充相等值.npy')
print(data.shape)
data = data[:,:,0:60]  
print(data.shape)
np.set_printoptions(threshold=np.inf)
# Plot the original (real) data samples
def Show_images(data,show_nums,save=False):  
    index = 0  
    for n in range(show_nums):  
        show_images = data[index:index+100]  
        show_images = show_images.reshape(100,3,60,1)  
        r,c = 10,10  
        fig,axs = plt.subplots(r,c)  
        cnt = 0  
        for i in range(r):  
            for j in range(c):  
                xy = show_images[cnt]  
                for k in range(len(xy)):  
                    x = xy[k][0:30]  
                    y = xy[k][30:60]  
                    if k == 0 :  
                        axs[i,j].plot(x,y,color='blue',linewidth=2)  
                    if k == 1:  
                        axs[i,j].plot(x,y,color='red',linewidth=2)  
                    if k == 2:  
                        axs[i,j].plot(x,y,color='green',linewidth=2)  
                        axs[i,j].axis('off')  
                cnt += 1  
        index += 100  
        if save:  
            if not os.path.exists('gen4'):  
                os.makedirs('gen4')  
            fig.savefig('gen4/%d.jpg' % n)  
            plt.close()  
        else:  
            plt.show()  
              
def Save_genImages(gen, epoch):  
    r,c = 10,10  
    fig,axs = plt.subplots(r,c)  
    cnt = 0  
    for i in range(r):  
        for j in range(c):  
            xy = gen[cnt]  
            for k in range(len(xy)):  
                x = xy[k][0:30]  
                y = xy[k][30:60]  
                if k == 0:  
                    axs[i,j].plot(x,y,color='blue')  
                if k == 1:  
                    axs[i,j].plot(x,y,color='red')  
                if k == 2:  
                    axs[i,j].plot(x,y,color='green')  
                    axs[i,j].axis('off')  
            cnt += 1  
    if not os.path.exists('Manage0'):  
        os.makedirs('Manage0')  
    fig.savefig('Manage0/%d.jpg' % epoch)  
    plt.close()  
def Manage_gen(gen_imgs):
    # gen_imgs is an array of shape (-1, 3, 60); the tail of the main branch is separated from the heads of the left/right branches
    # Goal: attach the main branch's tail point to the heads of the left/right branches while keeping the shape unchanged
    gen_imgs = gen_imgs.reshape(-1,3,60)
    finaldata = gen_imgs.tolist()
    final = []
    for i in range(len(finaldata)):
        zhu = finaldata[i][0]
        zuo = finaldata[i][1]
        you = finaldata[i][2]
        # Split each branch into separate x and y lists
        zhu_x = zhu[0:30]
        zhu_y = zhu[30:60]
        zuo_x = zuo[0:30]
        zuo_y = zuo[30:60]
        you_x = you[0:30]
        you_y = you[30:60]
        ############################################
        # In the real data the last two points of the main branch are almost equal, so the generated data is too;
        # when computing angles one should therefore use the last point and the third-to-last point.
        # To prepend the main branch's last point to the left/right branches, first drop the last point of each of them;
        # since the padded endpoint values are all equal, dropping one has little effect.
        # Then append the main branch's tail to the heads of the left/right branches, which keeps the shape unchanged.
        # Drop one point from the tail of the left and right branches
        del zuo_x[-1]
        del zuo_y[-1]
        del you_x[-1]
        del you_y[-1]
        # Insert the main branch's tail point at the head of the left and right branches
        zuo_x.insert(0,zhu_x[-1])
        zuo_y.insert(0,zhu_y[-1])
        you_x.insert(0,zhu_x[-1])
        you_y.insert(0,zhu_y[-1])
        zhu_x.extend(zhu_y)
        zuo_x.extend(zuo_y)
        you_x.extend(you_y)
        fencha = [zhu_x] +[zuo_x] + [you_x]
        final.append(fencha)
    final = np.array(final)  # array of shape (-1, 3, 60)
    return final

def Save_lossValue(epoch,iters,d_loss,g_loss):  
    with open('loss3.txt','a') as f:  
        f.write("第%d个epoch,第%d个batch , d_loss: %.8f, g_loss: %.8f"%(epoch, iters, d_loss, g_loss)+'\n')  
        
def plot_loss(loss):  
    fig,ax = plt.subplots(figsize=(20,7))  
    losses = np.array(loss)  
    plt.plot(losses.T[0], label="Discriminator Loss")  
    plt.plot(losses.T[1], label="Generator Loss")  
    plt.title("Training Losses")  
    plt.legend()  
    plt.savefig('loss4.jpg')  
    plt.show()  
    
# ReLU activation
def Relu(name, tensor):  
    return tf.nn.relu(tensor,name)  
  
# LeakyReLU activation
def LeakyRelu(x, alpha=0.25):  
    return tf.maximum(x, alpha * x)  
  
# Fully connected layer
def Fully_connected(name, value, output_shape):  
    with tf.variable_scope(name, reuse=None) as scope:  
        shape = value.get_shape().as_list()  
        w = tf.get_variable('w', [shape[1], output_shape], dtype=tf.float32,  
                                    initializer=tf.random_normal_initializer(stddev=0.01))  
        b = tf.get_variable('b', [output_shape], dtype=tf.float32, initializer=tf.constant_initializer(0.0))  
  
        return tf.matmul(value, w) + b  
      
def Get_inputs(real_size,noise_size):  
        real_img = tf.placeholder(tf.float32, [None, real_size], name='real_img')  
        noise_img = tf.placeholder(tf.float32, [None, noise_size], name='noise_img')  
          
        return real_img, noise_img  
 
def Get_noise(noise,batch_size):
    if noise == 'uniform':
            batch_noise = np.random.uniform(-1, 1, size=(batch_size, noise_size))
    elif noise == 'normal':
            batch_noise = np.random.normal(-1, 1, size=(batch_size, noise_size))
    elif noise == 'normal0_1':
            batch_noise = np.random.normal(0, 1, size=(batch_size, noise_size))
            
    return batch_noise
 
def Discriminator(img, reuse=False, name='discriminator'):  
    with tf.variable_scope(name, reuse=reuse):
  
        output = Fully_connected('df1',img,2048)
        output = LeakyRelu(output)
          
#         output = Fully_connected('df2',output,512)
#         output = LeakyRelu(output)
            
        output = Fully_connected('df3',output,1024)
        output = LeakyRelu(output)
   
#         output = Fully_connected('df4',output,256)
#         output = LeakyRelu(output)
           
        output = Fully_connected('df6',output,512)
        output = LeakyRelu(output)
#             
#         output = Fully_connected('df5',output,1)
        
        prob = tf.sigmoid(output)
        
        return  output,prob
    
def Generator(noise_img, reuse=False, name='generator'):
    with tf.variable_scope(name,reuse=reuse):
             
        output = Fully_connected('gf1',noise_img,2048)
        output = tf.layers.batch_normalization(output,momentum=0.9,training=True)
        output = LeakyRelu(output)
             
        output = Fully_connected('gf2',output,1024)
        output = tf.layers.batch_normalization(output,momentum=0.9,training=True)
        output = LeakyRelu(output)
          
#         output = Fully_connected('gf3',output,512)
#         output = tf.layers.batch_normalization(output,momentum=0.9,training=True)
#         output = LeakyRelu(output)
           
        output = Fully_connected('gf4',output,512)
        output = tf.layers.batch_normalization(output,momentum=0.9,training=True)
        output = tf.nn.relu(output)
          
#         output = Fully_connected('gf6',output,256)
#         output = tf.layers.batch_normalization(output,momentum=0.9,training=True)
#         output = tf.nn.relu(output)
          
            
        output = Fully_connected('gf5',output,180)
        
        output = tf.nn.tanh(output)
        return output
mode = 'gan' # gan, wgan, wgan-gp
noise = 'uniform' # normal0_1, normal, uniform  
batch_size = 100  
epochs = 250
n_sample = 100  
lamda = 10  
img_size  = 180  
noise_size = 100  
  
tf.reset_default_graph()  
  
real_img, noise_img = Get_inputs(img_size,noise_size)  # placeholders fed via feed_dict in Train()
real_data = real_img  
fake_data = Generator(noise_img)  
  
disc_real, disc_prob_real = Discriminator(real_data,reuse=False)  
disc_fake, disc_prob_fake = Discriminator(fake_data,reuse=True)  
 
  
# Trainable variables of the generator and the discriminator
train_vars = tf.trainable_variables()  
g_vars = [var for var in train_vars if var.name.startswith("generator")]  
d_vars = [var for var in train_vars if var.name.startswith("discriminator")]  
 
# Vanilla GAN
if mode == 'gan':
    # First form of the generator loss: the better the discriminator, the more easily the gradient vanishes;
    # at the optimum the discriminator assigns probability 0.5 to both real and fake samples
    gen_cost = tf.reduce_mean(tf.log(1-disc_prob_fake))
    disc_cost = -tf.reduce_mean(tf.log(disc_prob_real)+tf.log(1-disc_prob_fake))

    # Second form of the generator loss (-log D(G(z))): prone to mode collapse and unstable gradients
#     disc_cost = tf.reduce_mean(-tf.log(disc_prob_real)-tf.log(1-disc_prob_fake))
#     gen_cost = tf.reduce_mean(-tf.log((disc_prob_fake)))
    
#     gen_cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_fake,labels=tf.ones_like(disc_fake))) # generator loss
#     disc_cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_fake,labels=tf.zeros_like(disc_fake)))
#     disc_cost += tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=disc_real,labels=tf.ones_like(disc_real)))
#     disc_cost /= 2. # discriminator loss
    # Optimizers
    gen_train_op = tf.train.AdamOptimizer(learning_rate=1e-4, beta1=0.5).minimize(gen_cost,var_list=g_vars)
    disc_train_op = tf.train.AdamOptimizer(learning_rate=1e-4,beta1=0.5).minimize(disc_cost,var_list=d_vars)
    clip_disc_weights = None
    
#wgan
elif mode == 'wgan':
    gen_cost = -tf.reduce_mean(disc_fake) # generator loss
    disc_cost = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real) # critic (discriminator) loss
    
    # Optimizers
    gen_train_op = tf.train.RMSPropOptimizer(learning_rate=5e-5).minimize(gen_cost,var_list=g_vars)
    disc_train_op = tf.train.RMSPropOptimizer(learning_rate=5e-5).minimize(disc_cost,var_list=d_vars)
    clip_ops = []
    # Clip the discriminator weights to [-0.01, 0.01]
    for var in train_vars:
        if var.name.startswith("discriminator"):
            clip_bounds = [-0.01, 0.01]
            clip_ops.append(tf.assign(var,tf.clip_by_value(var,clip_bounds[0],clip_bounds[1])))
    clip_disc_weights = tf.group(*clip_ops)
    
elif mode == 'wgan-gp':
    gen_cost = -tf.reduce_mean(disc_fake) # generator loss  
    disc_cost = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real) # critic (discriminator) loss 
       
    # Gradient penalty
    alpha = tf.random_uniform(shape=[batch_size,1],minval=0.,maxval=1.)  
    interpolates = alpha*fake_data + (1-alpha)*real_data  
    # Discriminator returns (logits, prob); the penalty is taken on the logits
    gradients = tf.gradients(Discriminator(interpolates,reuse=True)[0],[interpolates])[0]  
    slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients),reduction_indices=[1]))  
    gradient_penalty = tf.reduce_mean((slopes-1.)**2)  
    disc_cost += lamda * gradient_penalty  
    clip_disc_weights = None
  
    # Optimizers
    gen_train_op = tf.train.AdamOptimizer(learning_rate=2e-4,beta1=0.5,beta2=0.9).minimize(gen_cost,var_list=g_vars)  
    disc_train_op = tf.train.AdamOptimizer(learning_rate=2e-4,beta1=0.5,beta2=0.9).minimize(disc_cost,var_list=d_vars)  
  
saver = tf.train.Saver()  
    
def Train():  
    losses = []  
    with tf.Session() as sess:  
        sess.run(tf.global_variables_initializer())  
        for e in range(epochs):  
            for i in xrange(len(data)//batch_size):  
                batch_images = data[i*batch_size:(i+1)*batch_size]  
                batch_images = batch_images.reshape(batch_size,180) 
                if noise != 'normal0_1' :
                    batch_images = batch_images*2 -1 
                batch_noise = Get_noise(noise,100)
                if mode == 'gan': # vanilla GAN: train discriminator and generator once each
                    disc_iters = 1
                else:             # wgan / wgan-gp: train the discriminator several times per generator step
                    disc_iters = 5 
                for x in range(0, disc_iters):  
                    _,d_loss,d_acc,d_fake = sess.run([disc_train_op,disc_cost,disc_prob_real,disc_prob_fake],feed_dict={real_data:batch_images,noise_img:batch_noise}) 
                    if clip_disc_weights is not None:
                        _ = sess.run(clip_disc_weights) 
                for k in range(1):
                    _,g_loss = sess.run([gen_train_op,gen_cost],feed_dict={noise_img:batch_noise})  
                Save_lossValue(e,i,d_loss,g_loss)  
                if i % 50 == 0:
                    print("第%d个epoch,第%d个batch , d_loss: %.8f, g_loss: %.8f, d_real_acc: %.8f, d_fake_acc: %.8f"%(e, i, d_loss, g_loss, d_acc.mean(),d_fake.mean()))  
            losses.append((d_loss,g_loss))
            
            if e == 20 or e == 50 or e == 100 or e == 200 or e == 249:
                saver.save(sess,'Manamg/gan1%d.ckpt' % e )
                
            sample_noise = Get_noise(noise,100)
            gen_samples,gen_samples_prob = sess.run([fake_data,disc_prob_fake],feed_dict={noise_img:sample_noise})
            real_samples_prob = sess.run(disc_prob_real,feed_dict={real_data:batch_images,noise_img:sample_noise}) 
            print("判别真的概率:\n",real_samples_prob.mean()) 
            print("判别假的概率: \n",gen_samples_prob.mean())

            if e % 1 == 0:  
                gen = gen_samples.reshape(100,3,60,1)
                gen = (gen+1)/2
                gen_images = Manage_gen(gen)
                Save_genImages(gen_images, e)  
        plot_loss(losses)               
def Save_single(arr,mydir):
    data_images = arr
    data_images = data_images.reshape(-1,3,60)
    if not os.path.exists(mydir):  # make sure the output directory exists before savefig
        os.makedirs(mydir)
    for i in range(1,len(arr)):
        plt.figure(figsize=(128,128),dpi=1)
        plt.plot(data_images[i][0][0:30],data_images[i][0][30:60],color='blue',linewidth=300)
        plt.plot(data_images[i][1][0:30],data_images[i][1][30:60],color='red',linewidth=300)
        plt.plot(data_images[i][2][0:30],data_images[i][2][30:60],color='green',linewidth=300)
        plt.axis('off')
        plt.savefig(mydir+os.sep+str(i)+'.jpg',dpi=1)
        plt.close()
# Post-process the generated bifurcations to reduce the error at the branch endpoints
def Smooth_gen(data):
    data = data.reshape(-1,3,60)
    data = data[0:1000,:,0:60]
    final = []
    for i in range(len(data)):
        zhu_x = data[i][0][0:30]
        zhu_y = data[i][0][30:60]
        zuo_x = data[i][1][0:30]
        zuo_y = data[i][1][30:60]
        you_x = data[i][2][0:30]
        you_y = data[i][2][30:60]
        ###########################################
        # Find the endpoint and make every value after it equal to that endpoint
        # Left branch
        zuo_index = [] # holds the points judged to be padded (interpolated) values; the first entry is the first endpoint.
        # With a smaller threshold the list holds fewer entries; with a larger one it holds more,
        # but then fewer (possibly zero) real points remain, so the result has to be checked
        for j in range(1,len(zuo_x)-1):
            threshold = 0.05 # neither too large nor too small: too large and the index of the real endpoint shrinks; the index should generally be >= 2 (0.05 works fairly well)
            if abs(zuo_x[j-1]-zuo_x[j])<threshold and abs(zuo_x[j]-zuo_x[j+1])<threshold:
                if abs(zuo_y[j-1]-zuo_y[j])<threshold and abs(zuo_y[j]-zuo_y[j+1])<threshold:
                    zuo_index.append((j-1,zuo_x[j-1],zuo_y[j-1]))
        # Constraints for the left branch
        if len(zuo_index) < 2: # continue ends this iteration when the condition is met
            continue
        zuo_duan_index = zuo_index[0][0]
        if zuo_duan_index < 2:
            continue
        zuo_duan_x = zuo_index[0][1]
        zuo_duan_y = zuo_index[0][2]
        zuo_x = np.array(zuo_x) # convert to an array so slices can be overwritten
        zuo_y = np.array(zuo_y)
        zuo_x[zuo_duan_index::] = zuo_duan_x
        zuo_y[zuo_duan_index::] = zuo_duan_y
        # Right branch
        you_index = []
        for j in range(1,len(you_x)-1):
            threshold = 0.05
            if abs(you_x[j-1]-you_x[j])<threshold and abs(you_x[j]-you_x[j+1])<threshold:
                if abs(you_y[j-1]-you_y[j])<threshold and abs(you_y[j]-you_y[j+1])<threshold:
                    you_index.append((j-1,you_x[j-1],you_y[j-1]))
        # Constraints for the right branch
        if len(you_index) < 2:
            continue
        you_duan_index = you_index[0][0]
        if you_duan_index < 2:
            continue
        you_duan_x = you_index[0][1]
        you_duan_y = you_index[0][2]
        you_x = np.array(you_x)
        you_y = np.array(you_y)
        you_x[you_duan_index::] = you_duan_x
        you_y[you_duan_index::] = you_duan_y
        # Main branch
        zhu_index = []
        for j in range(1,len(zhu_x)-1):
            threshold = 0.05
            if abs(zhu_x[j-1]-zhu_x[j])< threshold and abs(zhu_x[j]-zhu_x[j+1])<threshold:
                if abs(zhu_y[j-1]-zhu_y[j])<threshold and abs(zhu_y[j]-zhu_y[j+1])<threshold:
                    zhu_index.append((j+1,zhu_x[j+1],zhu_y[j+1]))
        # Constraints for the main branch
        if len(zhu_index) < 2 :
            continue
        zhu_duan_index = zhu_index[-1][0]  # unlike the left/right branches, the main branch endpoint is the last entry
        if zhu_duan_index > 27:
            continue
        zhu_duan_x = zhu_index[-1][1]
        zhu_duan_y = zhu_index[-1][2]
        zhu_x = np.array(zhu_x)
        zhu_y = np.array(zhu_y)
        zhu_x[0:zhu_duan_index] = zhu_duan_x
        zhu_y[0:zhu_duan_index] = zhu_duan_y
        ###############################################
#         plt.scatter(zhu_x,zhu_y,color='red')
#         plt.scatter(zuo_x,zuo_y,color='blue')
#         plt.scatter(you_x,you_y,color='green')
#         plt.show()
        ###############################################
        zhu = np.concatenate((zhu_x,zhu_y))
        zuo = np.concatenate((zuo_x,zuo_y))
        you = np.concatenate((you_x,you_y))
        fencha = np.concatenate((zhu,zuo,you))
        final.append(fencha)
    final = np.array(final)
    final = final.reshape(-1,3,60)
    return final
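# A quick, hypothetical usage sketch: Smooth_gen expects data roughly in [0, 1] (after the (gen+1)/2 rescaling)
# and may return fewer samples than it was given, since samples whose padded endpoint cannot be located are skipped.
# smoothed = Smooth_gen(np.random.rand(10, 3, 60))
# print(smoothed.shape)   # (M, 3, 60) with M <= 10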
def Test():  
    saver = tf.train.Saver()  
    with tf.Session() as sess:  
#         saver.restore(sess,tf.train.latest_checkpoint("gan_checkpoints"))  
        saver.restore(sess,'Manamg/gan1249.ckpt')  
        sample_noise = Get_noise(noise,150000)
        gen_samples,gen_samples_prob = sess.run([fake_data,disc_prob_fake],feed_dict={noise_img:sample_noise}) 
        prob = gen_samples_prob.mean(axis=1)  # per-sample mean over the discriminator's output units
        gen_images = (gen_samples+1)/2 
        gen_images = Manage_gen(gen_images)
        good = [] 
        bad = []
#         print(prob)
        for i in range(len(prob)):
            if 0.49<prob[i]<0.51: 
#             if 0.90<prob[i]<1: 
                good.append(gen_images[i])
        good = np.array(good)
        # Post-process the generated endpoints to smooth them
        good = Smooth_gen(good)
        print(good.shape)
#         for i in range(len(good)):
#             plt.plot(good[i][0][0:30],good[i][0][30:60])
#             plt.plot(good[i][1][0:30],good[i][1][30:60])
#             plt.plot(good[i][2][0:30],good[i][2][30:60])
#             plt.show()
#         print(good.shape)
#         Save_single(good,mydir='good')
        np.save("bad.npy",good)
       
        
if __name__ == '__main__':  
#     Train()  
    Test()  

 
