GAN Code Cleanup
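
Below is the cleaned-up TensorFlow 1.x listing: a WGAN-GP whose samples are three curves, each stored as 30 x-values followed by 30 y-values (180 values per sample), plus helpers for plotting the data, saving generated image grids, and logging losses. A simpler Keras GAN example on MNIST follows at the end.

```python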

import os
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from six.moves import xrange
data = np.load('data/final37.npy')
data = data[:,:,0:60]
# Plot the original data samples as 10x10 grids of curves
def Show_images(data,show_nums,save=False):
    index = 0
    for n in range(show_nums):
        show_images = data[index:index+100]
        show_images = show_images.reshape(100,3,60,1)
        r,c = 10,10
        fig,axs = plt.subplots(r,c)
        cnt = 0
        for i in range(r):
            for j in range(c):
                xy = show_images[cnt]
                for k in range(len(xy)):
                    x = xy[k][0:30]
                    y = xy[k][30:60]
                    if k == 0 :
                        axs[i,j].plot(x,y,color='blue',linewidth=2)
                    if k == 1:
                        axs[i,j].plot(x,y,color='red',linewidth=2)
                    if k == 2:
                        axs[i,j].plot(x,y,color='green',linewidth=2)
                axs[i,j].axis('off')
                cnt += 1
        index += 100
        if save:
            if not os.path.exists('This_epoch'):
                os.makedirs('This_epoch')
            fig.savefig('This_epoch/%d.jpg' % n)
            plt.close()
        else:
            plt.show()
            
def Save_genImages(gen, epoch):
    r,c = 10,10
    fig,axs = plt.subplots(r,c)
    cnt = 0
    for i in range(r):
        for j in range(c):
            xy = gen[cnt]
            for k in range(len(xy)):
                x = xy[k][0:30]
                y = xy[k][30:60]
                if k == 0:
                    axs[i,j].plot(x,y,color='blue')
                if k == 1:
                    axs[i,j].plot(x,y,color='red')
                if k == 2:
                    axs[i,j].plot(x,y,color='green')
            axs[i,j].axis('off')
            cnt += 1
    if not os.path.exists('gen_img1'):
        os.makedirs('gen_img1')
    fig.savefig('gen_img1/%d.jpg' % epoch)
    plt.close()
def Save_lossValue(epoch,iters,d_loss,g_loss):
    with open('losst.txt','a') as f:
        f.write("第%d个epoch,第%d个batch , d_loss: %.8f, g_loss: %.8f"%(epoch, iters, d_loss, g_loss)+'\n')
def plot_loss(loss):
    fig,ax = plt.subplots(figsize=(20,7))
    losses = np.array(loss)
    plt.plot(losses.T[0], label="Discriminator Loss")
    plt.plot(losses.T[1], label="Generator Loss")
    plt.title("Training Losses")
    plt.legend()
    plt.savefig('loss.jpg')
    plt.show()
# ReLU activation
def Relu(name, tensor):
    return tf.nn.relu(tensor,name)

# LeakyReLU activation (equivalent to tf.nn.leaky_relu(x, alpha=leak) in TF >= 1.4)
def LeakyRelu(name, x, leak=0.2):
    return tf.maximum(x,leak*x,name=name)

# Fully connected layer
def Fully_connected(name, value, output_shape):
    with tf.variable_scope(name,reuse=None) as scope:
        shape = value.get_shape().as_list()
        w = tf.get_variable('w',[shape[1],output_shape],dtype=tf.float32,
                            initializer=tf.random_normal_initializer(stddev=0.01))
        b = tf.get_variable('b',[output_shape],dtype=tf.float32,initializer=tf.constant_initializer(0.0))
        
        return tf.matmul(value,w) + b
    
# 1-D convolution
def Conv1d(name, tensor, ksize, out_dim, stride, padding, stddev=0.01):
    with tf.variable_scope(name):
        w = tf.get_variable('w',[ksize,tensor.get_shape()[-1],out_dim],dtype=tf.float32,
                            initializer=tf.random_normal_initializer(stddev=stddev))
        var = tf.nn.conv1d(tensor,w,stride,padding=padding)
        b = tf.get_variable('b',[out_dim],'float32',initializer=tf.constant_initializer(0.01))
        
        return tf.nn.bias_add(var,b)
    
# 2-D convolution
def Conv2d(name, tensor, filter_size1 ,filter_size2, out_dim, stride1, stride2, padding, stddev=0.01):
    with tf.variable_scope(name):
        w = tf.get_variable('w',[filter_size1, filter_size2, tensor.get_shape()[-1], out_dim], dtype=tf.float32,
                            initializer=tf.random_normal_initializer(stddev=stddev))
        var = tf.nn.conv2d(tensor, w, [1, stride1, stride2, 1], padding=padding)
        b = tf.get_variable('b',[out_dim], 'float32', initializer=tf.constant_initializer(0.01))
        
        return tf.nn.bias_add(var,b)
    
# 2-D transposed convolution (deconvolution)
def Deconv2d(name, tensor, filter_size1, filter_size2, outshape, stride1, stride2, padding, stddev=0.01):
    with tf.variable_scope(name):
        w = tf.get_variable('w', [filter_size1, filter_size2, outshape[-1], tensor.get_shape()[-1]], dtype=tf.float32,
                                 initializer=tf.random_normal_initializer(stddev=stddev))
        var = tf.nn.conv2d_transpose(tensor, w, outshape, strides=[1,stride1, stride2, 1], padding=padding)
        b = tf.get_variable('b', [outshape[-1]],'float32', initializer=tf.constant_initializer(0.01))
        
        return tf.nn.bias_add(var,b)
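
# Note: the helpers above (Relu, LeakyRelu, Fully_connected, Conv1d, Conv2d,
# Deconv2d) are kept for completeness; the generator and discriminator below
# are built directly with tf.layers and do not use them.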
def Get_inputs(real_size,noise_size):
    real_img = tf.placeholder(tf.float32, [None, real_size], name='real_img')
    noise_img = tf.placeholder(tf.float32, [None, noise_size], name='noise_img')

    return real_img, noise_img
    
def Generator(noise_img, reuse=False, alpha=0.01):
    with tf.variable_scope('generator',reuse=reuse):
        output = tf.layers.dense(noise_img,128)
        output = tf.maximum(alpha * output,output)   # LeakyReLU
        output = tf.layers.batch_normalization(output,momentum=0.8,training=True)
        output = tf.layers.dropout(output, rate=0.25)
        
        output = tf.layers.dense(output,512)
        output = tf.maximum(alpha * output,output)
        output = tf.layers.batch_normalization(output,momentum=0.8,training=True)
        output = tf.layers.dropout(output,rate=0.25)
        
        output = tf.layers.dense(output,180)
        output = tf.tanh(output)
        return output
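# The generator's tanh output lies in [-1,1]; Train() rescales the data to the same range.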
def Discriminator(img,reuse=False,alpha=0.01):
    with tf.variable_scope("discriminator", reuse=reuse):
        output = tf.layers.dense(img,512)
        output = tf.maximum(alpha * output, output)   # LeakyReLU
        
        output = tf.layers.dense(output,128)
        output = tf.maximum(alpha * output, output)
        
        output = tf.layers.dense(output,1)
        return output
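# Note: the discriminator (critic) has no sigmoid on its output; the WGAN
# losses below operate on raw scores rather than probabilities.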
    
batch_size = 100
epochs = 1
n_sample = 100                 # number of samples drawn for the per-epoch preview grid
learning_rate = 1e-4           # used by both Adam optimizers below
lamda = 10                     # gradient penalty weight (WGAN-GP)
img_size = 180                 # 3 curves x (30 x-values + 30 y-values)
noise_size = 100

tf.reset_default_graph()

real_img, noise_img = Get_inputs(img_size,noise_size)   # placeholders fed at run time
real_data = real_img
fake_data = Generator(noise_img)

disc_real = Discriminator(real_data,reuse=False)
disc_fake = Discriminator(fake_data,reuse=True)


# Collect the generator's and discriminator's trainable variables
train_vars = tf.trainable_variables()
g_vars = [var for var in train_vars if var.name.startswith("generator")]
d_vars = [var for var in train_vars if var.name.startswith("discriminator")]

gen_cost = -tf.reduce_mean(disc_fake)                               # generator loss
disc_cost = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real)   # critic loss (WGAN)

# WGAN-GP gradient penalty (Gulrajani et al., 2017): evaluate the critic at
# random points on the straight lines between real and fake samples and push
# its gradient norm at those points toward 1.
alpha = tf.random_uniform(shape=[batch_size,1],minval=0.,maxval=1.)
interpolates = alpha*fake_data + (1-alpha)*real_data
gradients = tf.gradients(Discriminator(interpolates,reuse=True),[interpolates])[0]
slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients),axis=[1]))
gradient_penalty = tf.reduce_mean((slopes-1.)**2)
disc_cost += lamda * gradient_penalty




# Optimizers; beta1=0.5 (rather than Adam's default 0.9) is a common choice for stable GAN training
gen_train_op = tf.train.AdamOptimizer(learning_rate=learning_rate,beta1=0.5,beta2=0.9).minimize(gen_cost,var_list=g_vars)
disc_train_op = tf.train.AdamOptimizer(learning_rate=learning_rate,beta1=0.5,beta2=0.9).minimize(disc_cost,var_list=d_vars)
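
# Note: tf.layers.batch_normalization normally requires running the ops in
# tf.GraphKeys.UPDATE_OPS alongside the train op to refresh its moving
# statistics; here the generator always runs with training=True, so the moving
# averages are never consumed and the omission is harmless.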

saver = tf.train.Saver()
def Train():
    losses = []
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for e in range(epochs):
            for i in xrange(len(data)//batch_size):
                batch_images = data[i*batch_size:(i+1)*batch_size]
                batch_images = batch_images.reshape(batch_size,180)
                batch_images = batch_images*2 - 1   # rescale to [-1,1] (assumes data lies in [0,1]) to match the tanh output
                batch_noise = np.random.uniform(-1,1,size=(batch_size,noise_size))
                # Train the critic once per generator update (increase the range for more critic steps).
                for _ in range(1):
                    _,d_loss = sess.run([disc_train_op,disc_cost],feed_dict={real_data:batch_images,noise_img:batch_noise})
                _,g_loss = sess.run([gen_train_op,gen_cost],feed_dict={noise_img:batch_noise})
                Save_lossValue(e,i,d_loss,g_loss)
                print("第%d个epoch,第%d个batch , d_loss: %.8f, g_loss: %.8f"%(e, i, d_loss, g_loss))
                losses.append((d_loss,g_loss))
            sample_noise = np.random.uniform(-1,1,size=(n_sample,noise_size))
            # Reuse the existing fake_data op rather than rebuilding the generator graph every epoch.
            gen_samples = sess.run(fake_data,feed_dict={noise_img:sample_noise})
            print(gen_samples.shape)
            if not os.path.exists('checkpoints'):
                os.makedirs('checkpoints')   # Saver.save does not create missing directories
            saver.save(sess,'checkpoints/test.ckpt')
            if e % 1 == 0:   # save a preview grid every epoch; raise the modulus to save less often
                gen = gen_samples.reshape(100,3,60,1)
                Save_genImages(gen, e)
        plot_loss(losses)             

def Test():
    saver = tf.train.Saver(var_list=g_vars)
    with tf.Session() as sess:
        saver.restore(sess,tf.train.latest_checkpoint("checkpoints"))
        sample_noise = np.random.uniform(-1, 1, size=(10000,noise_size))
        gen_samples = sess.run(fake_data,feed_dict={noise_img:sample_noise})
        gen_images = (gen_samples+1)/2   # rescale [-1,1] -> [0,1]
        show_num = len(gen_images)//100
        Show_images(gen_images,show_num,save=True)

if __name__ == '__main__':
    Train()
    #Test()
```

Below is a simple GAN code example that generates handwritten digit images:

```python
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import Dense, Flatten, Reshape, Input, Dropout, LeakyReLU
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.optimizers import Adam

# Load the MNIST dataset
(X_train, _), (_, _) = mnist.load_data()

# Scale pixel values to [-1, 1]
X_train = (X_train.astype(np.float32) - 127.5) / 127.5
X_train = np.expand_dims(X_train, axis=3)

# Generator model
generator = Sequential([
    Dense(256, input_shape=(100,)),
    LeakyReLU(alpha=0.2),
    Dense(512),
    LeakyReLU(alpha=0.2),
    Dense(1024),
    LeakyReLU(alpha=0.2),
    Dense(28 * 28 * 1, activation='tanh'),
    Reshape((28, 28, 1))
])

# Discriminator model
discriminator = Sequential([
    Flatten(input_shape=(28, 28, 1)),
    Dense(512),
    LeakyReLU(alpha=0.2),
    Dropout(0.3),
    Dense(256),
    LeakyReLU(alpha=0.2),
    Dropout(0.3),
    Dense(1, activation='sigmoid')
])

# Compile the discriminator
discriminator.compile(optimizer=Adam(lr=0.0002, beta_1=0.5),
                      loss='binary_crossentropy', metrics=['accuracy'])

# Freeze the discriminator inside the combined model
discriminator.trainable = False

# Combined GAN model: noise -> generator -> discriminator
gan_input = Input(shape=(100,))
gan_output = discriminator(generator(gan_input))
gan = Model(gan_input, gan_output)
gan.compile(optimizer=Adam(lr=0.0002, beta_1=0.5), loss='binary_crossentropy')

# Train the GAN
epochs = 10000
batch_size = 128
for epoch in range(epochs):
    # Pick a random batch of real images
    idx = np.random.randint(0, X_train.shape[0], batch_size)
    real_images = X_train[idx]

    # Generate a batch of fake images
    noise = np.random.normal(0, 1, (batch_size, 100))
    fake_images = generator.predict(noise)

    # Train the discriminator on real (label 1) and fake (label 0) batches
    discriminator_loss_real = discriminator.train_on_batch(real_images, np.ones((batch_size, 1)))
    discriminator_loss_fake = discriminator.train_on_batch(fake_images, np.zeros((batch_size, 1)))
    discriminator_loss = 0.5 * np.add(discriminator_loss_real, discriminator_loss_fake)

    # Train the generator through the combined model (labels flipped to 1)
    noise = np.random.normal(0, 1, (batch_size, 100))
    generator_loss = gan.train_on_batch(noise, np.ones((batch_size, 1)))

    # Report progress
    if epoch % 100 == 0:
        print(f'Epoch: {epoch}, Discriminator Loss: {discriminator_loss}, Generator Loss: {generator_loss}')

# Generate some handwritten digit images
noise = np.random.normal(0, 1, (10, 100))
generated_images = generator.predict(noise)
generated_images = 0.5 * generated_images + 0.5   # rescale [-1, 1] -> [0, 1]

fig, axs = plt.subplots(1, 10, figsize=(20, 2))
for i in range(10):
    axs[i].imshow(generated_images[i, :, :, 0], cmap='gray')
    axs[i].axis('off')
plt.show()
```

This example uses the Keras API to define the generator and discriminator and combines them into a single GAN model, with MNIST as the example dataset. Each training step first updates the discriminator to distinguish real images from generated ones, then updates the generator through the combined model (with the discriminator frozen) so that its fakes score as real. After training, a few digits are sampled from the generator for visualization.
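
As a small follow-up (a minimal sketch, not part of the original listing; it assumes the `generator` model and the numpy import from the code above, and `gan_generator.h5` is an arbitrary filename), the trained generator can be saved on its own and reloaded later to sample digits without the discriminator or any retraining:

```python
from tensorflow.keras.models import load_model

# Persist only the generator; the discriminator is not needed for sampling.
generator.save('gan_generator.h5')   # hypothetical filename

# Later, e.g. in another script: reload and draw new samples.
g = load_model('gan_generator.h5')
noise = np.random.normal(0, 1, (10, 100))
samples = g.predict(noise)
samples = 0.5 * samples + 0.5   # map the tanh output from [-1, 1] back to [0, 1]
```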