Tensorflow2 GAN 系列(一)——基本GAN

本文使用最基本的GAN结构进行手写数字生成

GAN的基本思想:

1.生成器生成目标结果

2.判别器判断输入结果是真实内容还是生成器生成的结果

生成器和判别器通常是两个分开的深度神经网络

基本GAN结构中生成器的输入:随机噪声

基本GAN结构中生成器的输出:手写数字图像

判别器输入:真实图片或生成器输出的假图

判别器输出:判断为真或者假

生成器目标:让判别器尽量把生成的结果判断为真

判别器目标:尽量区分真实图片和生成图片

 

 

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import matplotlib.pyplot as plt


# Load the MNIST training split; labels and the held-out test split are unused.
(train_images, train_labels), _ = keras.datasets.mnist.load_data()

# Map uint8 pixels [0, 255] into [-1, 1] (matching the generator's tanh range)
# and append a trailing channel axis: (60000, 28, 28) -> (60000, 28, 28, 1).
train_images = 2 * tf.cast(train_images, tf.float32) / 255. - 1
train_images = tf.expand_dims(train_images, -1)

Batch_Size = 256
Buffer_Size = 60000  # shuffle buffer covers the entire training set

# Build the input pipeline: shuffle the full dataset, then batch.
dataset = tf.data.Dataset.from_tensor_slices(train_images)
dataset = dataset.shuffle(Buffer_Size).batch(Batch_Size)

def generator_model():
    """Build the generator: maps a 100-dim noise vector to a 28x28x1 image.

    Returns:
        A ``tf.keras.Sequential`` model whose output lies in [-1, 1],
        matching the normalization applied to the real training images.

    Fix: the original appended a BatchNormalization layer *after* the tanh
    output layer, which re-scales the generated pixels outside [-1, 1];
    the output layer now ends with tanh directly.
    """
    model = tf.keras.Sequential()
    model.add(layers.Dense(256, input_shape=(100,), use_bias=False))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Dense(512, use_bias=False))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    # Output layer: no BatchNormalization after the activation so the
    # generated pixels stay within tanh's [-1, 1] range.
    model.add(layers.Dense(28 * 28, activation='tanh'))
    model.add(layers.Reshape([28, 28, 1]))
    return model

def discriminator_model():
    """Build the discriminator: flattens a 28x28x1 image to a single real/fake logit."""
    model = tf.keras.Sequential([
        layers.Flatten(),
        layers.Dense(512, use_bias=False),
        layers.BatchNormalization(),
        layers.LeakyReLU(),
        layers.Dense(512, use_bias=False),
        layers.BatchNormalization(),
        layers.LeakyReLU(),
        layers.Dense(1),  # raw logit; the loss is built with from_logits=True
    ])
    return model

# Shared binary cross-entropy; from_logits=True because the discriminator
# emits raw scores (no sigmoid on its final Dense layer).
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)

def discriminator_loss(real_out, fake_out):
    """Total discriminator loss: real logits pushed toward 1, fake logits toward 0."""
    loss_on_real = cross_entropy(tf.ones_like(real_out), real_out)
    loss_on_fake = cross_entropy(tf.zeros_like(fake_out), fake_out)
    return loss_on_real + loss_on_fake

def generator_loss(fake_out):
    """Generator loss: minimized when the discriminator labels fakes as real (1)."""
    return cross_entropy(tf.ones_like(fake_out), fake_out)

# Independent Adam optimizers so each network is updated on its own loss.
generator_opt = tf.keras.optimizers.Adam(0.0001)
discriminator_opt = tf.keras.optimizers.Adam(0.0001)

Epochs = 100              # default number of training epochs
input_dim = 100           # dimensionality of the generator's noise input
num_exp_to_generate = 16  # number of preview samples rendered per epoch

# Fixed noise vectors so the per-epoch previews are comparable over time.
seed = tf.random.normal([num_exp_to_generate, input_dim])

generator = generator_model()
discriminator = discriminator_model()

def train_step(images):
    """Run one adversarial update: one discriminator step and one generator step.

    Args:
        images: a batch of real images normalized to [-1, 1].

    Fix: both models contain BatchNormalization, so they must be called with
    ``training=True`` here; the original invoked them in inference mode,
    making BN use (uninitialized) moving statistics during training.
    """
    noise = tf.random.normal([Batch_Size, input_dim])
    with tf.GradientTape() as gen_tape, tf.GradientTape() as dis_tape:
        real_out = discriminator(images, training=True)
        gen_img = generator(noise, training=True)
        fake_out = discriminator(gen_img, training=True)
        dis_loss = discriminator_loss(real_out, fake_out)
        gen_loss = generator_loss(fake_out)

    # Compute gradients from each tape and apply them with the matching optimizer.
    gen_grad = gen_tape.gradient(gen_loss, generator.trainable_variables)
    dis_grad = dis_tape.gradient(dis_loss, discriminator.trainable_variables)
    discriminator_opt.apply_gradients(zip(dis_grad, discriminator.trainable_variables))
    generator_opt.apply_gradients(zip(gen_grad, generator.trainable_variables))

def genrate_plot_image(gen_model, test_noise):
    """Render a 4x4 grid of generator samples for the given noise batch.

    Args:
        gen_model: the generator model (called in inference mode).
        test_noise: noise tensor of shape (16, input_dim).

    Fix: the original passed floats scaled to [0, 255] to ``imshow``, which
    clips float input to [0, 1]; values are kept in [0, 1] and rendered with
    a grayscale colormap instead.
    """
    pre_images = gen_model(test_noise, training=False)
    fig = plt.figure(figsize=(4, 4))
    for i in range(pre_images.shape[0]):
        plt.subplot(4, 4, i + 1)
        # Map the tanh output range [-1, 1] to [0, 1] for display.
        plt.imshow((pre_images[i, :, :, 0] + 1) / 2, cmap='gray')
        plt.axis('off')
    plt.show()

def train(dataset, epochs):
    """Train the GAN for ``epochs`` passes, previewing samples after each epoch."""
    for epoch in range(epochs):
        for batch in dataset:
            train_step(batch)
        print(epoch)  # minimal progress indicator
        genrate_plot_image(generator, seed)

if __name__ == '__main__':
    # Use the Epochs constant defined above; the original hard-coded 200,
    # leaving the Epochs = 100 setting dead and inconsistent.
    train(dataset, Epochs)

 

  • 7
    点赞
  • 5
    收藏
    觉得还不错? 一键收藏
  • 2
    评论
Sure! 这是一个简单的基于TensorflowGAN代码,涉及两个神经网络模型——生成器和判别器: ```python import tensorflow as tf import numpy as np # 定义GAN的判别器和生成器 def generator(x, reuse=False): with tf.variable_scope('Generator', reuse=reuse): x = tf.layers.dense(x, units=256, activation=tf.nn.relu) x = tf.layers.dense(x, units=784, activation=tf.nn.sigmoid) return x def discriminator(x, reuse=False): with tf.variable_scope('Discriminator', reuse=reuse): x = tf.layers.dense(x, units=256, activation=tf.nn.relu) x = tf.layers.dense(x, units=1, activation=None) return x # 定义损失函数,包括交叉熵和梯度下降优化器 x = tf.placeholder(tf.float32, shape=[None, 784], name='x') z = tf.placeholder(tf.float32, shape=[None, 100], name='z') g = generator(z) d_real = discriminator(x) d_fake = discriminator(g, reuse=True) d_loss_real = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(logits=d_real, labels=tf.ones_like(d_real))) d_loss_fake = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake, labels=tf.zeros_like(d_fake))) d_loss = d_loss_real + d_loss_fake g_loss = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake, labels=tf.ones_like(d_fake))) d_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Discriminator') g_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Generator') d_train = tf.train.AdamOptimizer().minimize(d_loss, var_list=d_vars) g_train = tf.train.AdamOptimizer().minimize(g_loss, var_list=g_vars) # 开始训练模型 batch_size = 32 z_dim = 100 epochs = 100 samples = [] sess = tf.Session() sess.run(tf.global_variables_initializer()) for epoch in range(epochs): for step in range(mnist.train.num_examples // batch_size): batch_x, _ = mnist.train.next_batch(batch_size) batch_x = batch_x*2 - 1 batch_z = np.random.uniform(-1, 1, size=(batch_size, z_dim)) sess.run(d_train, feed_dict={x: batch_x, z: batch_z}) sess.run(g_train, feed_dict={z: batch_z}) if epoch%10 == 0: print("Epoch {}/{}".format(epoch+1, epochs)) batch_z = np.random.uniform(-1, 1, size=(16, z_dim)) 
gen_samples = sess.run(generator(z, reuse=True), feed_dict={z: batch_z}) samples.append(gen_samples) ``` 以上是一个简单的GAN代码,你可以根据需要进行更改和调整。
评论 2
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值