TensorFlow 2.0 (5): Implementing a GAN (Generative Adversarial Network)

On some practical projects a GAN can deliver better real-world results than a plain CNN, even though its loss values are not necessarily lower than those of a conventional network. Because of that practical value, this post walks through a working demo that you can use as a coding reference.

I. A Getting-Started GAN Example

import tensorflow as tf
import glob
import imageio
import matplotlib.pyplot as plt
import numpy as np
import os
import PIL
from tensorflow.keras import layers
import time

from IPython import display

tf.__version__

2.0.0

1. Building the generator and discriminator, the loss functions, and the training loop


(train_images, train_labels), (_, _) = tf.keras.datasets.mnist.load_data()

train_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype('float32')
train_images = (train_images - 127.5) / 127.5 # Normalize the images to the [-1, 1] range

BUFFER_SIZE = 60000
BATCH_SIZE = 256

# Shuffle and batch the data
train_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
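
As a quick sanity check (optional, not part of the original tutorial code), you can pull one batch from the pipeline and confirm its shape and value range; the snippet below is only an illustrative sketch:

# Optional: inspect one batch from the pipeline (illustrative sketch)
for sample_batch in train_dataset.take(1):
    print(sample_batch.shape)  # expected: (256, 28, 28, 1) for a full batch
    print(float(tf.reduce_min(sample_batch)), float(tf.reduce_max(sample_batch)))  # roughly within [-1, 1]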


# Build the generator: a stack of transposed convolutions (DCGAN-style)
# that upsamples a 100-dimensional noise vector into a 28x28x1 image
def make_generator_model():
    model = tf.keras.Sequential()
    model.add(layers.Dense(7*7*256, use_bias=False, input_shape=(100,)))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Reshape((7, 7, 256)))
    assert model.output_shape == (None, 7, 7, 256) # Note: None means the batch size is unconstrained

    model.add(layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False))
    assert model.output_shape == (None, 7, 7, 128)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False))
    assert model.output_shape == (None, 14, 14, 64)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh'))
    assert model.output_shape == (None, 28, 28, 1)

    return model

# Use the (still untrained) generator to produce a sample image
generator = make_generator_model()

noise = tf.random.normal([1, 100])
generated_image = generator(noise, training=False)

plt.imshow(generated_image[0, :, :, 0], cmap='gray')

# Build the discriminator: a CNN binary classifier
def make_discriminator_model():
    model = tf.keras.Sequential()
    model.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same',
                                     input_shape=[28, 28, 1]))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))

    model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))

    model.add(layers.Flatten())
    model.add(layers.Dense(1))

    return model

# Run the (still untrained) discriminator on the generated image
discriminator = make_discriminator_model()
decision = discriminator(generated_image)
print(decision)
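
The discriminator ends in a Dense(1) layer with no activation, so `decision` is a raw logit rather than a probability. If you prefer a probability-style score you can pass it through a sigmoid; this is an optional illustration, not part of the training code:

# Optional: convert the raw logit to a probability (illustrative only)
print(tf.sigmoid(decision))  # values around 0.5 are expected before any training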

# This helper computes the binary cross-entropy loss
# (from_logits=True because the discriminator outputs raw logits)
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
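
Because `from_logits=True`, the loss object applies the sigmoid internally. A tiny worked example (illustrative, not part of the original code): a logit of 0 corresponds to a probability of 0.5, so the cross-entropy against a label of 1 should come out near ln(2) ≈ 0.693:

# Illustrative check: logit 0 vs. label 1 -> -log(sigmoid(0)) = -log(0.5) ≈ 0.693
print(cross_entropy(tf.ones((1, 1)), tf.zeros((1, 1))))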



# Discriminator loss: real images should be classified as real (1), generated images as fake (0)
def discriminator_loss(real_output, fake_output):
    real_loss = cross_entropy(tf.ones_like(real_output), real_output)
    fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
    total_loss = real_loss + fake_loss
    return total_loss


    
# Generator loss
def generator_loss(fake_output):
    # The generator wants the discriminator to classify its fake images as real (label 1)
    return cross_entropy(tf.ones_like(fake_output), fake_output)
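
As a quick sanity check on both loss functions (illustrative, using made-up logits): an untrained discriminator should output logits near 0, which corresponds to ln(2) ≈ 0.693 per cross-entropy term:

# Illustrative sanity check with dummy logits of 0
dummy_real = tf.zeros((4, 1))
dummy_fake = tf.zeros((4, 1))
print(generator_loss(dummy_fake))                  # ≈ 0.693
print(discriminator_loss(dummy_real, dummy_fake))  # ≈ 1.386 (real term + fake term)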

# Optimizers; both generator and discriminator use Adam with a learning rate of 1e-4
generator_optimizer = tf.keras.optimizers.Adam(1e-4)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)

checkpoint_dir = './training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,
                                 discriminator_optimizer=discriminator_optimizer,
                                 generator=generator,
                                 discriminator=discriminator)

EPOCHS = 50
noise_dim = 100
num_examples_to_generate = 16


# We will reuse this seed over time, which makes it easier to visualize progress in the animated GIF
seed = tf.random.normal([num_examples_to_generate, noise_dim])

# Note the use of `tf.function`:
# this decorator "compiles" the function into a TensorFlow graph
@tf.function
def train_step(images):
    noise = tf.random.normal([BATCH_SIZE, noise_dim])

    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
      generated_images = generator(noise, training=True)

      real_output = discriminator(images, training=True)
      fake_output = discriminator(generated_images, training=True)

      gen_loss = generator_loss(fake_output)
      disc_loss = discriminator_loss(real_output, fake_output)

    gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
    gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)

    generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))
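
Before launching the full training loop, it can be worth running a single step on one batch as a smoke test, so that shape or dtype problems surface early. This is an optional sketch, not part of the original tutorial:

# Optional smoke test: run one training step on a single batch (illustrative)
for smoke_batch in train_dataset.take(1):
    train_step(smoke_batch)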

def train(dataset, epochs):
  for epoch in range(epochs):
    start = time.time()

    for image_batch in dataset:
      train_step(image_batch)

    # Produce images for the GIF as training progresses
    display.clear_output(wait=True)
    generate_and_save_images(generator,
                             epoch + 1,
                             seed)

    # Save a checkpoint every 15 epochs
    if (epoch + 1) % 15 == 0:
      checkpoint.save(file_prefix = checkpoint_prefix)

    print ('Time for epoch {} is {} sec'.format(epoch + 1, time.time()-start))

  # Generate a final set of images after the last epoch
  display.clear_output(wait=True)
  generate_and_save_images(generator,
                           epochs,
                           seed)

def generate_and_save_images(model, epoch, test_input):
  # Note that `training` is set to False,
  # so all layers (notably batch normalization) run in inference mode.
  predictions = model(test_input, training=False)

  fig = plt.figure(figsize=(4,4))

  for i in range(predictions.shape[0]):
      plt.subplot(4, 4, i+1)
      plt.imshow(predictions[i, :, :, 0] * 127.5 + 127.5, cmap='gray')
      plt.axis('off')

  plt.savefig('image_at_epoch_{:04d}.png'.format(epoch))
  plt.show()

train(train_dataset, EPOCHS)

checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
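
If you want to verify that the restore actually matched the saved variables, `restore` returns a status object you can assert on. An optional variant of the call above (illustrative only):

# Optional: check that the restore matched the checkpointed objects (illustrative)
status = checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
status.assert_existing_objects_matched()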

# Display a single image for a given epoch number
def display_image(epoch_no):
  return PIL.Image.open('image_at_epoch_{:04d}.png'.format(epoch_no))

display_image(EPOCHS)

anim_file = 'dcgan.gif'

with imageio.get_writer(anim_file, mode='I') as writer:
  filenames = glob.glob('image*.png')
  filenames = sorted(filenames)
  last = -1
  for i, filename in enumerate(filenames):
    # Sub-sample the frames: the selected frame index grows with sqrt(i), so
    # early epochs (where visual progress is fastest) contribute more frames.
    frame = 2*(i**0.5)
    if round(frame) > round(last):
      last = frame
    else:
      continue
    image = imageio.imread(filename)
    writer.append_data(image)
  # Append the final frame once more so the GIF lingers on the last result
  image = imageio.imread(filename)
  writer.append_data(image)
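
If you do not care about the sub-sampling logic and simply want every saved epoch image in the GIF, a simpler variant (illustrative; the output name dcgan_all_frames.gif is made up here) would be:

# Optional simpler variant: include every frame (illustrative)
with imageio.get_writer('dcgan_all_frames.gif', mode='I') as writer:
    for png in sorted(glob.glob('image*.png')):
        writer.append_data(imageio.imread(png))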

import IPython
if IPython.version_info > (6,2,0,''):
  display.Image(filename=anim_file)

try:
  from google.colab import files
except ImportError:
   pass
else:
  files.download(anim_file)

The main explanations are in the code comments above.
