[TensorFlow 2.x] Generating MNIST handwritten digits with WGAN-GP

import tensorflow as tf
import numpy as np
import cv2

class Generator(tf.keras.Model):
    def __init__(self):
        super(Generator, self).__init__()
        # MLP that maps a noise vector to a flattened 28x28 image with values in [0, 1].
        self.fc = tf.keras.Sequential([
            tf.keras.layers.Dense(128, activation='relu'),
            tf.keras.layers.Dense(512, activation='relu'),
            tf.keras.layers.Dense(28 * 28, activation='sigmoid'),
        ])

    def call(self, inputs):
        return self.fc(inputs)

class Discriminator(tf.keras.Model):
    def __init__(self):
        super(Discriminator, self).__init__()
        # MLP critic that maps a flattened image to an unbounded scalar score (no sigmoid).
        self.fc = tf.keras.Sequential([
            tf.keras.layers.Dense(512, activation='relu'),
            tf.keras.layers.Dense(128, activation='relu'),
            tf.keras.layers.Dense(1, activation='linear')
        ])

    def call(self, inputs):
        return self.fc(inputs)

class WGAN_GP():
    def __init__(self):
        self.noise_size = 16  # dimension of the latent noise vector fed to the generator
        self.generator = Generator()
        self.discriminator = Discriminator()
        self.generator.build(input_shape=(None, self.noise_size))
        self.discriminator.build(input_shape=(None, 28 * 28))
        self.g_optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
        self.d_optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)

    def train(self, dataset, batch_size=1024, epochs=500):
        for e in range(epochs):
            generator_loss = list()
            discriminator_loss = list()
            for i in range(int(len(dataset) / batch_size)):
                real_image = dataset[i * batch_size: (i + 1) * batch_size]
                normal_z = np.random.normal(size=(batch_size, self.noise_size))
                # Discriminator (critic) update: minimize the WGAN-GP critic loss.
                with tf.GradientTape() as tape:
                    d_loss = self.d_loss(self.generator, self.discriminator, normal_z, real_image)
                grads = tape.gradient(d_loss, self.discriminator.trainable_variables)
                self.d_optimizer.apply_gradients(zip(grads, self.discriminator.trainable_variables))
                # Generator update: raise the critic score of generated samples.
                with tf.GradientTape() as tape:
                    g_loss = self.g_loss(self.generator, self.discriminator, normal_z)
                grads = tape.gradient(g_loss, self.generator.trainable_variables)
                self.g_optimizer.apply_gradients(zip(grads, self.generator.trainable_variables))
                generator_loss.append(g_loss)
                discriminator_loss.append(d_loss)
            g_l = np.mean(generator_loss)
            d_l = np.mean(discriminator_loss)
            print("epoch: {} / {}, generator loss: {}, discriminator loss: {}".format(
                e + 1, epochs, g_l, d_l
            ))
            if (e + 1) % 10 == 0:
                self.save_image('image_epochs_{}.png'.format(e + 1))

    def save_image(self, path):
        # Sample one noise vector and save the generated digit as a 28x28 grayscale image.
        normal_z = np.random.normal(size=(1, self.noise_size))
        image = self.generator.predict(normal_z)
        image = np.reshape(image, newshape=(28, 28)) * 255.0
        cv2.imwrite(path, image)

    @staticmethod
    def gradient_penalty(discriminator, real_image, fake_image):
        assert real_image.shape[0] == fake_image.shape[0]
        batch_size = real_image.shape[0]
        # Interpolate between real and generated samples with a random weight per example.
        eps = tf.random.uniform([batch_size, 1])
        inter = eps * real_image + (1. - eps) * fake_image
        with tf.GradientTape() as tape:
            tape.watch(inter)
            d_inter_logits = discriminator(inter)
        # Penalize deviations of the critic's gradient norm from 1 at the interpolated points.
        grads = tape.gradient(d_inter_logits, inter)
        grads = tf.reshape(grads, [grads.shape[0], -1])
        gp = tf.norm(grads, axis=1)
        gp = tf.reduce_mean((gp - 1.) ** 2)
        return gp

    @staticmethod
    def g_loss(generator, discriminator, noise_z):
        # Generator loss: maximize the critic score of generated samples.
        fake_image = generator(noise_z)
        d_fake_logits = discriminator(fake_image)
        loss = - tf.reduce_mean(d_fake_logits)
        return loss

    @staticmethod
    def d_loss(generator, discriminator, noise_z, real_image):
        # Critic loss: E[D(fake)] - E[D(real)] plus the gradient penalty weighted by 10.
        fake_image = generator(noise_z)
        d_fake_logits = discriminator(fake_image)
        d_real_logits = discriminator(real_image)
        gp = WGAN_GP.gradient_penalty(discriminator, real_image, fake_image)
        loss = tf.reduce_mean(d_fake_logits) - tf.reduce_mean(d_real_logits) + 10. * gp
        return loss

if __name__ == '__main__':
    dataset = tf.keras.datasets.mnist

    (x_train, y_train), (x_test, y_test) = dataset.load_data()

    # Flatten to 784-dim vectors, cast to float32 and scale pixels to [0, 1].
    x_train = np.reshape(x_train, newshape=(-1, 28 * 28)).astype(np.float32) / 255.0

    gen = WGAN_GP()
    gen.train(dataset=x_train)
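
For reference, the critic loss implemented in `d_loss` is the WGAN-GP objective of Gulrajani et al., with penalty weight $\lambda = 10$ and interpolation points $\hat{x} = \epsilon x + (1 - \epsilon)\,G(z)$, $\epsilon \sim U[0, 1]$, exactly as constructed in `gradient_penalty`:

$$
L_D = \mathbb{E}_{z}\big[D(G(z))\big] - \mathbb{E}_{x}\big[D(x)\big] + \lambda\,\mathbb{E}_{\hat{x}}\Big[\big(\lVert \nabla_{\hat{x}} D(\hat{x}) \rVert_2 - 1\big)^2\Big]
$$

The generator simply minimizes $L_G = -\mathbb{E}_{z}\big[D(G(z))\big]$, which is what `g_loss` returns.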

Generated samples

[Figure: MNIST digits generated by the trained generator]
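
The `save_image` method above writes a single digit per call. A small helper like the one below (a sketch; `save_image_grid` is not part of the original script) can tile a whole grid of samples from a trained `WGAN_GP` instance `gen`:

```python
import numpy as np
import cv2

def save_image_grid(gen, path, rows=8, cols=8):
    # Sample rows*cols noise vectors and run them through the trained generator.
    z = np.random.normal(size=(rows * cols, gen.noise_size)).astype(np.float32)
    images = gen.generator.predict(z)              # shape (rows*cols, 784), values in [0, 1]
    images = images.reshape(rows, cols, 28, 28)
    # Tile the digits into a single (rows*28) x (cols*28) image.
    grid = np.concatenate([np.concatenate(row, axis=1) for row in images], axis=0)
    cv2.imwrite(path, (grid * 255.0).astype(np.uint8))

# Usage after training:
# save_image_grid(gen, 'grid_final.png')
```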

The following is a step-by-step walkthrough of training on the MNIST dataset with WGAN-style losses:

1. Import the required libraries and modules

```python
import tensorflow as tf
from tensorflow.keras import layers
import numpy as np
import matplotlib.pyplot as plt
```

2. Load the MNIST dataset

```python
(train_images, train_labels), (_, _) = tf.keras.datasets.mnist.load_data()
train_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype('float32')
train_images = (train_images - 127.5) / 127.5  # normalize pixel values to [-1, 1]

BUFFER_SIZE = 60000
BATCH_SIZE = 256

train_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
```

3. Define the generator and discriminator models

```python
def make_generator_model():
    model = tf.keras.Sequential()
    model.add(layers.Dense(7*7*256, use_bias=False, input_shape=(100,)))
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Reshape((7, 7, 256)))
    assert model.output_shape == (None, 7, 7, 256)  # note: the batch size is not restricted

    model.add(layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False))
    assert model.output_shape == (None, 7, 7, 128)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False))
    assert model.output_shape == (None, 14, 14, 64)
    model.add(layers.BatchNormalization())
    model.add(layers.LeakyReLU())

    model.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh'))
    assert model.output_shape == (None, 28, 28, 1)

    return model

def make_discriminator_model():
    model = tf.keras.Sequential()
    model.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same', input_shape=[28, 28, 1]))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))

    model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))

    model.add(layers.Flatten())
    model.add(layers.Dense(1))

    return model
```

4. Define the loss functions and optimizers. Note that these are the plain Wasserstein losses; to make this a true WGAN-GP, the gradient-penalty term used by the `WGAN_GP` class earlier in this post still needs to be added to the discriminator loss (see the sketch after this walkthrough).

```python
generator_optimizer = tf.keras.optimizers.Adam(1e-4)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)

def discriminator_loss(real_output, fake_output):
    real_loss = tf.reduce_mean(real_output)
    fake_loss = tf.reduce_mean(fake_output)
    return fake_loss - real_loss

def generator_loss(fake_output):
    return -tf.reduce_mean(fake_output)
```

5. Define the training step

```python
@tf.function
def train_step(images):
    noise = tf.random.normal([BATCH_SIZE, 100])

    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        generated_images = generator(noise, training=True)

        real_output = discriminator(images, training=True)
        fake_output = discriminator(generated_images, training=True)

        gen_loss = generator_loss(fake_output)
        disc_loss = discriminator_loss(real_output, fake_output)

    gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
    gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)

    generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))
```

6. Train the model (`generate_and_save_images` from step 7 must be defined before this loop is run)

```python
EPOCHS = 100
noise_dim = 100
num_examples_to_generate = 16

# Reuse the same seed so that progress is easier to visualize (e.g. in an animated GIF)
seed = tf.random.normal([num_examples_to_generate, noise_dim])

generator = make_generator_model()
discriminator = make_discriminator_model()

for epoch in range(EPOCHS):
    for image_batch in train_dataset:
        train_step(image_batch)

    # Generate sample images every 15 epochs
    if epoch % 15 == 0:
        generate_and_save_images(generator, epoch + 1, seed)

# Generate the final images
generate_and_save_images(generator, EPOCHS, seed)
```

7. Generate images

```python
def generate_and_save_images(model, epoch, test_input):
    # Note that `training` is set to False,
    # so all layers run in inference mode (batchnorm).
    predictions = model(test_input, training=False)

    fig = plt.figure(figsize=(4, 4))

    for i in range(predictions.shape[0]):
        plt.subplot(4, 4, i + 1)
        plt.imshow(predictions[i, :, :, 0] * 127.5 + 127.5, cmap='gray')
        plt.axis('off')

    plt.savefig('image_at_epoch_{:04d}.png'.format(epoch))
    plt.show()
```
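
As noted in step 4, the losses in this walkthrough are the plain Wasserstein losses. A sketch of the missing gradient-penalty term, mirroring the `gradient_penalty` method of the `WGAN_GP` class at the top of this post but written for (28, 28, 1) image tensors, might look like this (the function name and its use inside `train_step` are suggestions, not part of the quoted code):

```python
import tensorflow as tf

def gradient_penalty(discriminator, real_images, fake_images):
    # real_images and fake_images must have the same batch size.
    batch_size = tf.shape(real_images)[0]
    # One random interpolation weight per example, broadcast over height, width, channels.
    eps = tf.random.uniform([batch_size, 1, 1, 1], 0., 1.)
    inter = eps * real_images + (1. - eps) * fake_images
    with tf.GradientTape() as tape:
        tape.watch(inter)
        logits = discriminator(inter, training=True)
    grads = tape.gradient(logits, inter)
    # Penalize deviations of the critic's gradient norm from 1 at the interpolated points.
    norm = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]))
    return tf.reduce_mean((norm - 1.) ** 2)

# Inside train_step, the critic loss would then become (penalty weight 10, as above):
# disc_loss = discriminator_loss(real_output, fake_output) \
#             + 10. * gradient_penalty(discriminator, images, generated_images)
# (Generate the noise with tf.shape(images)[0] rather than BATCH_SIZE so that
#  generated_images and images have matching batch sizes on the last, smaller batch.)
```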
