[TensorFlow 2.x] Generating MNIST Handwritten Digits with WGAN-GP
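A quick note on the objective (added for context, not in the original post): the code below implements the WGAN-GP losses of Gulrajani et al. (2017), where the critic D scores real and generated images and x̂ is drawn uniformly along straight lines between real and fake samples; the code uses λ = 10:

$$
L_D = \mathbb{E}_{z}\left[D(G(z))\right] - \mathbb{E}_{x}\left[D(x)\right] + \lambda\,\mathbb{E}_{\hat{x}}\left[\left(\lVert\nabla_{\hat{x}} D(\hat{x})\rVert_2 - 1\right)^2\right],\qquad L_G = -\,\mathbb{E}_{z}\left[D(G(z))\right]
$$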

import tensorflow as tf
import numpy as np
import cv2

class Generator(tf.keras.Model):
    """Maps a noise vector to a flattened 28x28 image with values in [0, 1]."""

    def __init__(self):
        super(Generator, self).__init__()
        self.fc = tf.keras.Sequential([
            tf.keras.layers.Dense(128, activation='relu'),
            tf.keras.layers.Dense(512, activation='relu'),
            # sigmoid keeps pixels in [0, 1], matching the scaled MNIST data
            tf.keras.layers.Dense(28 * 28, activation='sigmoid'),
        ])

    def call(self, inputs):
        return self.fc(inputs)

class Discriminator(tf.keras.Model):
    """WGAN critic: outputs an unbounded score, hence the linear last layer."""

    def __init__(self):
        super(Discriminator, self).__init__()
        self.fc = tf.keras.Sequential([
            tf.keras.layers.Dense(512, activation='relu'),
            tf.keras.layers.Dense(128, activation='relu'),
            # no sigmoid: WGAN optimizes a Wasserstein estimate, not a probability
            tf.keras.layers.Dense(1, activation='linear')
        ])

    def call(self, inputs):
        return self.fc(inputs)

class WGAN_GP:
    def __init__(self):
        self.noise_size = 16
        self.generator = Generator()
        self.discriminator = Discriminator()
        self.generator.build(input_shape=(None, self.noise_size))
        self.discriminator.build(input_shape=(None, 28 * 28))
        # Note: the WGAN-GP paper's defaults are Adam(1e-4, beta_1=0, beta_2=0.9)
        # with 5 critic steps per generator step; the simpler settings below
        # still produce recognizable digits on MNIST.
        self.g_optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
        self.d_optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)

    def train(self, dataset, batch_size=1024, epochs=500):
        for e in range(epochs):
            generator_loss = list()
            discriminator_loss = list()
            for i in range(len(dataset) // batch_size):
                real_image = dataset[i * batch_size: (i + 1) * batch_size]
                normal_z = np.random.normal(size=(batch_size, self.noise_size)).astype(np.float32)
                # critic (discriminator) step
                with tf.GradientTape() as tape:
                    d_loss = self.d_loss(self.generator, self.discriminator, normal_z, real_image)
                grads = tape.gradient(d_loss, self.discriminator.trainable_variables)
                self.d_optimizer.apply_gradients(zip(grads, self.discriminator.trainable_variables))
                # generator step (the paper takes several critic steps per
                # generator step; a 1:1 schedule is kept here for simplicity)
                with tf.GradientTape() as tape:
                    g_loss = self.g_loss(self.generator, self.discriminator, normal_z)
                grads = tape.gradient(g_loss, self.generator.trainable_variables)
                self.g_optimizer.apply_gradients(zip(grads, self.generator.trainable_variables))
                generator_loss.append(float(g_loss))
                discriminator_loss.append(float(d_loss))
            g_l = np.mean(generator_loss)
            d_l = np.mean(discriminator_loss)
            print("epoch: {} / {}, generator loss: {}, discriminator loss: {}".format(
                e + 1, epochs, g_l, d_l
            ))
            if (e + 1) % 10 == 0:
                self.save_image('image_epochs_{}.png'.format(e + 1))

    def save_image(self, path):
        # decode a single noise vector and write the 28x28 digit to disk
        normal_z = np.random.normal(size=(1, self.noise_size)).astype(np.float32)
        image = self.generator.predict(normal_z)
        image = np.reshape(image, newshape=(28, 28)) * 255.0
        cv2.imwrite(path, image)

    @staticmethod
    def gradient_penalty(discriminator, real_image, fake_image):
        assert real_image.shape[0] == fake_image.shape[0]
        batch_size = real_image.shape[0]
        # sample points on straight lines between real and fake images
        eps = tf.random.uniform([batch_size, 1])
        inter = eps * real_image + (1. - eps) * fake_image
        with tf.GradientTape() as tape:
            tape.watch(inter)
            d_inter_logits = discriminator(inter)
        # penalize deviation of the critic's gradient norm from 1
        grads = tape.gradient(d_inter_logits, inter)
        grads = tf.reshape(grads, [grads.shape[0], -1])
        gp = tf.norm(grads, axis=1)
        gp = tf.reduce_mean((gp - 1.) ** 2)
        return gp

    @staticmethod
    def g_loss(generator, discriminator, noise_z):
        fake_image = generator(noise_z)
        d_fake_logits = discriminator(fake_image)
        # the generator maximizes the critic's score on fake samples
        loss = - tf.reduce_mean(d_fake_logits)
        return loss

    @staticmethod
    def d_loss(generator, discriminator, noise_z, real_image):
        fake_image = generator(noise_z)
        d_fake_logits = discriminator(fake_image)
        d_real_logits = discriminator(real_image)
        gp = WGAN_GP.gradient_penalty(discriminator, real_image, fake_image)
        # Wasserstein critic loss plus the gradient penalty (lambda = 10);
        # return a single scalar so the training loop can backprop through it
        loss = tf.reduce_mean(d_fake_logits) - tf.reduce_mean(d_real_logits) + 10. * gp
        return loss

if __name__ == '__main__':
    dataset = tf.keras.datasets.mnist

    (x_train, y_train), (x_test, y_test) = dataset.load_data()

    # flatten to 784-dim vectors, scale to [0, 1], and cast to float32
    # so real images match the float32 interpolation in gradient_penalty
    x_train = (np.reshape(x_train, newshape=(-1, 28 * 28)) / 255.0).astype(np.float32)

    gen = WGAN_GP()
    gen.train(dataset=x_train)
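
The save_image helper above writes a single digit per checkpoint. As a minimal sketch (not part of the original post; save_image_grid is a hypothetical helper name), the same generator can render an n x n grid of samples for a quicker visual check:

# (uses the numpy and cv2 imports from the script above)
def save_image_grid(generator, noise_size, path, n=5):
    # sample n*n noise vectors and decode them in one batch
    z = np.random.normal(size=(n * n, noise_size)).astype(np.float32)
    images = generator.predict(z)            # shape (n*n, 784), values in [0, 1]
    images = images.reshape(n, n, 28, 28)
    # stitch each row of digits horizontally, then stack the rows vertically
    grid = np.vstack([np.hstack(row) for row in images])
    cv2.imwrite(path, np.clip(grid * 255.0, 0, 255).astype(np.uint8))

# usage, assuming the training script above has run:
# save_image_grid(gen.generator, gen.noise_size, 'grid_samples.png')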

Generated results

(figure: sample generated digit images)
