Deep Learning with Python (10): Generating Handwritten Digits with a VAE

Introduction to VAEs

An autoencoder takes an image, maps it through an encoder to a latent vector space, and then decodes it back to an output with the same dimensions as the input. A VAE (variational autoencoder) augments the autoencoder with statistical machinery that forces the network to learn a continuous, highly structured latent space.
Concretely:
(1) The encoder turns the input image into two parameters of a latent distribution, z_mean and z_log_variance.
(2) A point z is drawn at random from the latent normal distribution defined by z_mean and z_log_variance.
(3) The decoder module maps this point back to an output with the size of the original input image.
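Step (2) relies on the reparameterization trick, which keeps the sampling step differentiable with respect to the encoder's outputs by isolating the randomness in an auxiliary noise variable:

z = z_mean + exp(0.5 * z_log_var) * epsilon,    epsilon ~ N(0, I)

This is exactly what the sampling function in the listing below computes.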

Code

import keras
from keras import layers
from keras import backend as K
from keras.models import Model
import numpy as np
from keras.datasets import mnist
import matplotlib.pyplot as plt
from scipy.stats import norm

img_shape = (28, 28, 1)  # input image shape
batch_size = 16  # batch size
latent_dim = 2  # dimensionality of the latent space (2-D so it can be plotted directly)

# Build the encoder network
input_img = keras.Input(shape=img_shape)

x = layers.Conv2D(32, 3, padding='same', activation='relu')(input_img)
x = layers.Conv2D(64, 3, padding='same', activation='relu', strides=(2,2))(x)
x = layers.Conv2D(64, 3, padding='same', activation='relu')(x)
x = layers.Conv2D(64, 3, padding='same', activation='relu')(x)
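# Remember the feature-map shape before flattening (here (None, 14, 14, 64),
# since the single stride-2 convolution halved the 28x28 input);
# the decoder will need it to reshape its output back into a feature map.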
shape_before_flattening = K.int_shape(x)

x = layers.Flatten()(x)
x = layers.Dense(32, activation='relu')(x)
# The encoder ends in two heads: z_mean and z_log_var, the parameters of the latent distribution
z_mean = layers.Dense(latent_dim)(x)
z_log_var = layers.Dense(latent_dim)(x)
# Sample a point from the latent space
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim), mean=0.0, stddev=1.0)
    return z_mean + K.exp(0.5 * z_log_var) * epsilon

z = layers.Lambda(sampling)([z_mean, z_log_var])  # wrap the sampling op in a Lambda layer
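# Note: the randomness is confined to epsilon, so gradients can still flow
# through z_mean and z_log_var (the reparameterization trick).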

# Decoder implementation

decoder_input = layers.Input(K.int_shape(z)[1:])
# Upsample the input to the flattened feature-map size
x = layers.Dense(np.prod(shape_before_flattening[1:]), activation='relu')(decoder_input)
# Reshape x into a feature map with the same shape as the one just before the encoder's Flatten layer
x = layers.Reshape(shape_before_flattening[1:])(x)
# Decode x into a feature map with the same dimensions as the original input
x = layers.Conv2DTranspose(32, 3, padding='same', activation='relu', strides=(2,2))(x)
x = layers.Conv2D(1, 3, padding='same', activation='sigmoid')(x)
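# The sigmoid keeps pixel values in [0, 1], matching the binary cross-entropy
# reconstruction term used in the loss below.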
# Instantiate the decoder model
decoder = Model(decoder_input, x)
# Apply it to z to recover a decoded image
z_decoded = decoder(z)
# Custom layer that computes the VAE loss (reconstruction + KL divergence)
class CustomVariationalLayer(keras.layers.Layer):
    def vae_loss(self, x, z_decoded):
        x = K.flatten(x)
        z_decoded = K.flatten(z_decoded)
        # Reconstruction term: per-pixel binary cross-entropy
        xent_loss = keras.metrics.binary_crossentropy(x, z_decoded)
        # Regularization term: closed-form KL divergence between
        # N(z_mean, exp(z_log_var)) and the standard normal prior
        kl_loss = -5e-4 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
        return K.mean(xent_loss + kl_loss)
    # Implement a custom layer by writing a call method
    def call(self, inputs):
        x = inputs[0]
        z_decoded = inputs[1]
        loss = self.vae_loss(x, z_decoded)
        self.add_loss(loss, inputs=inputs)
        # The output is not used, but a layer must return something
        return x

y = CustomVariationalLayer()([input_img, z_decoded])

vae = Model(input_img, y)  # define the model
# loss=None: the loss was already attached inside CustomVariationalLayer via add_loss
vae.compile(optimizer='rmsprop', loss=None)
vae.summary()

(x_train, _), (x_test, y_test) = mnist.load_data()  # load the data

x_train = x_train.astype('float32') / 255.  # scale pixel values to [0, 1]
x_train = x_train.reshape(x_train.shape + (1,))
x_test = x_test.astype('float32') / 255.
x_test = x_test.reshape(x_test.shape + (1,))

# Train the model; no targets are passed because the loss is self-contained
vae.fit(x=x_train, y=None,
        shuffle=True,
        epochs=10,
        batch_size=batch_size,
        validation_data=(x_test, None))

n = 15  # display a 15x15 grid of digits
digit_size = 28
figure = np.zeros((digit_size*n, digit_size*n))
# Map a uniform grid through the inverse Gaussian CDF (ppf),
# since the latent prior is a standard normal
grid_x = norm.ppf(np.linspace(0.05, 0.95, n))
grid_y = norm.ppf(np.linspace(0.05, 0.95, n))

for i, yi in enumerate(grid_x):
    for j, xi in enumerate(grid_y):
        z_sample = np.array([xi, yi])
        # Repeat the latent point to fill a batch, then decode it
        z_sample = np.tile(z_sample, batch_size).reshape(batch_size, 2)
        x_decoded = decoder.predict(z_sample, batch_size=batch_size)
        digit = x_decoded[0].reshape(digit_size, digit_size)
        figure[i*digit_size : (i+1)*digit_size, j*digit_size : (j+1)*digit_size] = digit

plt.figure(figsize = (10, 10))
plt.imshow(figure, cmap='Greys_r')
plt.show()
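As an optional extra check (not part of the listing above), you can look at the flip side of generation: where the encoder places real digits in the latent space. A minimal sketch, reusing input_img, z_mean, x_test and y_test from the code above; the encoder model built here is an assumed helper for illustration:

# Hypothetical helper: an encoder model that maps images to z_mean
encoder = Model(input_img, z_mean)
z_test = encoder.predict(x_test, batch_size=batch_size)

# Scatter the test set in the 2-D latent space, colored by digit class
plt.figure(figsize=(8, 8))
plt.scatter(z_test[:, 0], z_test[:, 1], c=y_test, cmap='viridis', s=2)
plt.colorbar()
plt.xlabel('z[0]')
plt.ylabel('z[1]')
plt.show()

A well-trained VAE should show the ten digit classes as overlapping but clearly clustered regions.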

Results

[Figure: a 15x15 grid of generated digits; neighboring digits morph smoothly across the 2-D latent space]

Recommended Reading

An intuitive understanding of VAEs
Notes on the VAE lecture from Hung-yi Lee's course
