TensorFlow 2.x Multi-layer Autoencoder

import os
import tensorflow as tf
import numpy as np
from tensorflow import keras
from PIL import Image
from matplotlib import pyplot as plt

“”"------------------------------多层自编码器-------------------------------"""

tf.random.set_seed(22)
np.random.seed(22)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
assert tf.__version__.startswith('2.')


def loadData():

    (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data('F:/PycharmProjects/TensorFlow-2.0-Tutorials/mini/mnist.npz')
    x_train, x_test = x_train.astype(np.float32) / 255., x_test.astype(np.float32) / 255.

    # plt.imshow(x_train[5])
    # plt.show()
    print(x_train.shape, y_train.shape)
    print(x_test.shape, y_test.shape)

    # An autoencoder only needs the images; shuffle with a 5-batch buffer, then batch
    dataset = tf.data.Dataset.from_tensor_slices(x_train)
    dataset = dataset.shuffle(batch_size * 5).batch(batch_size)

    num_batches = x_train.shape[0] // batch_size

    return dataset, num_batches
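
"""A quick peek at what loadData yields (a sketch; loadData reads the global batch_size,
which is only assigned in __main__ below, so this check is left commented out):"""
# batch_size = 100                      # mirrors the value set in __main__
# ds, n = loadData()
# for batch in ds.take(1):
#     print(batch.shape, batch.dtype)   # (100, 28, 28) float32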

"""Dense implements: output = activation(dot(input, kernel) + bias), where activation is the
element-wise activation function passed via the activation argument, kernel is the weight
matrix created by the layer, and bias is the bias vector created by the layer (only used
when use_bias is True)."""

“”"
keras.layers.core.Dense(
units, #代表该层的输出维度
activation=None, #激活函数.但是默认 liner
use_bias=True, #是否使用b
kernel_initializer=‘glorot_uniform’, #初始化w权重,keras/initializers.py
bias_initializer=‘zeros’, #初始化b权重
kernel_regularizer=None, #施加在权重w上的正则项,keras/regularizer.py
bias_regularizer=None, #施加在偏置向量b上的正则项
activity_regularizer=None, #施加在输出上的正则项
kernel_constraint=None, #施加在权重w上的约束项
bias_constraint=None #施加在偏置b上的约束项
)
“”"
"""tf.nn.relu() sets every input value below 0 to 0 and leaves values above 0 unchanged."""
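
"""A minimal standalone sketch of the two building blocks above (values made up for
illustration, not from the original post): a Dense layer followed by tf.nn.relu."""
demo_dense = keras.layers.Dense(3)                 # 3 output units, linear activation by default
demo_out = demo_dense(tf.constant([[-1.0, 2.0]]))  # shape (1, 3); entries may be negative
print(tf.nn.relu(demo_out).numpy())                # negative entries clamped to 0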

class AE(tf.keras.Model):

    def __init__(self):
        super(AE, self).__init__()

        # 784 => 512
        self.fc1 = keras.layers.Dense(512)
        # 512 => h
        self.fc2 = keras.layers.Dense(h_dim)

        # h => 512
        self.fc3 = keras.layers.Dense(512)
        # 512 => image
        self.fc4 = keras.layers.Dense(image_size)

    def encode(self, x):
        x = tf.nn.relu(self.fc1(x))
        x = self.fc2(x)  # linear bottleneck: no activation on the code h
        return x

    def decode_logits(self, h):
        x = tf.nn.relu(self.fc3(h))
        x = self.fc4(x)

        return x

    def decode(self, h):
        return tf.nn.sigmoid(self.decode_logits(h))

    def call(self, inputs, training=None, mask=None):
        # encode the input into the latent code h
        h = self.encode(inputs)
        # decode back to logits; sigmoid is applied inside the loss for numerical stability
        x_reconstructed_logits = self.decode_logits(h)

        return x_reconstructed_logits
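
"""A quick shape check for AE (a standalone sketch; it pre-sets the globals the layers read,
and __main__ below assigns the same values again):"""
image_size, h_dim = 28 * 28, 20
_ae = AE()
_z = _ae.encode(tf.random.normal([4, image_size]))  # latent code h: shape (4, 20)
_x_hat = _ae.decode(_z)                             # reconstruction: shape (4, 784), in (0, 1)
print(_z.shape, _x_hat.shape)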

if __name__ == "__main__":

    # canvas for a 10x10 grid of 28x28 grayscale tiles
    new_im = Image.new('L', (280, 280))
    image_size = 28 * 28
    h_dim = 20
    num_epochs = 5
    batch_size = 100
    learning_rate = 1e-3

    dataset, num_batches = loadData()
    model = AE()
    model.build(input_shape=(4, image_size))  # any batch size works here; 4 is just a placeholder
    model.summary()
    optimizer = keras.optimizers.Adam(learning_rate)

    for epoch in range(num_epochs):
        for step, x in enumerate(dataset):
            x = tf.reshape(x, [-1, image_size])
            with tf.GradientTape() as tape:
                # Forward pass: reconstruction logits for the input batch
                x_reconstruction_logits = model(x)
                # Per-pixel sigmoid cross-entropy between the input and the reconstruction
                # logits, summed over pixels and averaged over the batch
                reconstruction_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=x, logits=x_reconstruction_logits)
                reconstruction_loss = tf.reduce_sum(reconstruction_loss) / batch_size
            # Gradients of the loss w.r.t. the model's trainable variables (not the input x)
            gradients = tape.gradient(reconstruction_loss, model.trainable_variables)
            # Clip the global gradient norm to 15 to stabilize training
            gradients, _ = tf.clip_by_global_norm(gradients, 15)
            # The optimizer updates the trainable variables from the gradients and learning rate
            optimizer.apply_gradients(zip(gradients, model.trainable_variables))

            if (step + 1) % 50 == 0:
                print("Epoch[{}/{}], Step [{}/{}], Reconst Loss: {:.4f}".format(epoch + 1, num_epochs, step + 1, num_batches, float(reconstruction_loss)))



        # Save the reconstructed images of the last batch
        out_logits = model(x[:batch_size // 2])
        out = tf.nn.sigmoid(out_logits)  # logits -> probabilities in (0, 1)
        out = tf.reshape(out, [-1, 28, 28])

        x = tf.reshape(x[:batch_size // 2], [-1, 28, 28])

        # Stack originals on top of reconstructions, then scale to [0, 255] once
        x_concat = tf.concat([x, out], axis=0).numpy() * 255.
        x_concat = x_concat.astype(np.uint8)

        # Paste the 100 tiles (50 originals + 50 reconstructions) into the 10x10 grid
        index = 0
        for i in range(0, 280, 28):
            for j in range(0, 280, 28):
                im = x_concat[index]
                im = Image.fromarray(im, mode='L')
                new_im.paste(im, (i, j))
                index += 1

        new_im.save('F:/PycharmProjects/TensorFlow-2.0-Tutorials/11-AE/images/vae_reconstructed_epoch_%d.png' % (epoch + 1))
        plt.imshow(np.asarray(new_im))
        plt.show()
        print('New images saved!')
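
"""The reconstruction loss above is the numerically stable sigmoid cross-entropy. As a
standalone sanity check (values made up), its closed form max(z, 0) - z*x + log(1 + exp(-|z|))
matches the library call:"""
z = tf.constant([[-1.5, 0.0, 2.0]])                       # logits
t = tf.constant([[0.0, 0.5, 1.0]])                        # targets in [0, 1]
manual = tf.maximum(z, 0.) - z * t + tf.math.log(1. + tf.exp(-tf.abs(z)))
library = tf.nn.sigmoid_cross_entropy_with_logits(labels=t, logits=z)
print(np.allclose(manual.numpy(), library.numpy()))       # True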