AE算法伪代码如下图2所示：

# AE算法重建图像的Python实现

import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
import scipy
from scipy import ndimage
import math

# Load the MNIST dataset (the original comment announced a download but the
# actual read_data_sets call was missing from the paste).
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

# Directory where reconstructed images are written.
path = '/home/yuelingyi/PyCharmProjects/Practive/AE/test_images'

# Number of images per training batch.
# (Fixed typo: was `bath_size`, but every other reference reads `batch_size`.)
batch_size = 1

# 定义卷积函数
def conv2d(input_, shape, k_step, name):
# input_ = [batch_size, in_height, in_width, in_channels]   format NHWC
# shape = [filter_height, filter_width, in_channels, out_channels]
with tf.variable_scope(name):
w = tf.get_variable('w', shape, initializer=tf.truncated_normal_initializer(stddev=0.02))
b = tf.get_variable('b', [shape[-1]], initializer=tf.constant_initializer(value=0.0))
return conv

# 定义解卷积函数
def deconv2d(input_, output_shape, k_step, name):
# input_ = input_ = [batch_size, in_height, in_width, in_channels]
# output_shaoe = [batch_size, output_height, output_width, output_channels]
w = tf.get_variable('w', [k_step, k_step, output_shape[-1], input_.shape()[-1], initialier = tf.truncated_normal_initializer(stddev=0.02)])
b = tf.get_variable('b', [output_shape[-1]], initializer=tf.constant_initializer(value=0.0) )
rerurn deconv

# 定义激活函数leakyrelu
def leakyrelu(x, leaky=0.2):
return max(x, x*leaky)

#def leakyrelu(x, leaky=0.2):
#k1 = (1 + leaky) * 0.5
#k2 = (1 - leaky) * 0.5
# return k1 * x + k2 * tf.abs(x)

# 定义全连接层函数
def fully_connected(input_, shape, name):
w = tf.get_valiable('w', shape, initializer= tf.truncated_normal_initializer(stddev=0.02))
b = tf.get_valiable('b', shape[-1], initializer=tf.constant_initializer(value=0.0))
fc = tf.matmul(input_, w) + b
return fc

#定义转换函数
def rescale_image(image):
convert_image = (image / 1.5 + 0.5) * 255
return convert_image

# 定义保存图像函数
def save_image(input_, size, image_path, color, iter):
h, w = input_.shape[1], input_shape[2]
convert_input = input_.reshape(batch_size, h, w)
if color is True:
image = np.zeros((h*size, w*size, 3))
else:
image = np.zeros((h*size, w*size))
for index, img in enumerate(convert_input):
i = index % size
j = math.floor(img / size)
if color is True:
image[h*j:h*j+h, i*w:i*w+w,:] = img
else:
image[h*j:h*j+h, i*w:i*w+w] = img
scipy.misc.toimage(rescale_image(image), cmin=0, cmax=0).save(image_path + 'tr_gt_%s.png' % (iter))

# 搭建AE框架，这里使用4层编码4层解码，卷积核大小(3, 3)， 步长2
def AutoEncoder(inputs):
with tf.variable_scope("AutoEncoder", reuse=tf.AUTO_REUSE) as scope0:
conv1 = leakyrelu(conv2d(input_, [3, 3, 1, 16], 2, 'conv1'))
conv2 = leakyrelu(conv2d(conv1, [3, 3, 16, 32], 2, 'conv2'))
conv3 = leakyrelu(conv2d(conv2, [3, 3, 32, 64], 2, 'conv3'))
conv4 = leakyrelu(conv2d(conv3, [3, 3, 64, 128], 2, 'conv4'))
deconv1 = leakyrelu(deconv2d(conv4, [batch_size, 4, 4, 64], 'deconv1'))
deconv2 = leakyrelu(deconv2d(deconv1, [batch_size, 7, 7, 32], 2, 'deconv2'))
deconv3 = leakyrelu(deconv2d(deconv2, [batch_size, 14, 14, 16], 2, 'deconv3'))
deconv4 = leakyrelu(deconv2d(deconv3, [batch_size, 28, 28, 1], 2, 'deconv4'))
output = tf.tanh(deconv4)
return output

with tf.name_scope('input'):
    input_image = tf.placeholder(tf.float32, [None, 28, 28, 1], "input_image")

with tf.name_scope("Network"):
    generate_image = AutoEncoder(input_image)

tf.summary.image("output_image", generate_image, 100)

with tf.name_scope("loss"):
    # Per-pixel squared reconstruction error, summed over channels and
    # averaged over the batch and spatial dims.
    # (Fixed typos: tf.suntract -> tf.subtract, generate_iamge -> generate_image.)
    Auto_loss = tf.reduce_mean(
        tf.reduce_sum(tf.pow(tf.subtract(generate_image, input_image), 2), 3))

tf.summary.scalar("loss", Auto_loss)

train_var = tf.trainable_variables()

with tf.name_scope("train"):
    # Fixed typo: AdmaOptimizer -> AdamOptimizer.
    train_loss = tf.train.AdamOptimizer(0.001, beta1=0.9).minimize(
        Auto_loss, var_list=train_var)

# initialize_all_variables() is deprecated; global_variables_initializer()
# is the TF1 replacement.
init = tf.global_variables_initializer()

gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)

# Fixed typo: gup_options -> gpu_options.
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
    sess.run(init)
    merged_summary = tf.summary.merge_all()
    # Fixed the doubled quotes around the log path.
    writer = tf.summary.FileWriter(
        '/home/yuelingyi/PyCharmProjects/Practive/AE/logs', sess.graph)
    for i in range(3000):
        # Fixed: tf.train has no next_batch; batches come from the loaded
        # mnist dataset object.
        mnist_image = mnist.train.next_batch(batch_size)
        batch_image = mnist_image[0].reshape(batch_size, 28, 28, 1)
        sess.run(train_loss, feed_dict={input_image: batch_image})
        print(sess.run(Auto_loss, feed_dict={input_image: batch_image}))
        # Fixed typo input_iamge -> input_image, and the summary was computed
        # but never written — add it to the FileWriter so TensorBoard sees it.
        summary = sess.run(merged_summary, feed_dict={input_image: batch_image})
        writer.add_summary(summary, i)

        if i % 50 == 0:
            # Fixed typos: genereat_image -> generate_image, input_iamge ->
            # input_image. The duplicate merged_summary run (`result`) was
            # unused and is removed.
            output_image = sess.run(generate_image,
                                    feed_dict={input_image: batch_image})
            save_image(output_image, 1, path, False, i)