This improves on autoencoder (1). The network in (1) used 2 encoder layers and 2 decoder layers; here the encoder and the decoder are each extended to 4 layers.
The original data are compressed step by step: 784 → 256 → 64 → 16 → 2.
```python
# encoding=utf-8
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('/data', one_hot=True)

learning_rate = 0.01
n_hidden_1 = 256
n_hidden_2 = 64
n_hidden_3 = 16
n_hidden_4 = 2
n_input = 784  # input images are 28*28

x = tf.placeholder('float', [None, n_input])
y = x  # the autoencoder reconstructs its own input
weights = {
    'encoder_h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'encoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'encoder_h3': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_3])),
    'encoder_h4': tf.Variable(tf.random_normal([n_hidden_3, n_hidden_4])),
    'decoder_h1': tf.Variable(tf.random_normal([n_hidden_4, n_hidden_3])),
    'decoder_h2': tf.Variable(tf.random_normal([n_hidden_3, n_hidden_2])),
    'decoder_h3': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_1])),
    'decoder_h4': tf.Variable(tf.random_normal([n_hidden_1, n_input])),
}
biases = {
    'encoder_b1': tf.Variable(tf.zeros([n_hidden_1])),
    'encoder_b2': tf.Variable(tf.zeros([n_hidden_2])),
    'encoder_b3': tf.Variable(tf.zeros([n_hidden_3])),
    'encoder_b4': tf.Variable(tf.zeros([n_hidden_4])),
    'decoder_b1': tf.Variable(tf.zeros([n_hidden_3])),
    'decoder_b2': tf.Variable(tf.zeros([n_hidden_2])),
    'decoder_b3': tf.Variable(tf.zeros([n_hidden_1])),
    'decoder_b4': tf.Variable(tf.zeros([n_input])),
}
# Define the network model
def encoder(x):
    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']), biases['encoder_b1']))
    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_h2']), biases['encoder_b2']))
    layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['encoder_h3']), biases['encoder_b3']))
    layer_4 = tf.nn.sigmoid(tf.add(tf.matmul(layer_3, weights['encoder_h4']), biases['encoder_b4']))
    return layer_4

def decoder(x):
    layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']), biases['decoder_b1']))
    layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_h2']), biases['decoder_b2']))
    layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['decoder_h3']), biases['decoder_b3']))
    layer_4 = tf.nn.sigmoid(tf.add(tf.matmul(layer_3, weights['decoder_h4']), biases['decoder_b4']))
    return layer_4
y_pred = decoder(encoder(x))
print('y_pred', y_pred)

cost = tf.reduce_mean(tf.pow(y - y_pred, 2))
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)

training_epochs = 20
batch_size = 256
display_step = 1
with tf.Session() as sess:
    tf.global_variables_initializer().run()
    total_batch = int(mnist.train.num_examples / batch_size)
    # Training loop
    for epoch in range(training_epochs):
        # Iterate over the whole training set
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            _, c = sess.run([optimizer, cost], feed_dict={x: batch_xs})
        if epoch % display_step == 0:
            print('Epoch:', '%04d' % (epoch + 1), 'cost=', '{:.9f}'.format(c))
    print('finished')
```
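
Because the bottleneck layer has only 2 units, the learned codes can be plotted directly in the plane. Below is a minimal sketch of how one might inspect them; it is not part of the original script, it assumes `import numpy as np` and `import matplotlib.pyplot as plt` at the top of the file, and it is meant to be appended inside the `with tf.Session()` block right after the training loop. Calling `encoder(x)` again builds new ops but reuses the already-trained weight Variables.

```python
# Sketch only: scatter the 2-D codes of the MNIST test set, colored by digit.
encoder_op = encoder(x)                    # reuses the trained weights from the dicts above
codes = sess.run(encoder_op, feed_dict={x: mnist.test.images})   # shape (10000, 2)
labels = np.argmax(mnist.test.labels, 1)   # one_hot=True, so recover the digit labels
plt.scatter(codes[:, 0], codes[:, 1], c=labels, s=3, cmap='tab10')
plt.colorbar()
plt.show()
```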