Deconvolution vs. Upsampling: A Comparison

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession

# Optional: enable incremental GPU memory allocation (uncomment when running on a GPU)
#config = ConfigProto()
#config.gpu_options.allow_growth = True
#session = InteractiveSession(config=config)
# load MNIST; note that the second tuple is the test set, stored here as (y_train, y_label)
(x_train,x_label),(y_train,y_label)=tf.keras.datasets.mnist.load_data()
x_train.shape,y_train.shape
((60000, 28, 28), (10000, 28, 28))
x_train = x_train.astype('float32')
y_train = y_train.astype('float32')
x_train = x_train/255
y_train = y_train/255
x_train = np.reshape(x_train,(len(x_train),28,28,1))
y_train = np.reshape(y_train,(len(y_train),28,28,1))
y_train.shape
(10000, 28, 28, 1)
noise_factor = 0.5
noise_x = x_train + noise_factor*np.random.normal(loc=0.0, scale=1.0, size=x_train.shape)  # noisy training images
noise_y = y_train + noise_factor*np.random.normal(loc=0.0, scale=1.0, size=y_train.shape)  # noisy test images
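The noisy pixel values can fall outside [0, 1]; a common extra step (an assumption here, not applied in the original run) is to clip them back into range before training:

noise_x = np.clip(noise_x, 0.0, 1.0)  # keep noisy pixels in the valid [0, 1] range
noise_y = np.clip(noise_y, 0.0, 1.0)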
n = 20
plt.figure(figsize=(60, 12))    # figure width 60, height 12
for i in range(1, 11):
    ax = plt.subplot(2, n//2, i)  # 2 rows, n//2 columns, current position i (integer division required)
    plt.imshow(x_train[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    ax = plt.subplot(2, n//2, 10+i)
    plt.imshow(noise_x[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()

[Figure: original digits (top row) and their noisy versions (bottom row)]

input = tf.keras.layers.Input(shape=(28,28,1))
x1 = tf.keras.layers.Conv2D(32,(3,3),padding='same',activation = 'relu',name = 'x1')(input)  # -> 28*28*32
x2 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2),strides=(2, 2),padding='same',name = 'x2')(x1)  # -> 14*14*32
x3 = tf.keras.layers.Conv2D(64,(3,3),padding='same',activation = 'relu',name = 'x3')(x2)  # -> 14*14*64
x4 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2),strides=(2, 2), padding="same",name = 'x4')(x3)  # -> 7*7*64
x5 = tf.keras.layers.Conv2D(64,(3,3),padding="same",activation = 'relu',name = 'x5')(x4)  # -> 7*7*64
x6 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2),strides=(2, 2), padding="same",name = 'x6')(x5)  # -> 4*4*64
x1.get_shape,x2.get_shape,x3.get_shape,x4.get_shape,x5.get_shape,x6.get_shape  # note: without parentheses this displays the bound methods; the shapes are still visible in the repr
(<bound method Tensor.get_shape of <tf.Tensor 'x1/Identity:0' shape=(None, 28, 28, 32) dtype=float32>>,
 <bound method Tensor.get_shape of <tf.Tensor 'x2/Identity:0' shape=(None, 14, 14, 32) dtype=float32>>,
 <bound method Tensor.get_shape of <tf.Tensor 'x3/Identity:0' shape=(None, 14, 14, 64) dtype=float32>>,
 <bound method Tensor.get_shape of <tf.Tensor 'x4/Identity:0' shape=(None, 7, 7, 64) dtype=float32>>,
 <bound method Tensor.get_shape of <tf.Tensor 'x5/Identity:0' shape=(None, 7, 7, 64) dtype=float32>>,
 <bound method Tensor.get_shape of <tf.Tensor 'x6/Identity:0' shape=(None, 4, 4, 64) dtype=float32>>)
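With padding="same", MaxPooling2D rounds the output size up, which is why 7*7 becomes 4*4 rather than 3*3: each spatial dimension is ceil(input / stride). A quick check (not part of the original notebook):

import math
[math.ceil(s / 2) for s in (28, 14, 7)]   # [14, 7, 4] -> matches x2, x4, x6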
x7 = tf.keras.layers.Conv2DTranspose(32,(4,4),strides=(1, 1), padding='valid',name = 'x7')(x6)
# a 4*4 kernel with 'valid' padding and stride 1 expands 4*4 -> (4-1)*1+4 = 7, giving shape (None, 7, 7, 32)
x8 = tf.keras.layers.Conv2DTranspose(16,(2,2),strides=(2, 2), padding='same',name = 'x8')(x7)  # upsamples to 14*14*16
x9 = tf.keras.layers.Conv2DTranspose(1,(2,2),strides=(2, 2), padding='same',name = 'x9')(x8)  # upsamples to 28*28*1
x10 = tf.keras.layers.Conv2D(1,(3,3),padding="same",activation = 'sigmoid',name = 'x10')(x9)  # final sigmoid output, 28*28*1
x7.get_shape,x8.get_shape,x9.get_shape,x10.shape
(<bound method Tensor.get_shape of <tf.Tensor 'x7/Identity:0' shape=(None, 7, 7, 32) dtype=float32>>,
 <bound method Tensor.get_shape of <tf.Tensor 'x8/Identity:0' shape=(None, 14, 14, 16) dtype=float32>>,
 <bound method Tensor.get_shape of <tf.Tensor 'x9/Identity:0' shape=(None, 28, 28, 1) dtype=float32>>,
 TensorShape([None, 28, 28, 1]))
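The transpose-convolution output sizes above follow directly from the kernel, stride, and padding; a small helper (not part of the original notebook) that reproduces them:

def deconv_out_size(n_in, kernel, stride, padding):
    # spatial output length of Conv2DTranspose along one dimension
    if padding == 'same':
        return n_in * stride
    return (n_in - 1) * stride + kernel   # 'valid' padding

deconv_out_size(4, 4, 1, 'valid')   # 7  (x6 -> x7)
deconv_out_size(7, 2, 2, 'same')    # 14 (x7 -> x8)
deconv_out_size(14, 2, 2, 'same')   # 28 (x8 -> x9)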
XXX = tf.keras.Model(inputs = input,outputs = x10)  # use the sigmoid layer x10 as the output so binary_crossentropy receives values in [0, 1]
optimizer_1 = tf.keras.optimizers.Adam(learning_rate=0.001)
XXX.compile(optimizer=optimizer_1,
            loss='binary_crossentropy',
            metrics=['accuracy'])
XXX.fit(noise_x,x_train,
        batch_size=64,
        epochs=50,
        shuffle=True,
        validation_data=(noise_y,y_train)
       )
Train on 60000 samples, validate on 10000 samples
Epoch 1/50
60000/60000 [==============================] - 9s 157us/sample - loss: 0.2303 - accuracy: 0.7986 - val_loss: 0.1954 - val_accuracy: 0.7994
Epoch 2/50
60000/60000 [==============================] - 5s 81us/sample - loss: 0.2417 - accuracy: 0.7973 - val_loss: 0.1983 - val_accuracy: 0.7980
Epoch 3/50
60000/60000 [==============================] - 5s 79us/sample - loss: 0.1989 - accuracy: 0.8009 - val_loss: 0.1812 - val_accuracy: 0.8023
Epoch 4/50
60000/60000 [==============================] - 5s 80us/sample - loss: 0.1857 - accuracy: 0.8026 - val_loss: 0.1743 - val_accuracy: 0.8009
Epoch 5/50
60000/60000 [==============================] - 5s 79us/sample - loss: 0.1845 - accuracy: 0.8037 - val_loss: 0.1679 - val_accuracy: 0.8051
Epoch 6/50
60000/60000 [==============================] - 5s 80us/sample - loss: 0.1759 - accuracy: 0.8049 - val_loss: 0.1675 - val_accuracy: 0.8040
Epoch 7/50
60000/60000 [==============================] - 5s 85us/sample - loss: 0.1631 - accuracy: 0.8064 - val_loss: 0.1553 - val_accuracy: 0.8063
Epoch 8/50
60000/60000 [==============================] - 5s 82us/sample - loss: 0.1793 - accuracy: 0.8051 - val_loss: 0.1756 - val_accuracy: 0.8041
Epoch 9/50
60000/60000 [==============================] - 5s 84us/sample - loss: 0.1788 - accuracy: 0.8047 - val_loss: 0.1698 - val_accuracy: 0.8054
Epoch 10/50
60000/60000 [==============================] - 5s 82us/sample - loss: 0.1675 - accuracy: 0.8060 - val_loss: 0.1622 - val_accuracy: 0.8050
Epoch 11/50
60000/60000 [==============================] - 5s 82us/sample - loss: 0.1796 - accuracy: 0.8049 - val_loss: 0.1792 - val_accuracy: 0.8030
Epoch 12/50
60000/60000 [==============================] - 5s 81us/sample - loss: 0.1683 - accuracy: 0.8058 - val_loss: 0.1568 - val_accuracy: 0.8059
Epoch 13/50
60000/60000 [==============================] - 5s 80us/sample - loss: 0.1677 - accuracy: 0.8063 - val_loss: 0.1617 - val_accuracy: 0.8048
Epoch 14/50
60000/60000 [==============================] - 5s 82us/sample - loss: 0.1629 - accuracy: 0.8074 - val_loss: 0.1528 - val_accuracy: 0.8068
Epoch 15/50
60000/60000 [==============================] - 5s 81us/sample - loss: 0.1554 - accuracy: 0.8077 - val_loss: 0.1480 - val_accuracy: 0.8079
Epoch 16/50
60000/60000 [==============================] - 5s 81us/sample - loss: 0.1591 - accuracy: 0.8075 - val_loss: 0.1489 - val_accuracy: 0.8063
Epoch 17/50
60000/60000 [==============================] - 5s 80us/sample - loss: 0.1526 - accuracy: 0.8082 - val_loss: 0.1459 - val_accuracy: 0.8079
Epoch 18/50
60000/60000 [==============================] - 5s 80us/sample - loss: 0.1665 - accuracy: 0.8068 - val_loss: 0.1522 - val_accuracy: 0.8066
Epoch 19/50
60000/60000 [==============================] - 5s 80us/sample - loss: 0.1552 - accuracy: 0.8075 - val_loss: 0.1487 - val_accuracy: 0.8073
Epoch 20/50
60000/60000 [==============================] - 5s 80us/sample - loss: 0.1512 - accuracy: 0.8086 - val_loss: 0.1416 - val_accuracy: 0.8083
Epoch 21/50
60000/60000 [==============================] - 5s 80us/sample - loss: 0.1515 - accuracy: 0.8084 - val_loss: 0.1431 - val_accuracy: 0.8088
Epoch 22/50
60000/60000 [==============================] - 5s 80us/sample - loss: 0.1507 - accuracy: 0.8089 - val_loss: 0.1520 - val_accuracy: 0.8077
Epoch 23/50
60000/60000 [==============================] - 5s 80us/sample - loss: 0.1711 - accuracy: 0.8060 - val_loss: 0.1567 - val_accuracy: 0.8067
Epoch 24/50
60000/60000 [==============================] - 5s 81us/sample - loss: 0.1572 - accuracy: 0.8071 - val_loss: 0.1455 - val_accuracy: 0.8085
Epoch 25/50
60000/60000 [==============================] - 5s 80us/sample - loss: 0.1535 - accuracy: 0.8079 - val_loss: 0.1468 - val_accuracy: 0.8081
Epoch 26/50
60000/60000 [==============================] - 5s 80us/sample - loss: 0.1544 - accuracy: 0.8077 - val_loss: 0.1477 - val_accuracy: 0.8082
Epoch 27/50
60000/60000 [==============================] - 5s 80us/sample - loss: 0.1491 - accuracy: 0.8091 - val_loss: 0.1448 - val_accuracy: 0.8097
Epoch 28/50
60000/60000 [==============================] - 5s 80us/sample - loss: 0.1558 - accuracy: 0.8076 - val_loss: 0.1543 - val_accuracy: 0.8065
Epoch 29/50
60000/60000 [==============================] - 5s 80us/sample - loss: 0.1634 - accuracy: 0.8073 - val_loss: 0.1620 - val_accuracy: 0.8062
Epoch 30/50
60000/60000 [==============================] - 5s 79us/sample - loss: 0.1489 - accuracy: 0.8085 - val_loss: 0.1470 - val_accuracy: 0.8081
Epoch 31/50
60000/60000 [==============================] - 5s 80us/sample - loss: 0.1654 - accuracy: 0.8065 - val_loss: 0.1569 - val_accuracy: 0.8074
Epoch 32/50
60000/60000 [==============================] - 5s 80us/sample - loss: 0.1520 - accuracy: 0.8078 - val_loss: 0.1454 - val_accuracy: 0.8074
Epoch 33/50
60000/60000 [==============================] - 5s 80us/sample - loss: 0.1575 - accuracy: 0.8074 - val_loss: 0.1510 - val_accuracy: 0.8074
Epoch 34/50
60000/60000 [==============================] - 5s 79us/sample - loss: 0.1523 - accuracy: 0.8081 - val_loss: 0.1439 - val_accuracy: 0.8081
Epoch 35/50
60000/60000 [==============================] - 5s 80us/sample - loss: 0.1478 - accuracy: 0.8087 - val_loss: 0.1434 - val_accuracy: 0.8087
Epoch 36/50
60000/60000 [==============================] - 5s 79us/sample - loss: 0.1466 - accuracy: 0.8090 - val_loss: 0.1420 - val_accuracy: 0.8089
Epoch 37/50
60000/60000 [==============================] - 5s 79us/sample - loss: 0.1456 - accuracy: 0.8093 - val_loss: 0.1465 - val_accuracy: 0.8077
Epoch 38/50
60000/60000 [==============================] - 5s 79us/sample - loss: 0.1615 - accuracy: 0.8070 - val_loss: 0.1455 - val_accuracy: 0.8078
Epoch 39/50
60000/60000 [==============================] - 5s 80us/sample - loss: 0.1639 - accuracy: 0.8061 - val_loss: 0.1461 - val_accuracy: 0.8080
Epoch 40/50
60000/60000 [==============================] - 5s 79us/sample - loss: 0.1546 - accuracy: 0.8078 - val_loss: 0.1511 - val_accuracy: 0.8065
Epoch 41/50
60000/60000 [==============================] - 5s 79us/sample - loss: 0.1491 - accuracy: 0.8085 - val_loss: 0.1540 - val_accuracy: 0.8039
Epoch 42/50
60000/60000 [==============================] - 5s 80us/sample - loss: 0.1511 - accuracy: 0.8090 - val_loss: 0.1442 - val_accuracy: 0.8080
Epoch 43/50
60000/60000 [==============================] - 5s 80us/sample - loss: 0.1502 - accuracy: 0.8089 - val_loss: 0.1498 - val_accuracy: 0.8078
Epoch 44/50
60000/60000 [==============================] - 5s 85us/sample - loss: 0.1454 - accuracy: 0.8095 - val_loss: 0.1513 - val_accuracy: 0.8087
Epoch 45/50
27392/60000 [============>.................] - ETA: 2s - loss: 0.1655 - accuracy: 0.8057
decoded_imgs = XXX.predict(noise_y)
n = 10
plt.figure(figsize=(20, 4))
for i in range(1, n):
    # display reconstruction (the denoised output)
    ax = plt.subplot(2, n, i)
    plt.imshow(decoded_imgs[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)

    # display noisy input
    ax = plt.subplot(2, n, i + n)
    plt.imshow(noise_y[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()

[Figure: Conv2DTranspose model, denoised reconstructions (top row) vs. noisy inputs (bottom row) (output_14_0.png)]

QQ = tf.keras.utils.plot_model(XXX, show_shapes=True)
QQ
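plot_model needs the pydot package and Graphviz installed; a small variant (an assumption, not part of the original run) that also saves the diagram to a file:

tf.keras.utils.plot_model(XXX, to_file='deconv_autoencoder.png', show_shapes=True)  # writes the architecture diagram to disk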

Upsampling method

input_2 = tf.keras.Input(shape=(28,28,1))
xx1 = tf.keras.layers.Conv2D(16,(3,3),padding='same',activation='relu',name = 'xx1')(input_2)  # -> 28*28*16
xx2 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), padding="same",name = 'xx2')(xx1)  # strides default to pool_size (2, 2), -> 14*14*16
xx3 = tf.keras.layers.Conv2D(8,(3,3),padding='same',activation='relu',name = 'xx3')(xx2)  # -> 14*14*8
xx4 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), padding="same",name = 'xx4')(xx3)  # -> 7*7*8
xx5 = tf.keras.layers.Conv2D(8,(3,3),padding='same',activation='relu',name = 'xx5')(xx4)  # -> 7*7*8
xx6 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), padding="same",name = 'xx6')(xx5)  # -> 4*4*8
xx1.get_shape,xx2.get_shape,xx3.get_shape,xx4.get_shape,xx5.get_shape,xx6.get_shape
(<bound method Tensor.get_shape of <tf.Tensor 'xx1_1/Identity:0' shape=(None, 28, 28, 16) dtype=float32>>,
 <bound method Tensor.get_shape of <tf.Tensor 'xx2_1/Identity:0' shape=(None, 14, 14, 16) dtype=float32>>,
 <bound method Tensor.get_shape of <tf.Tensor 'xx3_1/Identity:0' shape=(None, 14, 14, 8) dtype=float32>>,
 <bound method Tensor.get_shape of <tf.Tensor 'xx4_1/Identity:0' shape=(None, 7, 7, 8) dtype=float32>>,
 <bound method Tensor.get_shape of <tf.Tensor 'xx5_1/Identity:0' shape=(None, 7, 7, 8) dtype=float32>>,
 <bound method Tensor.get_shape of <tf.Tensor 'xx6_1/Identity:0' shape=(None, 4, 4, 8) dtype=float32>>)
xx7 = tf.keras.layers.Conv2D(8, (3, 3), activation="relu", padding="same",name = 'xx7')(xx6)  # -> 4*4*8
xx8 = tf.keras.layers.UpSampling2D((2, 2),name = 'xx8')(xx7)  # upsampling enlarges the feature map -> 8*8*8
xx9 = tf.keras.layers.Conv2D(8, (3, 3), activation="relu", padding="same",name = 'xx9')(xx8)  # -> 8*8*8
xx10 = tf.keras.layers.UpSampling2D((2, 2),name = 'xx10')(xx9)  # upsampling -> 16*16*8
xx11 = tf.keras.layers.Conv2D(16,(3,3),padding='valid',activation='relu',name = 'xx11')(xx10)  # note: 'valid' padding trims 16*16 down to 14*14*16, restoring the target spatial size
xx12 = tf.keras.layers.UpSampling2D((2,2),name = 'xx12')(xx11)  # upsampling -> 28*28*16
xx13 = tf.keras.layers.Conv2D(1,(3,3),padding='same',activation='sigmoid',name = 'xx13')(xx12)  # final sigmoid output, 28*28*1

xx7.get_shape,xx8.get_shape,xx9.get_shape,xx10.get_shape,xx11.get_shape,xx12.get_shape,xx13.get_shape
(<bound method Tensor.get_shape of <tf.Tensor 'xx7_5/Identity:0' shape=(None, 4, 4, 8) dtype=float32>>,
 <bound method Tensor.get_shape of <tf.Tensor 'xx8_5/Identity:0' shape=(None, 8, 8, 8) dtype=float32>>,
 <bound method Tensor.get_shape of <tf.Tensor 'xx9_5/Identity:0' shape=(None, 8, 8, 8) dtype=float32>>,
 <bound method Tensor.get_shape of <tf.Tensor 'xx10_5/Identity:0' shape=(None, 16, 16, 8) dtype=float32>>,
 <bound method Tensor.get_shape of <tf.Tensor 'xx11_5/Identity:0' shape=(None, 14, 14, 16) dtype=float32>>,
 <bound method Tensor.get_shape of <tf.Tensor 'xx12_5/Identity:0' shape=(None, 28, 28, 16) dtype=float32>>,
 <bound method Tensor.get_shape of <tf.Tensor 'xx13_5/Identity:0' shape=(None, 28, 28, 1) dtype=float32>>)
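Unlike Conv2DTranspose, UpSampling2D has no trainable weights: by default it simply repeats each value (nearest-neighbor interpolation). A toy sketch illustrating the difference (not from the original notebook):

toy = tf.reshape(tf.constant([[1., 2.], [3., 4.]]), (1, 2, 2, 1))
up = tf.keras.layers.UpSampling2D((2, 2))(toy)                             # each value copied into a 2*2 block, 0 parameters
deconv = tf.keras.layers.Conv2DTranspose(1, (2, 2), strides=(2, 2))(toy)   # learned kernel, randomly initialised here
print(up.shape, deconv.shape)  # (1, 4, 4, 1) (1, 4, 4, 1)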
XDX = tf.keras.Model(inputs = input_2,outputs = xx13)
XDX.summary()
Model: "model_5"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_3 (InputLayer)         [(None, 28, 28, 1)]       0         
_________________________________________________________________
xx1 (Conv2D)                 (None, 28, 28, 16)        160       
_________________________________________________________________
xx2 (MaxPooling2D)           (None, 14, 14, 16)        0         
_________________________________________________________________
xx3 (Conv2D)                 (None, 14, 14, 8)         1160      
_________________________________________________________________
xx4 (MaxPooling2D)           (None, 7, 7, 8)           0         
_________________________________________________________________
xx5 (Conv2D)                 (None, 7, 7, 8)           584       
_________________________________________________________________
xx6 (MaxPooling2D)           (None, 4, 4, 8)           0         
_________________________________________________________________
xx7 (Conv2D)                 (None, 4, 4, 8)           584       
_________________________________________________________________
xx8 (UpSampling2D)           (None, 8, 8, 8)           0         
_________________________________________________________________
xx9 (Conv2D)                 (None, 8, 8, 8)           584       
_________________________________________________________________
xx10 (UpSampling2D)          (None, 16, 16, 8)         0         
_________________________________________________________________
xx11 (Conv2D)                (None, 14, 14, 16)        1168      
_________________________________________________________________
xx12 (UpSampling2D)          (None, 28, 28, 16)        0         
_________________________________________________________________
xx13 (Conv2D)                (None, 28, 28, 1)         145       
=================================================================
Total params: 4,385
Trainable params: 4,385
Non-trainable params: 0
_________________________________________________________________
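The parameter counts in the summary follow kernel_h * kernel_w * in_channels * filters + filters; a quick check against a few rows (not part of the original notebook):

3*3*1*16 + 16    # 160   (xx1)
3*3*16*8 + 8     # 1160  (xx3)
3*3*8*16 + 16    # 1168  (xx11)
3*3*16*1 + 1     # 145   (xx13)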
optimizer_2 = tf.keras.optimizers.Adam(learning_rate=0.001)
XDX.compile(optimizer=optimizer_2,
            loss = 'binary_crossentropy',
            metrics = ['acc']
           )
history_2 = XDX.fit(noise_x,x_train,
                    batch_size=64,epochs=50,
                    shuffle=True,
                    validation_data=(noise_y,y_train))
Train on 60000 samples, validate on 10000 samples
Epoch 1/50
60000/60000 [==============================] - 7s 111us/sample - loss: 0.2205 - acc: 0.7964 - val_loss: 0.1694 - val_acc: 0.8017
Epoch 2/50
60000/60000 [==============================] - 5s 83us/sample - loss: 0.1586 - acc: 0.8020 - val_loss: 0.1493 - val_acc: 0.8038
Epoch 3/50
60000/60000 [==============================] - 5s 85us/sample - loss: 0.1470 - acc: 0.8047 - val_loss: 0.1424 - val_acc: 0.8060
Epoch 4/50
60000/60000 [==============================] - 5s 83us/sample - loss: 0.1413 - acc: 0.8059 - val_loss: 0.1374 - val_acc: 0.8050
Epoch 5/50
60000/60000 [==============================] - 5s 84us/sample - loss: 0.1377 - acc: 0.8065 - val_loss: 0.1353 - val_acc: 0.8076
Epoch 6/50
60000/60000 [==============================] - 5s 81us/sample - loss: 0.1350 - acc: 0.8071 - val_loss: 0.1332 - val_acc: 0.8049
Epoch 7/50
60000/60000 [==============================] - 5s 81us/sample - loss: 0.1328 - acc: 0.8076 - val_loss: 0.1303 - val_acc: 0.8072
Epoch 8/50
60000/60000 [==============================] - 5s 80us/sample - loss: 0.1308 - acc: 0.8080 - val_loss: 0.1290 - val_acc: 0.8058
Epoch 9/50
60000/60000 [==============================] - 5s 81us/sample - loss: 0.1292 - acc: 0.8083 - val_loss: 0.1266 - val_acc: 0.8078
Epoch 10/50
60000/60000 [==============================] - 5s 83us/sample - loss: 0.1279 - acc: 0.8085 - val_loss: 0.1264 - val_acc: 0.8067
Epoch 11/50
60000/60000 [==============================] - 5s 81us/sample - loss: 0.1268 - acc: 0.8088 - val_loss: 0.1244 - val_acc: 0.8081
Epoch 12/50
60000/60000 [==============================] - 5s 83us/sample - loss: 0.1258 - acc: 0.8090 - val_loss: 0.1235 - val_acc: 0.8083
Epoch 13/50
60000/60000 [==============================] - 5s 80us/sample - loss: 0.1249 - acc: 0.8091 - val_loss: 0.1232 - val_acc: 0.8080
Epoch 14/50
60000/60000 [==============================] - 5s 81us/sample - loss: 0.1242 - acc: 0.8093 - val_loss: 0.1225 - val_acc: 0.8078
Epoch 15/50
60000/60000 [==============================] - 5s 80us/sample - loss: 0.1235 - acc: 0.8094 - val_loss: 0.1213 - val_acc: 0.8086
Epoch 16/50
60000/60000 [==============================] - 5s 81us/sample - loss: 0.1228 - acc: 0.8095 - val_loss: 0.1211 - val_acc: 0.8082
Epoch 17/50
60000/60000 [==============================] - 5s 80us/sample - loss: 0.1223 - acc: 0.8096 - val_loss: 0.1203 - val_acc: 0.8090
Epoch 18/50
60000/60000 [==============================] - 5s 79us/sample - loss: 0.1218 - acc: 0.8097 - val_loss: 0.1214 - val_acc: 0.8101
Epoch 19/50
60000/60000 [==============================] - 5s 84us/sample - loss: 0.1213 - acc: 0.8098 - val_loss: 0.1194 - val_acc: 0.8084
Epoch 20/50
60000/60000 [==============================] - 5s 80us/sample - loss: 0.1208 - acc: 0.8099 - val_loss: 0.1192 - val_acc: 0.8085
Epoch 21/50
60000/60000 [==============================] - 5s 79us/sample - loss: 0.1205 - acc: 0.8100 - val_loss: 0.1191 - val_acc: 0.8097
Epoch 22/50
60000/60000 [==============================] - 5s 79us/sample - loss: 0.1201 - acc: 0.8100 - val_loss: 0.1180 - val_acc: 0.8091
Epoch 23/50
60000/60000 [==============================] - 5s 83us/sample - loss: 0.1197 - acc: 0.8101 - val_loss: 0.1180 - val_acc: 0.8088
Epoch 24/50
60000/60000 [==============================] - 5s 83us/sample - loss: 0.1194 - acc: 0.8102 - val_loss: 0.1177 - val_acc: 0.8089
Epoch 25/50
60000/60000 [==============================] - 5s 80us/sample - loss: 0.1191 - acc: 0.8102 - val_loss: 0.1183 - val_acc: 0.8079
Epoch 26/50
60000/60000 [==============================] - 5s 81us/sample - loss: 0.1188 - acc: 0.8103 - val_loss: 0.1170 - val_acc: 0.8088
Epoch 27/50
60000/60000 [==============================] - 5s 81us/sample - loss: 0.1185 - acc: 0.8103 - val_loss: 0.1167 - val_acc: 0.8097
Epoch 28/50
60000/60000 [==============================] - 5s 82us/sample - loss: 0.1182 - acc: 0.8103 - val_loss: 0.1169 - val_acc: 0.8090
Epoch 29/50
60000/60000 [==============================] - 5s 79us/sample - loss: 0.1181 - acc: 0.8103 - val_loss: 0.1189 - val_acc: 0.8075
Epoch 30/50
60000/60000 [==============================] - 5s 81us/sample - loss: 0.1179 - acc: 0.8104 - val_loss: 0.1159 - val_acc: 0.8095
Epoch 31/50
60000/60000 [==============================] - 5s 81us/sample - loss: 0.1178 - acc: 0.8104 - val_loss: 0.1165 - val_acc: 0.8091
Epoch 32/50
60000/60000 [==============================] - 5s 84us/sample - loss: 0.1176 - acc: 0.8104 - val_loss: 0.1161 - val_acc: 0.8093
Epoch 33/50
60000/60000 [==============================] - 5s 79us/sample - loss: 0.1174 - acc: 0.8105 - val_loss: 0.1169 - val_acc: 0.8107
Epoch 34/50
60000/60000 [==============================] - 5s 82us/sample - loss: 0.1173 - acc: 0.8105 - val_loss: 0.1157 - val_acc: 0.8095
Epoch 35/50
60000/60000 [==============================] - 5s 82us/sample - loss: 0.1171 - acc: 0.8105 - val_loss: 0.1156 - val_acc: 0.8099
Epoch 36/50
60000/60000 [==============================] - 5s 80us/sample - loss: 0.1170 - acc: 0.8106 - val_loss: 0.1154 - val_acc: 0.8093
Epoch 37/50
60000/60000 [==============================] - 5s 81us/sample - loss: 0.1168 - acc: 0.8106 - val_loss: 0.1152 - val_acc: 0.8093
Epoch 38/50
60000/60000 [==============================] - 5s 79us/sample - loss: 0.1168 - acc: 0.8106 - val_loss: 0.1158 - val_acc: 0.8106
Epoch 39/50
60000/60000 [==============================] - 5s 80us/sample - loss: 0.1166 - acc: 0.8106 - val_loss: 0.1159 - val_acc: 0.8108
Epoch 40/50
60000/60000 [==============================] - 5s 81us/sample - loss: 0.1166 - acc: 0.8106 - val_loss: 0.1151 - val_acc: 0.8096
Epoch 41/50
60000/60000 [==============================] - 5s 76us/sample - loss: 0.1164 - acc: 0.8107 - val_loss: 0.1151 - val_acc: 0.8100
Epoch 42/50
60000/60000 [==============================] - 5s 78us/sample - loss: 0.1164 - acc: 0.8107 - val_loss: 0.1151 - val_acc: 0.8090
Epoch 43/50
60000/60000 [==============================] - 5s 79us/sample - loss: 0.1163 - acc: 0.8107 - val_loss: 0.1154 - val_acc: 0.8088
Epoch 44/50
60000/60000 [==============================] - 5s 77us/sample - loss: 0.1162 - acc: 0.8107 - val_loss: 0.1144 - val_acc: 0.8101
Epoch 45/50
60000/60000 [==============================] - 5s 78us/sample - loss: 0.1162 - acc: 0.8107 - val_loss: 0.1144 - val_acc: 0.8101
Epoch 46/50
60000/60000 [==============================] - 5s 81us/sample - loss: 0.1160 - acc: 0.8107 - val_loss: 0.1148 - val_acc: 0.8105
Epoch 47/50
60000/60000 [==============================] - 5s 84us/sample - loss: 0.1160 - acc: 0.8107 - val_loss: 0.1150 - val_acc: 0.8093
Epoch 48/50
60000/60000 [==============================] - 5s 83us/sample - loss: 0.1159 - acc: 0.8107 - val_loss: 0.1149 - val_acc: 0.8107
Epoch 49/50
60000/60000 [==============================] - 5s 80us/sample - loss: 0.1159 - acc: 0.8108 - val_loss: 0.1160 - val_acc: 0.8109
Epoch 50/50
60000/60000 [==============================] - 5s 77us/sample - loss: 0.1159 - acc: 0.8108 - val_loss: 0.1158 - val_acc: 0.8084
decoded_imgs_1 = XDX.predict(noise_y)
n = 10
plt.figure(figsize=(20, 4))
for i in range(1, n):
    # display reconstruction (the denoised output)
    ax = plt.subplot(2, n, i)
    plt.imshow(decoded_imgs_1[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)

    # display noisy input
    ax = plt.subplot(2, n, i + n)
    plt.imshow(noise_y[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()

[Figure: upsampling model, denoised reconstructions (top row) vs. noisy inputs (bottom row)]
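To compare the two decoders quantitatively, the validation losses of both runs can be plotted together. A minimal sketch, assuming the first fit call is also assigned to a history object (e.g. history_1 = XXX.fit(...)), which the code above does not do:

plt.figure(figsize=(8, 4))
plt.plot(history_1.history['val_loss'], label='Conv2DTranspose decoder')  # history_1 is an assumed variable, see note above
plt.plot(history_2.history['val_loss'], label='UpSampling2D decoder')
plt.xlabel('epoch')
plt.ylabel('validation loss')
plt.legend()
plt.show()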

