RNN处理mnist

import numpy as np
from sklearn.metrics import accuracy_score
from tensorflow import keras
import tensorflow as tf
from tensorflow.keras import layers
from sklearn.preprocessing import StandardScaler
from sklearn import preprocessing
physical_devices = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
(X_train,y_train),(X_test,y_test)=tf.keras.datasets.fashion_mnist.load_data()
X_train.shape,X_test.shape,y_train.shape,y_test.shape
((60000, 28, 28), (10000, 28, 28), (60000,), (10000,))
X_train_1 =  StandardScaler().fit_transform(X_train.astype(np.float32).reshape(-1,1))
X_train_1.shape
(47040000, 1)
X_train_2 = X_train_1.reshape(-1,28,28)
X_train_2.shape
(60000, 28, 28)
X_test_2 =  StandardScaler().fit_transform(X_test.astype(np.float32).reshape(-1,1)).reshape(-1,28,28)
X_test_2.shape
(10000, 28, 28)
X_train = np.expand_dims(X_train_2,-1)
X_test = np.expand_dims(X_test_2,-1)
y_train = keras.utils.to_categorical(y_train)
y_test = keras.utils.to_categorical(y_test)
x_train = X_train
x_test = X_test
x_shape = x_train.shape
inn = layers.Input(shape=(x_shape[1], x_shape[2], x_shape[3]))
reshape = layers.Reshape(target_shape=(x_shape[1]*x_shape[2], x_shape[3]))(inn)
lstm_layer = layers.LSTM(64, return_sequences=True)(reshape)
lstm_layer2 = layers.LSTM(32, return_sequences=False)(lstm_layer)
dense2 = layers.Dense(64)(lstm_layer2)
outt = layers.Dense(10,activation='softmax')(dense2)
model = keras.Model(inputs=inn, outputs=outt)
model.compile(optimizer=keras.optimizers.Adam(),
             loss=keras.losses.CategoricalCrossentropy(),
             metrics=['accuracy'])
model.summary()
Model: "functional_1"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
input_1 (InputLayer)         [(None, 28, 28, 1)]       0         
_________________________________________________________________
reshape (Reshape)            (None, 784, 1)            0         
_________________________________________________________________
lstm (LSTM)                  (None, 784, 64)           16896     
_________________________________________________________________
lstm_1 (LSTM)                (None, 32)                12416     
_________________________________________________________________
dense (Dense)                (None, 64)                2112      
_________________________________________________________________
dense_1 (Dense)              (None, 10)                650       
=================================================================
Total params: 32,074
Trainable params: 32,074
Non-trainable params: 0
_________________________________________________________________
history = model.fit(X_train, y_train,epochs=10,
                    shuffle=True,
                    batch_size=256,
                    validation_data=(X_test, y_test))
Epoch 1/10
235/235 [==============================] - 19s 83ms/step - loss: 1.6327 - accuracy: 0.3705 - val_loss: 1.3306 - val_accuracy: 0.4695
Epoch 2/10
235/235 [==============================] - 19s 80ms/step - loss: 1.3624 - accuracy: 0.4618 - val_loss: 1.2358 - val_accuracy: 0.5053
Epoch 3/10
235/235 [==============================] - 19s 80ms/step - loss: 1.2118 - accuracy: 0.5224 - val_loss: 1.1574 - val_accuracy: 0.5448
Epoch 4/10
235/235 [==============================] - 19s 81ms/step - loss: 1.1829 - accuracy: 0.5364 - val_loss: 1.1502 - val_accuracy: 0.5378
Epoch 5/10
235/235 [==============================] - 19s 80ms/step - loss: 1.2322 - accuracy: 0.5139 - val_loss: 1.2598 - val_accuracy: 0.5023
Epoch 6/10
235/235 [==============================] - 19s 82ms/step - loss: 1.1953 - accuracy: 0.5228 - val_loss: 1.2095 - val_accuracy: 0.5399
Epoch 7/10
235/235 [==============================] - 19s 82ms/step - loss: 1.1432 - accuracy: 0.5483 - val_loss: 1.1128 - val_accuracy: 0.5632
Epoch 8/10
235/235 [==============================] - 19s 81ms/step - loss: 1.1098 - accuracy: 0.5612 - val_loss: 1.0699 - val_accuracy: 0.5720
Epoch 9/10
235/235 [==============================] - 19s 81ms/step - loss: 1.0333 - accuracy: 0.5909 - val_loss: 1.0201 - val_accuracy: 0.5984
Epoch 10/10
235/235 [==============================] - 19s 81ms/step - loss: 1.0710 - accuracy: 0.5807 - val_loss: 1.1863 - val_accuracy: 0.5393
x_shape = x_train.shape
inn = layers.Input(shape=(x_shape[1], x_shape[2], x_shape[3]))
reshape = layers.Reshape(target_shape=(x_shape[1]*x_shape[2], x_shape[3]))(inn)
lstm_layer = layers.LSTM(128, return_sequences=True)(reshape)
lstm_layer2 = layers.LSTM(64, return_sequences=True)(lstm_layer)
lstm_layer3 = layers.LSTM(32, return_sequences=False)(lstm_layer2)
dense2 = layers.Dense(64)(lstm_layer3)
outt = layers.Dense(10,activation='softmax')(dense2)
model = keras.Model(inputs=inn, outputs=outt)
model.compile(optimizer=keras.optimizers.Adam(),
             loss=keras.losses.CategoricalCrossentropy(),
             metrics=['accuracy'])
history = model.fit(X_train, y_train,epochs=10,
                    shuffle=True,
                    batch_size=256,
                    validation_data=(X_test, y_test))
Epoch 1/10
235/235 [==============================] - 38s 163ms/step - loss: 2.1014 - accuracy: 0.1931 - val_loss: 2.1568 - val_accuracy: 0.1705
Epoch 2/10
235/235 [==============================] - 38s 160ms/step - loss: 2.2060 - accuracy: 0.1552 - val_loss: 2.2256 - val_accuracy: 0.1335
Epoch 3/10
235/235 [==============================] - 37s 160ms/step - loss: 2.2229 - accuracy: 0.1483 - val_loss: 2.2191 - val_accuracy: 0.1607
Epoch 4/10
235/235 [==============================] - 37s 159ms/step - loss: 2.0763 - accuracy: 0.2124 - val_loss: 2.3030 - val_accuracy: 0.1001
Epoch 5/10
235/235 [==============================] - 37s 159ms/step - loss: 2.3035 - accuracy: 0.1000 - val_loss: 2.3052 - val_accuracy: 0.1000
Epoch 6/10
235/235 [==============================] - 37s 159ms/step - loss: 2.3016 - accuracy: 0.0970 - val_loss: 2.3034 - val_accuracy: 0.1000
Epoch 7/10
235/235 [==============================] - 38s 160ms/step - loss: 2.3030 - accuracy: 0.1015 - val_loss: 2.3008 - val_accuracy: 0.1000
Epoch 8/10
235/235 [==============================] - 38s 160ms/step - loss: 2.3030 - accuracy: 0.1007 - val_loss: 2.3011 - val_accuracy: 0.1087
Epoch 9/10
235/235 [==============================] - 38s 160ms/step - loss: 2.2764 - accuracy: 0.1213 - val_loss: 2.3091 - val_accuracy: 0.1000
Epoch 10/10
235/235 [==============================] - 38s 161ms/step - loss: 2.2946 - accuracy: 0.1136 - val_loss: 2.3049 - val_accuracy: 0.1000
model = tf.keras.Sequential()
model.add(tf.keras.layers.Conv2D(filters=64,kernel_size=(3,3),
                                 padding='same',activation='selu',
                                 input_shape=(X_train.shape[1:])))
model.add(tf.keras.layers.MaxPool2D())
model.add(tf.keras.layers.SeparableConv2D(filters=32,kernel_size=(3,3),padding='same',activation='selu'))
model.add(tf.keras.layers.MaxPool2D())
model.add(tf.keras.layers.SeparableConv2D(filters=16,kernel_size=(3,3),padding='same',activation='selu'))
model.add(tf.keras.layers.MaxPool2D())
model.add(tf.keras.layers.SeparableConv2D(filters=32,kernel_size=(3,3),padding='same',activation='selu'))
model.add(tf.keras.layers.MaxPool2D())
model.add(tf.keras.layers.SeparableConv2D(filters=64,kernel_size=(3,3),padding='same',activation='selu'))
model.add(tf.keras.layers.GlobalAveragePooling2D())
model.add(tf.keras.layers.Dense(10,activation='softmax'))
model.compile(optimizer='Adam',
              loss=keras.losses.CategoricalCrossentropy(),
              metrics=['acc'])
history = model.fit(X_train, y_train,epochs=10,
                    shuffle=True,
                    batch_size=256,
                    validation_data=(X_test, y_test))
Epoch 1/10
235/235 [==============================] - 3s 12ms/step - loss: 1.2564 - acc: 0.5554 - val_loss: 0.6980 - val_acc: 0.7360
Epoch 2/10
235/235 [==============================] - 2s 10ms/step - loss: 0.6098 - acc: 0.7705 - val_loss: 0.5704 - val_acc: 0.7829
Epoch 3/10
235/235 [==============================] - 3s 11ms/step - loss: 0.5277 - acc: 0.8012 - val_loss: 0.5151 - val_acc: 0.8076
Epoch 4/10
235/235 [==============================] - 2s 10ms/step - loss: 0.4825 - acc: 0.8208 - val_loss: 0.4759 - val_acc: 0.8252
Epoch 5/10
235/235 [==============================] - 2s 10ms/step - loss: 0.4462 - acc: 0.8372 - val_loss: 0.4616 - val_acc: 0.8277
Epoch 6/10
235/235 [==============================] - 2s 10ms/step - loss: 0.4199 - acc: 0.8466 - val_loss: 0.4347 - val_acc: 0.8419
Epoch 7/10
235/235 [==============================] - 2s 10ms/step - loss: 0.3997 - acc: 0.8545 - val_loss: 0.4229 - val_acc: 0.8469
Epoch 8/10
235/235 [==============================] - 2s 10ms/step - loss: 0.3838 - acc: 0.8608 - val_loss: 0.4229 - val_acc: 0.8437
Epoch 9/10
235/235 [==============================] - 2s 10ms/step - loss: 0.3698 - acc: 0.8656 - val_loss: 0.3932 - val_acc: 0.8579
Epoch 10/10
235/235 [==============================] - 2s 10ms/step - loss: 0.3572 - acc: 0.8694 - val_loss: 0.3792 - val_acc: 0.8627

  • 1
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 打赏
    打赏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

潘诺西亚的火山

你的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值