import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
# --- Data loading and preprocessing ---
# Fashion-MNIST: 60,000 28x28 grayscale training images, 10,000 test images,
# integer labels 0-9 (see class_names below for the label meanings).
(train_images, train_labels), \
(test_images, test_labels) = datasets.fashion_mnist.load_data()

# Visual sanity check on the first training image.
plt.figure()
plt.imshow(train_images[0])
plt.colorbar()
plt.grid(False)
plt.show()

# Scale pixel values from [0, 255] to [0, 1] for stable training.
train_images, test_images = train_images / 255.0, test_images / 255.0

# Add an explicit channel axis (grayscale -> 1 channel): Conv2D expects
# 4-D input (batch, height, width, channels). Using -1 for the batch axis
# instead of hard-coded 60000/10000 keeps this correct for any split size.
train_images = train_images.reshape((-1, 28, 28, 1))
test_images = test_images.reshape((-1, 28, 28, 1))

# The original notebook relied on cell echo to display shapes; in a script
# a bare expression prints nothing, so report them explicitly.
print(train_images.shape, test_images.shape,
      train_labels.shape, test_labels.shape)
# Expected: (60000, 28, 28, 1) (10000, 28, 28, 1) (60000,) (10000,)
# Human-readable name for each of the 10 Fashion-MNIST label indices.
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']

# Preview the first 20 training samples in a 5x10 grid, each labelled
# with its class name.
plt.figure(figsize=(20, 10))
for idx in range(20):
    ax = plt.subplot(5, 10, idx + 1)
    ax.set_xticks([])
    ax.set_yticks([])
    ax.grid(False)
    ax.imshow(train_images[idx], cmap=plt.cm.binary)
    ax.set_xlabel(class_names[train_labels[idx]])
plt.show()
# CNN classifier: two conv/pool stages plus a final conv, then a small
# dense head. The last Dense layer has no softmax — it emits raw logits,
# which pairs with SparseCategoricalCrossentropy(from_logits=True) at
# compile time. Shape comments match the model.summary() output below.
model = models.Sequential([
    layers.Conv2D(32, (3, 3), activation='relu',
                  input_shape=(28, 28, 1)),   # 28x28x1  -> 26x26x32
    layers.MaxPool2D((2, 2)),                 # 26x26x32 -> 13x13x32
    layers.Conv2D(64, (3, 3), activation='relu'),  # -> 11x11x64
    # Was MaxPool2D(2, 2): the second positional arg is strides=2, which
    # equals the default (strides follows pool_size), so behavior is
    # identical — rewritten to match the first pooling layer's style.
    layers.MaxPool2D((2, 2)),                 # 11x11x64 -> 5x5x64
    layers.Conv2D(64, (3, 3), activation='relu'),  # -> 3x3x64
    layers.Flatten(),                         # 3*3*64 = 576 features
    layers.Dense(64, activation='relu'),
    layers.Dense(10)                          # 10 class logits
])
model.summary()
Model: "sequential_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d_3 (Conv2D) (None, 26, 26, 32) 320
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 13, 13, 32) 0
_________________________________________________________________
conv2d_4 (Conv2D) (None, 11, 11, 64) 18496
_________________________________________________________________
max_pooling2d_3 (MaxPooling2 (None, 5, 5, 64) 0
_________________________________________________________________
conv2d_5 (Conv2D) (None, 3, 3, 64) 36928
_________________________________________________________________
flatten_1 (Flatten) (None, 576) 0
_________________________________________________________________
dense_2 (Dense) (None, 64) 36928
_________________________________________________________________
dense_3 (Dense) (None, 10) 650
=================================================================
Total params: 93,322
Trainable params: 93,322
Non-trainable params: 0
_________________________________________________________________
# Compile with Adam; sparse CE consumes integer labels directly and
# from_logits=True matches the softmax-free output layer.
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
model.compile(optimizer='adam', loss=loss_fn, metrics=['accuracy'])

# Train for 10 epochs, monitoring generalization on the held-out test set.
history = model.fit(
    train_images,
    train_labels,
    epochs=10,
    validation_data=(test_images, test_labels),
)
Train on 60000 samples, validate on 10000 samples
Epoch 1/10
60000/60000 [==============================] - 144s 2ms/sample - loss: 0.4953 - accuracy: 0.8195 - val_loss: 0.3678 - val_accuracy: 0.8666
Epoch 2/10
60000/60000 [==============================] - 140s 2ms/sample - loss: 0.3212 - accuracy: 0.8831 - val_loss: 0.3217 - val_accuracy: 0.8832
Epoch 3/10
60000/60000 [==============================] - 140s 2ms/sample - loss: 0.2728 - accuracy: 0.8999 - val_loss: 0.2854 - val_accuracy: 0.8988
Epoch 4/10
60000/60000 [==============================] - 139s 2ms/sample - loss: 0.2427 - accuracy: 0.9099 - val_loss: 0.2898 - val_accuracy: 0.8928
Epoch 5/10
60000/60000 [==============================] - 140s 2ms/sample - loss: 0.2200 - accuracy: 0.9198 - val_loss: 0.2645 - val_accuracy: 0.9049
Epoch 6/10
60000/60000 [==============================] - 139s 2ms/sample - loss: 0.1992 - accuracy: 0.9268 - val_loss: 0.2607 - val_accuracy: 0.9043
Epoch 7/10
60000/60000 [==============================] - 137s 2ms/sample - loss: 0.1835 - accuracy: 0.9319 - val_loss: 0.2475 - val_accuracy: 0.9114
Epoch 8/10
60000/60000 [==============================] - 139s 2ms/sample - loss: 0.1686 - accuracy: 0.9369 - val_loss: 0.2867 - val_accuracy: 0.9043
Epoch 9/10
60000/60000 [==============================] - 138s 2ms/sample - loss: 0.1555 - accuracy: 0.9416 - val_loss: 0.2651 - val_accuracy: 0.9104
Epoch 10/10
60000/60000 [==============================] - 135s 2ms/sample - loss: 0.1423 - accuracy: 0.9464 - val_loss: 0.2734 - val_accuracy: 0.9090
# Inspect one held-out sample and the model's prediction for it.
# (The original cell echoed an AxesImage repr and the bare word
# 'Pullover' — notebook output residue that is invalid in a script.)
plt.imshow(test_images[1])
plt.show()

# Logits for the whole test set; argmax over axis 0 of row 1 picks the
# most confident class for sample index 1.
pre = model.predict(test_images)
print(class_names[np.argmax(pre[1])])
# Expected output: Pullover
# Plot training vs. validation accuracy over the 10 epochs.
plt.plot(history.history['accuracy'], label='accuracy')
plt.plot(history.history['val_accuracy'], label='val_accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.ylim([0.5, 1])
plt.legend(loc='lower right')
plt.show()

# Final evaluation on the test set; verbose=2 prints one summary line.
# (Echoed log lines from the notebook transcript removed — they are not
# valid Python in a script.)
test_loss, test_acc = model.evaluate(test_images,
                                     test_labels,
                                     verbose=2)
# The message text ("Test accuracy is") is a runtime string and is kept
# byte-for-byte from the original.
print('测试准确度为', test_acc)