import tensorflow as tf
from tensorflow. keras import datasets, layers, models
import matplotlib. pyplot as plt
# Load the MNIST handwritten-digit dataset (downloaded and cached on first call).
# Images are uint8 arrays in [0, 255]; labels are the digits 0-9.
(train_images, train_labels), (test_images, test_labels) = datasets.mnist.load_data()
# Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz
# 11490434/11490434 [==============================] - 35s 3us/step
# Scale pixel intensities from [0, 255] to [0.0, 1.0] so gradients are
# well-conditioned during training.
# NOTE(review): the division promotes the uint8 arrays to float64; Keras
# casts to float32 internally, so this is harmless but doubles host memory.
train_images, test_images = train_images / 255.0, test_images / 255.0

# Sanity-check the shapes: 60k train / 10k test images of 28x28 pixels.
# (The original bare expression only displays in a notebook; print() works
# in a plain script too.)
print(train_images.shape, test_images.shape, train_labels.shape, test_labels.shape)
# ((60000, 28, 28), (10000, 28, 28), (60000,), (10000,))
# Preview the first 20 training digits with their ground-truth labels.
plt.figure(figsize=(20, 10))
for i in range(20):
    plt.subplot(5, 10, i + 1)  # 5x10 grid; only the first 20 cells are filled
    plt.xticks([])             # hide ticks — pixel coordinates carry no meaning here
    plt.yticks([])
    plt.grid(False)
    plt.imshow(train_images[i], cmap=plt.cm.binary)
    plt.xlabel(train_labels[i])  # the digit this image is labeled as
plt.show()
# Add an explicit single-channel axis: Conv2D expects NHWC input,
# i.e. (batch, height, width, channels). Using -1 for the batch axis
# generalizes the code to any dataset size instead of hard-coding
# 60000 / 10000.
train_images = train_images.reshape((-1, 28, 28, 1))
test_images = test_images.reshape((-1, 28, 28, 1))

# Confirm the new shapes (print() so it also shows outside a notebook).
print(train_images.shape, test_images.shape, train_labels.shape, test_labels.shape)
# ((60000, 28, 28, 1), (10000, 28, 28, 1), (60000,), (10000,))
# Small LeNet-style CNN: two conv/pool stages extract features, then a
# dense head classifies. The final Dense(10) has NO activation — it emits
# raw logits, which is matched by SparseCategoricalCrossentropy(
# from_logits=True) at compile time.
model = models.Sequential([
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Flatten(),
    layers.Dense(64, activation='relu'),
    layers.Dense(10),  # one logit per digit class 0-9
])
model.summary()
# Model: "sequential"
# _________________________________________________________________
# Layer (type) Output Shape Param #
# =================================================================
# conv2d (Conv2D) (None, 26, 26, 32) 320
# max_pooling2d (MaxPooling2D (None, 13, 13, 32) 0
# )
# conv2d_1 (Conv2D) (None, 11, 11, 64) 18496
# max_pooling2d_1 (MaxPooling (None, 5, 5, 64) 0
# 2D)
# flatten (Flatten) (None, 1600) 0
# dense (Dense) (None, 64) 102464
# dense_1 (Dense) (None, 10) 650
# =================================================================
# Total params: 121,930
# Trainable params: 121,930
# Non-trainable params: 0
# _________________________________________________________________
# from_logits=True because the model's last layer outputs raw scores
# (no softmax); the loss applies the softmax internally, which is more
# numerically stable.
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])

# Train for 10 epochs, reporting metrics on the test split each epoch.
# NOTE(review): using the test set as validation_data leaks it into any
# model-selection decision — prefer a held-out validation split in real work.
history = model.fit(train_images, train_labels, epochs=10,
                    validation_data=(test_images, test_labels))
# Epoch 1/10
# 1875/1875 [==============================] - 30s 16ms/step - loss: 0.1356 - accuracy: 0.9585 - val_loss: 0.0465 - val_accuracy: 0.9850
# Epoch 2/10
# 1875/1875 [==============================] - 33s 18ms/step - loss: 0.0452 - accuracy: 0.9861 - val_loss: 0.0421 - val_accuracy: 0.9870
# Epoch 3/10
# 1875/1875 [==============================] - 37s 20ms/step - loss: 0.0315 - accuracy: 0.9898 - val_loss: 0.0278 - val_accuracy: 0.9905
# Epoch 4/10
# 1875/1875 [==============================] - 34s 18ms/step - loss: 0.0232 - accuracy: 0.9926 - val_loss: 0.0307 - val_accuracy: 0.9904
# Epoch 5/10
# 1875/1875 [==============================] - 31s 17ms/step - loss: 0.0179 - accuracy: 0.9942 - val_loss: 0.0257 - val_accuracy: 0.9912
# Epoch 6/10
# 1875/1875 [==============================] - 31s 16ms/step - loss: 0.0136 - accuracy: 0.9956 - val_loss: 0.0296 - val_accuracy: 0.9900
# Epoch 7/10
# 1875/1875 [==============================] - 31s 17ms/step - loss: 0.0097 - accuracy: 0.9964 - val_loss: 0.0259 - val_accuracy: 0.9932
# Epoch 8/10
# 1875/1875 [==============================] - 32s 17ms/step - loss: 0.0090 - accuracy: 0.9973 - val_loss: 0.0277 - val_accuracy: 0.9927
# Epoch 9/10
# 1875/1875 [==============================] - 30s 16ms/step - loss: 0.0076 - accuracy: 0.9973 - val_loss: 0.0309 - val_accuracy: 0.9916
# Epoch 10/10
# 1875/1875 [==============================] - 30s 16ms/step - loss: 0.0060 - accuracy: 0.9980 - val_loss: 0.0328 - val_accuracy: 0.9915
# Display test image #1. After the reshape above the image is (28, 28, 1);
# squeeze() drops the trailing channel axis because several matplotlib
# versions reject (M, N, 1)-shaped image data.
plt.imshow(test_images[1].squeeze())
# <matplotlib.image.AxesImage at 0x28e9c726160>
# Run inference over the whole test set; `pre` has shape (10000, 10) of
# raw logits (the model's last layer has no activation). The largest
# logit marks the predicted class — apply argmax (or softmax for
# probabilities) to get a label.
pre = model.predict(test_images)
print(pre[1])  # logits for test image #1; index 2 carries the largest score
# 313/313 [==============================] - 1s 4ms/step
# array([ 4.8554344, 4.144166 , 26.35997 , -10.464082 , -8.790907 ,
#        -15.091766 , 1.9568326, -10.758024 , -1.3119435, -12.41055 ],
#       dtype=float32)