以下代码均在jupyterlab里面实现
# 添加注释
%config IPCompleter.greedy=True
# 导入库
import os
import numpy as np
# 读写gzip文件的时候需要使用到gzip模块
import gzip
# 导入本地数据集
def load_data(data_floder):
    """Load a Fashion-MNIST-style dataset from local gzip files.

    Parameters
    ----------
    data_floder : str
        Directory holding the four MNIST-format ``.gz`` files
        (train/t10k labels and images). Parameter name kept
        as-is for backward compatibility with existing callers.

    Returns
    -------
    tuple
        ``(x_train, y_train), (x_test, y_test)`` as uint8 numpy arrays;
        image arrays are shaped ``(num_samples, 28, 28)``.
    """
    filesname = ['train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz',
                 't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz']
    paths = [os.path.join(data_floder, fname) for fname in filesname]

    def _read_labels(path):
        # Label files: 8-byte header (magic + item count), then one uint8 per label.
        with gzip.open(path, 'rb') as f:
            return np.frombuffer(f.read(), np.uint8, offset=8)

    def _read_images(path, num_samples):
        # Image files: 16-byte header (magic, count, rows, cols), then raw pixels.
        with gzip.open(path, 'rb') as f:
            return np.frombuffer(f.read(), np.uint8, offset=16).reshape(num_samples, 28, 28)

    # Labels are read first so their length fixes the image-array reshape.
    y_train = _read_labels(paths[0])
    x_train = _read_images(paths[1], len(y_train))
    y_test = _read_labels(paths[2])
    x_test = _read_images(paths[3], len(y_test))
    return (x_train, y_train), (x_test, y_test)
(train_images,train_labels),(test_images,test_labels) = load_data('fashion')
from tensorflow import keras
import tensorflow as tf
class myCallback(tf.keras.callbacks.Callback):
    """Keras callback that stops training once the epoch loss drops below 0.4.

    Keras invokes ``on_epoch_end`` automatically after every epoch; setting
    ``self.model.stop_training = True`` ends ``model.fit`` early.
    """

    def on_epoch_end(self, epoch, logs=None):
        # logs=None (not a mutable {} default); Keras may pass None, and
        # 'loss' can be absent, so guard before comparing.
        loss = (logs or {}).get('loss')
        if loss is not None and loss < 0.4:
            # Original printed '\loss...': '\l' is not an escape sequence, so
            # a literal backslash leaked into the output; '\n' was intended.
            print('\nloss is low so cancelling training!')
            self.model.stop_training = True
# Instantiate the early-stopping callback defined above.
callbacks = myCallback()
## Build the model: flatten the 28x28 images, one 512-unit ReLU hidden
## layer, then a 10-way softmax output (one class per Fashion-MNIST label).
model = tf.keras.Sequential([tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512,activation=tf.nn.relu),
tf.keras.layers.Dense(10,activation=tf.nn.softmax)])
## Compile: Adam optimizer; sparse categorical cross-entropy because the
## labels are integer class ids, not one-hot vectors.
model.compile(optimizer=tf.optimizers.Adam(),loss=tf.losses.sparse_categorical_crossentropy,metrics=['accuracy'])
## Scale pixel values from [0, 255] down to [0, 1] before training.
train_images_scales = train_images/255
test_images_scales = test_images/255
## Train for up to 5 epochs; the callback runs after each epoch and may
## stop training early once the loss target is reached.
model.fit(train_images_scales,train_labels,epochs=5,callbacks=[callbacks])
训练结果:
Train on 60000 samples
Epoch 1/5
60000/60000 [==============================] - 6s 98us/sample - loss: 0.4719 - accuracy: 0.8318
Epoch 2/5
59744/60000 [============================>.] - ETA: 0s - loss: 0.3574 - accuracy: 0.8696\loss is low so cancelling training!
60000/60000 [==============================] - 5s 82us/sample - loss: 0.3575 - accuracy: 0.8695
<tensorflow.python.keras.callbacks.History at 0x27328039668>