本文展示了在tf.keras自带的数据集fashion_mnist上构建分类模型。
1.导入数据并查看
(1)导入相关的库并查看对应的版本
import matplotlib as mpl
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import pandas as pd
import sklearn
import os
import sys
import time
import tensorflow as tf
from tensorflow import keras
# Report the version of every imported library (useful for reproducibility).
for lib in (mpl, np, pd, sklearn, tf, keras):
    print(lib.__name__, lib.__version__)
(2)导入数据并划分数据集
# Load Fashion-MNIST and hold out the first 5 000 training examples
# as a validation set; the remainder stays for training.
fashion_mnist = keras.datasets.fashion_mnist
(x_train_all, y_train_all), (x_test, y_test) = fashion_mnist.load_data()

x_valid, x_train = x_train_all[:5000], x_train_all[5000:]
y_valid, y_train = y_train_all[:5000], y_train_all[5000:]

# Sanity-check the shapes of each split.
for xs, ys in ((x_valid, y_valid), (x_train, y_train), (x_test, y_test)):
    print(xs.shape, ys.shape)
(3)展示单张图片
def show_single_image(img_arr):
    """Render a single grayscale image array with matplotlib."""
    plt.imshow(img_arr, cmap='binary')
    plt.show()


show_single_image(x_train[0])
(4)展示多张图片
def show_imgs(n_rows, n_cols, x_data, y_data, class_names):
    """Display the first ``n_rows * n_cols`` images in a titled grid.

    Args:
        n_rows, n_cols: grid dimensions.
        x_data: sequence of 2-D grayscale image arrays.
        y_data: integer labels, parallel to ``x_data``.
        class_names: mapping from label index to human-readable name.

    Raises:
        ValueError: if ``x_data`` and ``y_data`` differ in length, or there
            are fewer images than grid cells.
    """
    # Explicit raises instead of assert: asserts are stripped under `python -O`.
    if len(x_data) != len(y_data):
        raise ValueError("x_data and y_data must have the same length")
    # Original used `<`, which wrongly rejected a grid that uses every image;
    # having exactly n_rows * n_cols images is sufficient.
    if n_rows * n_cols > len(x_data):
        raise ValueError("not enough images to fill the grid")
    plt.figure(figsize=(n_cols * 1.4, n_rows * 1.6))
    for row in range(n_rows):
        for col in range(n_cols):
            index = n_cols * row + col
            plt.subplot(n_rows, n_cols, index + 1)
            plt.imshow(x_data[index], cmap='binary', interpolation='nearest')
            plt.axis('off')
            plt.title(class_names[y_data[index]])
    plt.show()
# Human-readable names for the ten Fashion-MNIST label indices (0-9).
class_names = [
    'T-shirt', 'Trouser', 'Pullover', 'Dress', 'Coat',
    'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot',
]

show_imgs(3, 5, x_train, y_train, class_names)
2.搭建分类模型
(1)使用keras.models.Sequential搭建模型
'''
# 写法1
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape = [28, 28]))
model.add(keras.layers.Dense(300, activation = 'relu'))
model.add(keras.layers.Dense(100, activation = 'relu'))
model.add(keras.layers.Dense(10, activation = 'softmax'))
'''
# Style 2: pass the layer list straight to the Sequential constructor
# (equivalent to the add() calls shown above).
model = keras.models.Sequential([
    keras.layers.Flatten(input_shape=[28, 28]),    # 28x28 image -> 784 vector
    keras.layers.Dense(300, activation='relu'),
    keras.layers.Dense(100, activation='relu'),
    keras.layers.Dense(10, activation='softmax'),  # one probability per class
])

# sparse_categorical_crossentropy expects integer class labels;
# use categorical_crossentropy when y is a one-hot vector instead.
model.compile(loss='sparse_categorical_crossentropy',
              optimizer='sgd',
              metrics=['accuracy'])
这里只是搭建和编译好了模型,此时可以查看模型的相关信息,但是模型还没有在数据上进行训练。
model.layers # inspect each layer of the model
model.summary() # print per-layer output shapes and parameter counts
(2)模型训练
模型训练之前,要对x进行归一化,有助于提高模型的准确性。
from sklearn.preprocessing import StandardScaler

# Standardize pixel values: the mean/std are fitted on the training set
# only and then reused for validation and test.  Flattening to (-1, 1)
# makes StandardScaler compute one global mean/std over all pixels.
scaler = StandardScaler()


def _scale(images, transform):
    # Cast to float32 before scaling, then restore the 28x28 image shape.
    flat = images.astype(np.float32).reshape(-1, 1)
    return transform(flat).reshape(-1, 28, 28)


x_train_scaler = _scale(x_train, scaler.fit_transform)
x_valid_scaler = _scale(x_valid, scaler.transform)
x_test_scaler = _scale(x_test, scaler.transform)
用归一化之后的数据训练模型:
# Train for 10 epochs on the standardized data; validation metrics are
# computed after each epoch and recorded in the returned History object.
history = model.fit(
    x_train_scaler,
    y_train,
    epochs=10,
    validation_data=(x_valid_scaler, y_valid),
)
其中history.history保存了模型的loss等信息,将它们可视化。
def plot_learning_curves(history):
    """Plot the per-epoch metrics stored in ``history.history``."""
    curves = pd.DataFrame(history.history)
    curves.plot(figsize=(8, 5))
    plt.grid(True)
    plt.gca().set_ylim(0, 1)  # loss and accuracy both fit in [0, 1] here
    plt.show()


plot_learning_curves(history)
(3)在测试集上进行评估
model.evaluate(x_test_scaler, y_test)  # test-set loss and accuracy (metrics set in compile)
准确度达到88.02%
(4)问题
如果不用归一化的数据训练模型的话,训练过程中的loss不下降
如何解决神经网络训练时loss不下降的问题