1. TF2.0 in practice
Model architecture
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
conv2d (Conv2D)              (None, 28, 28, 32)        320
_________________________________________________________________
conv2d_1 (Conv2D)            (None, 28, 28, 32)        9248
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 14, 14, 32)        0
_________________________________________________________________
conv2d_2 (Conv2D)            (None, 14, 14, 64)        18496
_________________________________________________________________
conv2d_3 (Conv2D)            (None, 14, 14, 64)        36928
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 7, 7, 64)          0
_________________________________________________________________
conv2d_4 (Conv2D)            (None, 7, 7, 128)         73856
_________________________________________________________________
conv2d_5 (Conv2D)            (None, 7, 7, 128)         147584
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 3, 3, 128)         0
_________________________________________________________________
flatten (Flatten)            (None, 1152)              0
_________________________________________________________________
dense (Dense)                (None, 128)               147584
_________________________________________________________________
dense_1 (Dense)              (None, 10)                1290
=================================================================
Total params: 435,306
Trainable params: 435,306
Non-trainable params: 0
_________________________________________________________________
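As a quick sanity check, the Param # column can be reproduced by hand: a Conv2D layer holds kernel_h * kernel_w * in_channels * filters weights plus one bias per filter, and a Dense layer analogously holds inputs * units + units. A minimal sketch:

def conv2d_params(kernel, in_ch, filters):
    # kernel*kernel weights per (input channel, filter) pair, plus one bias per filter
    return kernel * kernel * in_ch * filters + filters

print(conv2d_params(3, 1, 32))   # 320:    conv2d
print(conv2d_params(3, 32, 32))  # 9248:   conv2d_1
print(1152 * 128 + 128)          # 147584: dense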
Code:
import matplotlib as mpl
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import sklearn
import pandas as pd
import os
import sys
import time
import tensorflow as tf
from tensorflow import keras
print(tf.__version__)
print(sys.version_info)
for module in mpl, np, pd, sklearn, tf, keras:
    print(module.__name__, module.__version__)
fashion_mnist = keras.datasets.fashion_mnist
(x_train_all, y_train_all), (x_test, y_test) = fashion_mnist.load_data()
x_valid, x_train = x_train_all[:5000], x_train_all[5000:]
y_valid, y_train = y_train_all[:5000], y_train_all[5000:]
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
# Fit the scaler on the training set only, then reuse it for validation and test.
x_train_scaled = scaler.fit_transform(
    x_train.astype(np.float32).reshape(-1, 1)).reshape(-1, 28, 28, 1)  # last dim is the channel count
x_valid_scaled = scaler.transform(
    x_valid.astype(np.float32).reshape(-1, 1)).reshape(-1, 28, 28, 1)
x_test_scaled = scaler.transform(
    x_test.astype(np.float32).reshape(-1, 1)).reshape(-1, 28, 28, 1)
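# Note: because of reshape(-1, 1), StandardScaler learns a single global
# mean/std over all training pixels. The same normalization written by hand
# (a sketch, equivalent to the scaler calls above):
#   mean = x_train.astype(np.float32).mean()
#   std = x_train.astype(np.float32).std()
#   x_train_manual = ((x_train.astype(np.float32) - mean) / std).reshape(-1, 28, 28, 1)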
model = keras.models.Sequential()
# filters=32: the number of output channels, i.e. the number of convolution kernels
model.add(keras.layers.Conv2D(filters=32, kernel_size=3,
                              padding='same',
                              activation='relu',
                              input_shape=(28, 28, 1)))
model.add(keras.layers.Conv2D(filters=32, kernel_size=3,
                              padding='same',
                              activation='relu'))
# Stride usually equals the pool size, so setting pool_size alone is enough.
model.add(keras.layers.MaxPool2D(pool_size=2))
# After each pooling layer, filters are typically doubled to offset the information loss.
model.add(keras.layers.Conv2D(filters=64, kernel_size=3,
                              padding='same',
                              activation='relu'))
model.add(keras.layers.Conv2D(filters=64, kernel_size=3,
                              padding='same',
                              activation='relu'))
model.add(keras.layers.MaxPool2D(pool_size=2))
model.add(keras.layers.Conv2D(filters=128, kernel_size=3,
                              padding='same',
                              activation='relu'))
model.add(keras.layers.Conv2D(filters=128, kernel_size=3,
                              padding='same',
                              activation='relu'))
model.add(keras.layers.MaxPool2D(pool_size=2))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(128, activation='relu'))
model.add(keras.layers.Dense(10, activation="softmax"))
# sparse_categorical_crossentropy expects integer labels (no one-hot encoding needed).
model.compile(loss="sparse_categorical_crossentropy",
              optimizer="sgd",
              metrics=["accuracy"])
logdir = os.path.join("cnn-relu-callbacks")
if not os.path.exists(logdir):
    os.mkdir(logdir)
output_model_file = os.path.join(logdir, "fashion_mnist_model.h5")
callbacks = [
    keras.callbacks.TensorBoard(logdir),
    keras.callbacks.ModelCheckpoint(output_model_file,
                                    save_best_only=True),
    keras.callbacks.EarlyStopping(patience=5, min_delta=1e-3),
]
history = model.fit(x_train_scaled, y_train, epochs=10,
                    validation_data=(x_valid_scaled, y_valid),
                    callbacks=callbacks)
def plot_learning_curves(history):
    pd.DataFrame(history.history).plot(figsize=(8, 5))
    plt.grid(True)
    plt.gca().set_ylim(0, 1)
    plt.show()
plot_learning_curves(history)
model.evaluate(x_test_scaled, y_test, verbose = 0)
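Because ModelCheckpoint was created with save_best_only=True, the weights with the best validation loss are already on disk; a minimal sketch to restore and re-evaluate them (using output_model_file from above):

best_model = keras.models.load_model(output_model_file)
best_model.evaluate(x_test_scaled, y_test, verbose=0)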
2. TF1.0 in practice
import tensorflow as tf
import os
import pickle
import numpy as np
CIFAR_DIR = "./cifar-10-batches-py"
print(os.listdir(CIFAR_DIR))
def load_data(filename):
    """Read one CIFAR-10 batch file and return (data, labels)."""
    with open(filename, 'rb') as f:
        data = pickle.load(f, encoding='bytes')
    return data[b'data'], data[b'labels']
# A simple batcher, playing the role that tf.data.Dataset plays in newer code.
class CifarData:
    def __init__(self, filenames, need_shuffle):
        all_data = []
        all_labels = []
        for filename in filenames:
            data, labels = load_data(filename)
            all_data.append(data)
            all_labels.append(labels)
        self._data = np.vstack(all_data)
        self._data = self._data / 127.5 - 1  # scale pixel values to [-1, 1]
        self._labels = np.hstack(all_labels)
        print(self._data.shape)
        print(self._labels.shape)
        self._num_examples = self._data.shape[0]
        self._need_shuffle = need_shuffle
        self._indicator = 0
        if self._need_shuffle:
            self._shuffle_data()

    def _shuffle_data(self):
        # e.g. [0,1,2,3,4,5] -> [5,3,2,4,0,1]
        p = np.random.permutation(self._num_examples)
        self._data = self._data[p]
        self._labels = self._labels[p]

    def next_batch(self, batch_size):
        """Return batch_size examples as a batch."""
        end_indicator = self._indicator + batch_size
        if end_indicator > self._num_examples:
            if self._need_shuffle:
                # wrap around: reshuffle and start a new epoch
                self._shuffle_data()
                self._indicator = 0
                end_indicator = batch_size
            else:
                raise Exception("no more examples")
        if end_indicator > self._num_examples:
            raise Exception("batch size is larger than all examples")
        batch_data = self._data[self._indicator: end_indicator]
        batch_labels = self._labels[self._indicator: end_indicator]
        self._indicator = end_indicator
        return batch_data, batch_labels
train_filenames = [os.path.join(CIFAR_DIR, 'data_batch_%d' % i) for i in range(1, 6)]
test_filenames = [os.path.join(CIFAR_DIR, 'test_batch')]
train_data = CifarData(train_filenames, True)
test_data = CifarData(test_filenames, False)
x = tf.placeholder(tf.float32, [None, 3072])
y = tf.placeholder(tf.int64, [None])  # integer class labels, e.g. [0, 5, 6, 3]
x_image = tf.reshape(x, [-1, 3, 32, 32])
x_image = tf.transpose(x_image, perm=[0, 2, 3, 1])  # 32 * 32 images, NHWC
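# Each CIFAR-10 row stores its 3072 values channels-first: 1024 R, then 1024 G,
# then 1024 B. Hence the reshape to [N, 3, 32, 32] followed by the transpose to
# NHWC, which tf.layers.conv2d expects by default (data_format='channels_last').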
# conv1: feature maps ("neuron maps"), i.e. the output images
conv1 = tf.layers.conv2d(x_image,
                         32,      # output channel number
                         (3, 3),  # kernel size
                         padding='same',
                         activation=tf.nn.relu,
                         name='conv1')
# output: 16 * 16
pooling1 = tf.layers.max_pooling2d(conv1,
                                   (2, 2),  # kernel size
                                   (2, 2),  # stride
                                   name='pool1')
conv2 = tf.layers.conv2d(pooling1,
                         32,      # output channel number
                         (3, 3),  # kernel size
                         padding='same',
                         activation=tf.nn.relu,
                         name='conv2')
# output: 8 * 8
pooling2 = tf.layers.max_pooling2d(conv2,
                                   (2, 2),  # kernel size
                                   (2, 2),  # stride
                                   name='pool2')
conv3 = tf.layers.conv2d(pooling2,
                         32,      # output channel number
                         (3, 3),  # kernel size
                         padding='same',
                         activation=tf.nn.relu,
                         name='conv3')
# output: 4 * 4 * 32
pooling3 = tf.layers.max_pooling2d(conv3,
                                   (2, 2),  # kernel size
                                   (2, 2),  # stride
                                   name='pool3')
# [None, 4 * 4 * 32]
flatten = tf.layers.flatten(pooling3)
y_ = tf.layers.dense(flatten, 10)
# sparse_softmax_cross_entropy applies softmax to the logits y_ and one-hot
# encodes y internally; loss = -sum(y_onehot * log(softmax(y_)))
loss = tf.losses.sparse_softmax_cross_entropy(labels=y, logits=y_)
predict = tf.argmax(y_, 1)  # predicted class indices
# element-wise correctness, e.g. [1,0,1,1,1,0,0,0]
correct_prediction = tf.equal(predict, y)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float64))
with tf.name_scope('train_op'):
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)
init = tf.global_variables_initializer()
batch_size = 20
train_steps = 100000
test_steps = 100
# after ~10k training steps, test accuracy reaches ~71.35%
with tf.Session() as sess:
    sess.run(init)
    for i in range(train_steps):
        batch_data, batch_labels = train_data.next_batch(batch_size)
        loss_val, acc_val, _ = sess.run(
            [loss, accuracy, train_op],
            feed_dict={
                x: batch_data,
                y: batch_labels})
        if (i+1) % 100 == 0:
            print('[Train] Step: %d, loss: %4.5f, acc: %4.5f'
                  % (i+1, loss_val, acc_val))
        if (i+1) % 1000 == 0:
            # evaluate on the test set in mini-batches
            test_data = CifarData(test_filenames, False)
            all_test_acc_val = []
            for j in range(test_steps):
                test_batch_data, test_batch_labels \
                    = test_data.next_batch(batch_size)
                test_acc_val = sess.run(
                    accuracy,
                    feed_dict={
                        x: test_batch_data,
                        y: test_batch_labels
                    })
                all_test_acc_val.append(test_acc_val)
            test_acc = np.mean(all_test_acc_val)
            print('[Test ] Step: %d, acc: %4.5f'
                  % (i+1, test_acc))
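The script above uses TF1.x-only APIs (tf.placeholder, tf.layers, tf.Session). If only TF2 is installed, the usual migration shim is the compat module; a minimal sketch:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # restore graph mode so placeholders and sessions work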
3. Converting the TF1.0 code from section 2 to TF2.0 (corrections welcome)
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
conv2d (Conv2D)              (None, 32, 32, 32)        896
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 16, 16, 32)        0
_________________________________________________________________
conv2d_1 (Conv2D)            (None, 16, 16, 64)        18496
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 8, 8, 64)          0
_________________________________________________________________
conv2d_2 (Conv2D)            (None, 8, 8, 128)         73856
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 4, 4, 128)         0
_________________________________________________________________
flatten (Flatten)            (None, 2048)              0
_________________________________________________________________
dense (Dense)                (None, 10)                20490
=================================================================
Total params: 113,738
Trainable params: 113,738
Non-trainable params: 0
_________________________________________________________________
import tensorflow as tf
import os
import pickle
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
%matplotlib inline
import sklearn
import pandas as pd
import sys
import time
from tensorflow import keras
print(tf.__version__)
print(sys.version_info)
for module in mpl, np, pd, sklearn, tf, keras:
    print(module.__name__, module.__version__)
CIFAR_DIR = os.path.join("cifar-10-batches-py")
print(os.listdir(CIFAR_DIR))
def load_data(filename):
    """Read one CIFAR-10 batch file and return (data, labels)."""
    with open(filename, 'rb') as f:
        data = pickle.load(f, encoding='bytes')
    return data[b'data'], data[b'labels']
train_filenames = [os.path.join(CIFAR_DIR, 'data_batch_%d' % i) for i in range(1, 6)]
test_filenames = [os.path.join(CIFAR_DIR, 'test_batch')]
all_filenames = train_filenames + test_filenames
all_train_data = []
all_train_labels = []
all_test_data = []
all_test_labels = []
for filename in all_filenames:
    data, labels = load_data(filename)
    if filename.startswith(os.path.join(CIFAR_DIR, 'data')):
        all_train_data.append(data)
        all_train_labels.append(labels)
    else:
        all_test_data.append(data)
        all_test_labels.append(labels)
train_data = np.vstack(all_train_data)
train_labels = np.hstack(all_train_labels)
test_data = np.vstack(all_test_data)
test_labels = np.hstack(all_test_labels)
print(train_data.shape)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
train_data_scaled = scaler.fit_transform(
    train_data.astype(np.float32).reshape(-1, 1)).reshape(-1, 3, 32, 32)
test_data_scaled = scaler.transform(
    test_data.astype(np.float32).reshape(-1, 1)).reshape(-1, 3, 32, 32)
# NCHW -> NHWC for Keras Conv2D (channels_last by default)
train_data_scaled = tf.transpose(train_data_scaled, perm=[0, 2, 3, 1])
test_data_scaled = tf.transpose(test_data_scaled, perm=[0, 2, 3, 1])
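# Alternative (a sketch): do the same NHWC permutation in NumPy and keep plain
# arrays instead of eager tensors; model.fit accepts either.
#   train_data_scaled = np.transpose(train_data_scaled, (0, 2, 3, 1))
#   test_data_scaled = np.transpose(test_data_scaled, (0, 2, 3, 1))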
model = keras.models.Sequential()
# filters=32: the number of output channels, i.e. the number of convolution kernels
model.add(keras.layers.Conv2D(filters=32, kernel_size=3,
                              padding='same',
                              activation='relu',
                              input_shape=train_data_scaled.shape[1:]))
model.add(keras.layers.MaxPool2D(pool_size=2))
model.add(keras.layers.Conv2D(filters=64, kernel_size=3,
                              padding='same',
                              activation='relu'))
model.add(keras.layers.MaxPool2D(pool_size=2))
model.add(keras.layers.Conv2D(filters=128, kernel_size=3,
                              padding='same',
                              activation='relu'))
model.add(keras.layers.MaxPool2D(pool_size=2))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(10, activation="softmax"))
model.compile(loss="sparse_categorical_crossentropy",
              # optimizer=keras.optimizers.SGD(0.001),
              optimizer="sgd",
              metrics=["accuracy"])
model.summary()
history = model.fit(train_data_scaled, train_labels, epochs=100,
                    validation_data=(test_data_scaled, test_labels))
# plot how loss and accuracy evolve over training
def plot_learning_curves(history):
    pd.DataFrame(history.history).plot(figsize=(8, 5))
    plt.grid(True)
    plt.gca().set_ylim(0, 1)
    plt.show()
plot_learning_curves(history)
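For parity with the final test accuracy reported by the TF1.0 run, the held-out set can also be scored directly (note that it already served as validation data during fit, so this simply repeats the last epoch's validation metrics):

model.evaluate(test_data_scaled, test_labels, verbose=0)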