activation functions: relu, sigmoid, tanh
weight initializers: He (Kaiming He), Xavier, normal, truncated_normal
optimizers: Adam, Momentum, Gradient Descent
All of these are set where we build the computation graph.
We build on our earlier VGGNet-style model:
import tensorflow as tf
import os
import pickle
import numpy as np

CIFAR_DIR = "dataset/cifar-10-batches-py"
print(os.listdir(CIFAR_DIR))

def load_data(filename):
    """read data from data file."""
    with open(filename, 'rb') as f:
        data = pickle.load(f, encoding='bytes')
        return data[b'data'], data[b'labels']

# tensorflow.Dataset.
class CifarData:
    def __init__(self, filenames, need_shuffle):
        all_data = []
        all_labels = []
        for filename in filenames:
            data, labels = load_data(filename)
            all_data.append(data)
            all_labels.append(labels)
        self._data = np.vstack(all_data)
        self._data = self._data / 127.5 - 1  # scale pixels to [-1, 1]
        self._labels = np.hstack(all_labels)
        print(self._data.shape)
        print(self._labels.shape)
        self._num_examples = self._data.shape[0]
        self._need_shuffle = need_shuffle
        self._indicator = 0
        if self._need_shuffle:
            self._shuffle_data()

    def _shuffle_data(self):
        # [0,1,2,3,4,5] -> [5,3,2,4,0,1]
        p = np.random.permutation(self._num_examples)
        self._data = self._data[p]
        self._labels = self._labels[p]

    def next_batch(self, batch_size):
        """return batch_size examples as a batch."""
        end_indicator = self._indicator + batch_size
        if end_indicator > self._num_examples:
            if self._need_shuffle:
                self._shuffle_data()
                self._indicator = 0
                end_indicator = batch_size
            else:
                raise Exception("have no more examples")
        if end_indicator > self._num_examples:
            raise Exception("batch size is larger than all examples")
        batch_data = self._data[self._indicator: end_indicator]
        batch_labels = self._labels[self._indicator: end_indicator]
        self._indicator = end_indicator
        return batch_data, batch_labels

train_filenames = [os.path.join(CIFAR_DIR, 'data_batch_%d' % i)
                   for i in range(1, 6)]
test_filenames = [os.path.join(CIFAR_DIR, 'test_batch')]

train_data = CifarData(train_filenames, True)
test_data = CifarData(test_filenames, False)

x = tf.placeholder(tf.float32, [None, 3072])
y = tf.placeholder(tf.int64, [None])  # [None], eg: [0,5,6,3]
x_image = tf.reshape(x, [-1, 3, 32, 32])  # 32*32
x_image = tf.transpose(x_image, perm=[0, 2, 3, 1])

# conv1: neuron maps / feature maps (output images)
conv1_1 = tf.layers.conv2d(x_image,
                           32,      # output channel number
                           (3, 3),  # kernel size
                           padding='same',
                           activation=tf.nn.relu,
                           name='conv1_1')
conv1_2 = tf.layers.conv2d(conv1_1, 32, (3, 3), padding='same',
                           activation=tf.nn.relu, name='conv1_2')
# 16 * 16
pooling1 = tf.layers.max_pooling2d(conv1_2,
                                   (2, 2),  # kernel size
                                   (2, 2),  # stride
                                   name='pool1')

conv2_1 = tf.layers.conv2d(pooling1, 32, (3, 3), padding='same',
                           activation=tf.nn.relu, name='conv2_1')
conv2_2 = tf.layers.conv2d(conv2_1, 32, (3, 3), padding='same',
                           activation=tf.nn.relu, name='conv2_2')
# 8 * 8
pooling2 = tf.layers.max_pooling2d(conv2_2, (2, 2), (2, 2), name='pool2')

conv3_1 = tf.layers.conv2d(pooling2, 32, (3, 3), padding='same',
                           activation=tf.nn.relu, name='conv3_1')
conv3_2 = tf.layers.conv2d(conv3_1, 32, (3, 3), padding='same',
                           activation=tf.nn.relu, name='conv3_2')
# 4 * 4 * 32
pooling3 = tf.layers.max_pooling2d(conv3_2, (2, 2), (2, 2), name='pool3')

# [None, 4 * 4 * 32]
flatten = tf.layers.flatten(pooling3)
y_ = tf.layers.dense(flatten, 10)

loss = tf.losses.sparse_softmax_cross_entropy(labels=y, logits=y_)
# y_ -> softmax
# y -> one_hot
# loss = ylogy_

predict = tf.argmax(y_, 1)  # indices
correct_prediction = tf.equal(predict, y)  # [1,0,1,1,1,0,0,0]
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float64))

with tf.name_scope('train_op'):
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)

init = tf.global_variables_initializer()
batch_size = 20
train_steps = 10000
test_steps = 100

# train 10k: 73.4%
with tf.Session() as sess:
    sess.run(init)
    for i in range(train_steps):
        batch_data, batch_labels = train_data.next_batch(batch_size)
        loss_val, acc_val, _ = sess.run(
            [loss, accuracy, train_op],
            feed_dict={x: batch_data, y: batch_labels})
        if (i + 1) % 100 == 0:
            print('[Train] Step: %d, loss: %4.5f, acc: %4.5f'
                  % (i + 1, loss_val, acc_val))
        if (i + 1) % 1000 == 0:
            test_data = CifarData(test_filenames, False)
            all_test_acc_val = []
            for j in range(test_steps):
                test_batch_data, test_batch_labels \
                    = test_data.next_batch(batch_size)
                test_acc_val = sess.run(
                    [accuracy],
                    feed_dict={x: test_batch_data, y: test_batch_labels})
                all_test_acc_val.append(test_acc_val)
            test_acc = np.mean(all_test_acc_val)
            print('[Test ] Step: %d, acc: %4.5f' % (i + 1, test_acc))
Activation functions
The activation function is set through the activation argument of each conv2d call in the listing above.
To make it easy to swap activation functions, we first wrap the network layers in a single function.
We previously trained with relu as the activation and reached about 71.8% accuracy.
Here we set the activation to sigmoid, leave everything else unchanged, and see how it does.
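With the convnet wrapper from the listing below, this is a one-line change; the rest of the script stays the same:

flatten = convnet(x_image, tf.nn.sigmoid)  # sigmoid instead of relu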
The final accuracy comes out at only about 54%, worse than relu, likely because sigmoid saturates easily and passes back much smaller gradients.
We then switch back to relu:
Only the model-construction code changes; the imports, data pipeline, placeholders, loss, and training loop are identical to the listing above. The layers are wrapped in a function so the activation can be swapped through a single argument:

def convnet(inputs, activation):
    # conv1: neuron maps / feature maps (output images)
    conv1_1 = tf.layers.conv2d(inputs,
                               32,      # output channel number
                               (3, 3),  # kernel size
                               padding='same',
                               activation=activation,
                               name='conv1_1')
    conv1_2 = tf.layers.conv2d(conv1_1, 32, (3, 3), padding='same',
                               activation=activation, name='conv1_2')
    # 16 * 16
    pooling1 = tf.layers.max_pooling2d(conv1_2,
                                       (2, 2),  # kernel size
                                       (2, 2),  # stride
                                       name='pool1')
    conv2_1 = tf.layers.conv2d(pooling1, 32, (3, 3), padding='same',
                               activation=activation, name='conv2_1')
    conv2_2 = tf.layers.conv2d(conv2_1, 32, (3, 3), padding='same',
                               activation=activation, name='conv2_2')
    # 8 * 8
    pooling2 = tf.layers.max_pooling2d(conv2_2, (2, 2), (2, 2), name='pool2')
    conv3_1 = tf.layers.conv2d(pooling2, 32, (3, 3), padding='same',
                               activation=activation, name='conv3_1')
    conv3_2 = tf.layers.conv2d(conv3_1, 32, (3, 3), padding='same',
                               activation=activation, name='conv3_2')
    # 4 * 4 * 32
    pooling3 = tf.layers.max_pooling2d(conv3_2, (2, 2), (2, 2), name='pool3')
    # [None, 4 * 4 * 32]
    return tf.layers.flatten(pooling3)

flatten = convnet(x_image, tf.nn.relu)
y_ = tf.layers.dense(flatten, 10)
Weight initializers
The initializer was not specified in the original code; to change it, you have to add the kernel_initializer argument yourself.
Its default value is None, in which case tf.layers.conv2d falls back internally to tf.glorot_uniform_initializer (Xavier uniform).
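For reference, Glorot (Xavier) uniform draws weights from U(-limit, limit) with limit = sqrt(6 / (fan_in + fan_out)), while He normal draws from a zero-mean normal with stddev = sqrt(2 / fan_in). A minimal numpy sketch of the Xavier bound for one of our 3x3, 32-to-32-channel kernels:

import numpy as np
fan_in = 3 * 3 * 32    # kernel_h * kernel_w * input channels
fan_out = 3 * 3 * 32   # kernel_h * kernel_w * output channels
limit = np.sqrt(6.0 / (fan_in + fan_out))  # glorot uniform bound
w = np.random.uniform(-limit, limit, size=(3, 3, 32, 32))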
If we pass kernel_initializer=None explicitly, the behavior is therefore identical to before.
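One way to wire it up (a sketch, not part of the original listing) is to give convnet an extra parameter and pass it to every conv layer:

def convnet(inputs, activation, kernel_initializer=None):
    conv1_1 = tf.layers.conv2d(inputs, 32, (3, 3),
                               padding='same',
                               activation=activation,
                               kernel_initializer=kernel_initializer,  # None -> glorot uniform
                               name='conv1_1')
    # ... the same argument is added to every conv layer below ...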
Accuracy stays around 73%.
With tf.truncated_normal_initializer(stddev=0.02), accuracy drops to around 65%.
With tf.keras.initializers.he_normal(), accuracy is around 71%.
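Each variant is just a different value for the same argument, using the extended convnet sketched above (the percentages refer to the experiments just described):

init_trunc = tf.truncated_normal_initializer(stddev=0.02)  # ~65% accuracy
init_he = tf.keras.initializers.he_normal()                # ~71% accuracy
flatten = convnet(x_image, tf.nn.relu, kernel_initializer=init_he)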
He initialization is generally regarded as a strong method, though. A likely reason it does not stand out here is that our network is fairly shallow (six convolutional layers plus one fully connected layer), too shallow for the benefit of He initialization to show.
We then switch back to the default initializer.
Optimizers
The optimizer is set in the train_op block; right now we use Adam.
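For reference, these are the corresponding lines from the listing above:

with tf.name_scope('train_op'):
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)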
If we switch to plain gradient descent (tf.train.GradientDescentOptimizer), the learning rate has to be set smaller, otherwise training blows up: the loss grows instead of slowly decreasing.
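For example (the 1e-4 rate is an illustrative guess; the point is only that it must be well below Adam's 1e-3):

with tf.name_scope('train_op'):
    train_op = tf.train.GradientDescentOptimizer(1e-4).minimize(loss)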
After training, accuracy is 57%.
If we switch to MomentumOptimizer instead, accuracy is 36%.
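A sketch of that variant (the momentum value 0.9 is a conventional choice, not given in the text):

with tf.name_scope('train_op'):
    train_op = tf.train.MomentumOptimizer(learning_rate=1e-4,
                                          momentum=0.9).minimize(loss)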
Clearly, different optimization methods can behave very differently.
There are several possible reasons.
One is a mismatch between the initializer and the optimizer: tuning is a multi-factor process in a complex system, and changing one factor in isolation can leave it poorly matched with the rest.
Another is insufficient training: momentum converges relatively slowly, and at this step count it may still only be approaching convergence.