GitHub: https://github.com/CreatCodeBuild/TensorFlow-and-DeepLearning-Tutorial/tree/master/Season1/12-15
Video: https://space.bilibili.com/16696495/channel/detail?cid=1588
For the theory behind convolutional neural networks, please make sure to read the cs231n lecture notes. They are in English, but the material is approachable and easy to read without losing detail or depth; it is an excellent resource for understanding convolutional neural networks.
The Theano tutorial also gives a very detailed introduction, plus some very nice animations. This is a TensorFlow tutorial, but the knowledge carries over.
This series is programming-oriented, so all theory is only touched on briefly, with links out to higher-quality theory resources; that way everyone learns more efficiently.
New concepts in Episode 13
Max Pooling
Pooling downscales an image, and the operation loses precision. If the pooling scale is 2, the image's height and width are each reduced to 1/2, i.e., only one value is kept for every 4 pixels.
Max pooling keeps the pixel with the largest value, while average pooling takes the mean of the 4 pixels. Research suggests max pooling usually works better, so it is the one used in the code examples.
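A minimal NumPy sketch (illustrative only, not part of the tutorial code) comparing 2x2 max pooling and average pooling on a small image:

import numpy as np

x = np.array([[1, 2, 5, 6],
              [3, 4, 7, 8],
              [9, 8, 3, 2],
              [7, 6, 1, 0]], dtype=np.float32)

# split the 4x4 image into 2x2 blocks -> shape (block_row, block_col, 2, 2)
blocks = x.reshape(2, 2, 2, 2).transpose(0, 2, 1, 3)
print(blocks.max(axis=(2, 3)))   # max pooling:     [[4. 8.] [9. 3.]]
print(blocks.mean(axis=(2, 3)))  # average pooling: [[2.5 6.5] [7.5 1.5]]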
What a ReLU Layer Means
ReLU is an activation function, defined as: relu(x) = max(x, 0)
or, written another way:
relu(x) = x if x > 0 else 0
So ReLU is just a thresholded linear function.
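A quick sanity check in NumPy (again, only an illustration):

import numpy as np

x = np.array([-2.0, -0.5, 0.0, 1.5, 3.0])
print(np.maximum(x, 0))  # -> [0.  0.  0.  1.5 3. ], exactly relu(x)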
Please refer to:
the cs231n notes on the convolutional layer architecture
the Wikipedia article on ReLU
API
from dp_refined_api import Network
# First, get your data somehow
train_samples, train_labels, test_samples, test_labels = get_your_data()
net = Network(train_batch_size=64, test_batch_size=500, pooling_scale=2)
net.define_inputs(
train_samples_shape=(64, image_size, image_size, num_channels),
train_labels_shape=(64, num_labels),
test_samples_shape=(500, image_size, image_size, num_channels)
)
net.add_conv(patch_size=3, in_depth=num_channels, out_depth=16, activation='relu', pooling=False, name='conv1')
net.add_conv(patch_size=3, in_depth=16, out_depth=16, activation='relu', pooling=True, name='conv2')
# 2 = one pooling step; each step halves height and width
image_size = 32
net.add_fc(in_num_nodes=(image_size // 2) * (image_size // 2) * 16, out_num_nodes=16, activation='relu', name='fc1')
net.add_fc(in_num_nodes=16, out_num_nodes=10, activation=None, name='fc2')
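# Worked example for the shapes above: one pooling step of scale 2 shrinks the
# 32x32 maps to 16x16, and conv2 outputs 16 channels, so fc1 receives
# (32 // 2) * (32 // 2) * 16 = 16 * 16 * 16 = 4096 input nodes.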
# After adding all layers, define the model
net.define_model()
# Run the network
# data_iterator is a user-defined generator function that feeds data to the network
net.run(data_iterator, train_samples, train_labels, test_samples, test_labels)
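A minimal sketch of such a generator (the repository's own version is the get_chunk function defined in dp.py and main.py below); it yields (step, samples, labels) tuples and emits full chunks only, since the placeholders have fixed batch sizes:

def data_iterator(samples, labels, chunkSize):
    # yield (step_index, sample_batch, label_batch); full chunks only
    if len(samples) != len(labels):
        raise Exception('Length of samples and labels must equal')
    start, step = 0, 0
    while start + chunkSize <= len(samples):
        yield step, samples[start:start + chunkSize], labels[start:start + chunkSize]
        step += 1
        start += chunkSize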
load.py
# encoding:utf-8
# Python 2 compatibility
from __future__ import print_function, division
from scipy.io import loadmat as load
import matplotlib.pyplot as plt
import numpy as np
def reformat(samples, labels):
# Reshape the raw data:
# axes 0 1 2 3 -> 3 0 1 2
# (height, width, channels, num_images) -> (num_images, height, width, channels)
new = np.transpose(samples, (3, 0, 1, 2)).astype(np.float32)
# turn labels into one-hot encodings, e.g. [2] -> [0, 0, 1, 0, 0, 0, 0, 0, 0, 0]
# the digit 0 is stored as 10 in this dataset,
# so [10] -> [1, 0, 0, 0, 0, 0, 0, 0, 0, 0]
labels = np.array([x[0] for x in labels]) # slow code, whatever
one_hot_labels = []
for num in labels:
one_hot = [0.0] * 10
if num == 10:
one_hot[0] = 1.0
else:
one_hot[num] = 1.0
one_hot_labels.append(one_hot)
labels = np.array(one_hot_labels).astype(np.float32)
return new, labels
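# Side note (a sketch, not in the original file): the one-hot loop above can be vectorized:
#   digits = np.array([x[0] for x in labels]) % 10   # 10 % 10 == 0, so digit 0 maps to index 0
#   one_hot_labels = np.eye(10, dtype=np.float32)[digits]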
def normalize(samples):
'''
Grayscale: three color channels -> one channel, to save memory and speed up training:
(R + G + B) / 3
Then linearly map pixel values from 0 ~ 255 to -1.0 ~ +1.0
@samples: numpy array
'''
a = np.add.reduce(samples, keepdims=True, axis=3) # sum over channels; shape (num_images, height, width, 1)
a = a/3.0
return a/128.0 - 1.0
def distribution(labels, name):
# Inspect the distribution of the labels and draw a bar chart
# keys: 0, 1, 2, ..., 9
count = {}
for label in labels:
key = 0 if label[0] == 10 else label[0]
if key in count:
count[key] += 1
else:
count[key] = 1
x = []
y = []
for k, v in count.items():
# print(k, v)
x.append(k)
y.append(v)
y_pos = np.arange(len(x))
plt.bar(y_pos, y, align='center', alpha=0.5)
plt.xticks(y_pos, x)
plt.ylabel('Count')
plt.title(name + ' Label Distribution')
plt.show()
def inspect(dataset, labels, i):
# Display one image for inspection
if dataset.shape[3] == 1:
shape = dataset.shape
dataset = dataset.reshape(shape[0], shape[1], shape[2])
print(labels[i])
plt.imshow(dataset[i])
plt.show()
train = load('../data/train_32x32.mat')
test = load('../data/test_32x32.mat')
# extra = load('../data/extra_32x32.mat')
# print('Train Samples Shape:', train['X'].shape)
# print('Train Labels Shape:', train['y'].shape)
# print('Test Samples Shape:', test['X'].shape)
# print('Test Labels Shape:', test['y'].shape)
# print('Extra Samples Shape:', extra['X'].shape)
# print('Extra Labels Shape:', extra['y'].shape)
train_samples = train['X']
train_labels = train['y']
test_samples = test['X']
test_labels = test['y']
# extra_samples = extra['X']
# extra_labels = extra['y']
n_train_samples, _train_labels = reformat(train_samples, train_labels)
n_test_samples, _test_labels = reformat(test_samples, test_labels)
_train_samples = normalize(n_train_samples)
_test_samples = normalize(n_test_samples)
num_labels = 10
image_size = 32
num_channels = 1
if __name__ == '__main__':
# Explore the data
inspect(_train_samples, _train_labels, 1234)
# _train_samples = normalize(_train_samples)
# inspect(_train_samples, _train_labels, 1234)
# distribution(train_labels, 'Train Labels')
# distribution(test_labels, 'Test Labels')
dp_refined_api.py
# The new refined API does not support Python 2
import tensorflow as tf
from sklearn.metrics import confusion_matrix
import numpy as np
class Network():
def __init__(self, train_batch_size, test_batch_size, pooling_scale):
'''
@train_batch_size / @test_batch_size: we process data in batches to save memory; these set the number of samples per training / testing batch
@pooling_scale: side length (and stride) of the max-pooling window
'''
self.train_batch_size = train_batch_size
self.test_batch_size = test_batch_size
# Hyper Parameters
self.conv_config = [] # list of dict
self.fc_config = [] # list of dict
self.conv_weights = []
self.conv_biases = []
self.fc_weights = []
self.fc_biases = []
self.pooling_scale = pooling_scale
self.pooling_stride = pooling_scale
# Graph Related
self.tf_train_samples = None
self.tf_train_labels = None
self.tf_test_samples = None
self.tf_test_labels = None
# Summary statistics
self.merged = None
self.train_summaries = []
self.test_summaries = []
def add_conv(self, *, patch_size, in_depth, out_depth, activation='relu', pooling=False, name):
'''
This function does not define operations in the graph; it only stores the layer config in self.conv_config
'''
self.conv_config.append({
'patch_size': patch_size,
'in_depth': in_depth,
'out_depth': out_depth,
'activation': activation,
'pooling': pooling,
'name': name
})
with tf.name_scope(name):
weights = tf.Variable(
tf.truncated_normal([patch_size, patch_size, in_depth, out_depth], stddev=0.1), name=name+'_weights')
biases = tf.Variable(tf.constant(0.1, shape=[out_depth]), name=name+'_biases')
self.conv_weights.append(weights)
self.conv_biases.append(biases)
def add_fc(self, *, in_num_nodes, out_num_nodes, activation='relu', name):
'''
Add fully connected layer config to self.fc_config
'''
self.fc_config.append({
'in_num_nodes': in_num_nodes,
'out_num_nodes': out_num_nodes,
'activation': activation,
'name': name
})
with tf.name_scope(name):
weights = tf.Variable(tf.truncated_normal([in_num_nodes, out_num_nodes], stddev=0.1))
biases = tf.Variable(tf.constant(0.1, shape=[out_num_nodes]))
self.fc_weights.append(weights)
self.fc_biases.append(biases)
self.train_summaries.append(tf.histogram_summary(str(len(self.fc_weights))+'_weights', weights))
self.train_summaries.append(tf.histogram_summary(str(len(self.fc_biases))+'_biases', biases))
# the input definition is exposed as an API (define_inputs) instead of being implemented inside define_model
def define_inputs(self, *, train_samples_shape, train_labels_shape, test_samples_shape):
# This only defines the placeholders in the graph
with tf.name_scope('inputs'):
self.tf_train_samples = tf.placeholder(tf.float32, shape=train_samples_shape, name='tf_train_samples')
self.tf_train_labels = tf.placeholder(tf.float32, shape=train_labels_shape, name='tf_train_labels')
self.tf_test_samples = tf.placeholder(tf.float32, shape=test_samples_shape, name='tf_test_samples')
def define_model(self):
'''
Define the computation graph
'''
def model(data_flow, train=True):
'''
@data_flow: original inputs
@return: logits
'''
# Define Convolutional Layers
for i, (weights, biases, config) in enumerate(zip(self.conv_weights, self.conv_biases, self.conv_config)):
with tf.name_scope(config['name'] + '_model'):
with tf.name_scope('convolution'):
# default 1,1,1,1 stride and SAME padding
data_flow = tf.nn.conv2d(data_flow, filter=weights, strides=[1, 1, 1, 1], padding='SAME')
data_flow = data_flow + biases
if not train:
self.visualize_filter_map(data_flow, how_many=config['out_depth'], display_size=32//(i//2+1), name=config['name']+'_conv')
if config['activation'] == 'relu':
data_flow = tf.nn.relu(data_flow)
if not train:
self.visualize_filter_map(data_flow, how_many=config['out_depth'], display_size=32//(i//2+1), name=config['name']+'_relu')
else:
raise Exception('Activation Func can only be Relu right now. You passed', config['activation'])
if config['pooling']:
data_flow = tf.nn.max_pool(
data_flow,
ksize=[1, self.pooling_scale, self.pooling_scale, 1],
strides=[1, self.pooling_stride, self.pooling_stride, 1],
padding='SAME')
if not train:
self.visualize_filter_map(data_flow, how_many=config['out_depth'], display_size=32//(i//2+1)//2, name=config['name']+'_pooling')
# Define Fully Connected Layers
for i, (weights, biases, config) in enumerate(zip(self.fc_weights, self.fc_biases, self.fc_config)):
if i == 0:
shape = data_flow.get_shape().as_list()
data_flow = tf.reshape(data_flow, [shape[0], shape[1] * shape[2] * shape[3]])
with tf.name_scope(config['name'] + '_model'):
data_flow = tf.matmul(data_flow, weights) + biases
if config['activation'] == 'relu':
data_flow = tf.nn.relu(data_flow)
elif config['activation'] is None:
pass
else:
raise Exception('Activation Func can only be Relu or None right now. You passed', config['activation'])
return data_flow
# Training computation.
logits = model(self.tf_train_samples)
with tf.name_scope('loss'):
self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, self.tf_train_labels))
self.train_summaries.append(tf.scalar_summary('Loss', self.loss))
# Optimizer.
with tf.name_scope('optimizer'):
self.optimizer = tf.train.GradientDescentOptimizer(0.0001).minimize(self.loss)
# Predictions for the training, validation, and test data.
with tf.name_scope('train'):
self.train_prediction = tf.nn.softmax(logits, name='train_prediction')
with tf.name_scope('test'):
self.test_prediction = tf.nn.softmax(model(self.tf_test_samples, train=False), name='test_prediction')
self.merged_train_summary = tf.merge_summary(self.train_summaries)
self.merged_test_summary = tf.merge_summary(self.test_summaries)
def run(self, data_iterator, train_samples, train_labels, test_samples, test_labels):
'''
Runs training and testing in a Session
@data_iterator: a generator function that yields chunks of data
'''
# private function
def print_confusion_matrix(confusionMatrix):
print('Confusion Matrix:')
for i, line in enumerate(confusionMatrix):
print(line, line[i] / np.sum(line))
a = 0
for i, column in enumerate(np.transpose(confusionMatrix, (1, 0))):
a += (column[i] / np.sum(column)) * (np.sum(column) / 26000) # 26000 = test samples evaluated (full 500-sample batches only)
print(column[i] / np.sum(column), )
print('\n', np.sum(confusionMatrix), a)
self.writer = tf.train.SummaryWriter('./board', tf.get_default_graph())
with tf.Session(graph=tf.get_default_graph()) as session:
tf.initialize_all_variables().run()
### Training
print('Start Training')
for i, samples, labels in data_iterator(train_samples, train_labels, chunkSize=self.train_batch_size):
_, l, predictions, summary = session.run(
[self.optimizer, self.loss, self.train_prediction, self.merged_train_summary],
feed_dict={self.tf_train_samples: samples, self.tf_train_labels: labels}
)
self.writer.add_summary(summary, i)
# labels are the ground-truth labels
accuracy, _ = self.accuracy(predictions, labels)
if i % 50 == 0:
print('Minibatch loss at step %d: %f' % (i, l))
print('Minibatch accuracy: %.1f%%' % accuracy)
###
### Testing
accuracies = []
confusionMatrices = []
for i, samples, labels in data_iterator(test_samples, test_labels, chunkSize=self.test_batch_size):
print('samples shape', samples.shape)
result, summary = session.run(
[self.test_prediction, self.merged_test_summary],
feed_dict={self.tf_test_samples: samples}
)
# result = self.test_prediction.eval(feed_dict={self.tf_test_samples: samples})
self.writer.add_summary(summary, i)
accuracy, cm = self.accuracy(result, labels, need_confusion_matrix=True)
accuracies.append(accuracy)
confusionMatrices.append(cm)
print('Test Accuracy: %.1f%%' % accuracy)
print(' Average Accuracy:', np.average(accuracies))
print('Standard Deviation:', np.std(accuracies))
print_confusion_matrix(np.add.reduce(confusionMatrices))
###
def accuracy(self, predictions, labels, need_confusion_matrix=False):
'''
Compute the prediction accuracy (per-class precision/recall can be read from the confusion matrix)
@return: accuracy and confusionMatrix as a tuple
'''
_predictions = np.argmax(predictions, 1)
_labels = np.argmax(labels, 1)
cm = confusion_matrix(_labels, _predictions) if need_confusion_matrix else None
# == is overloaded for numpy array
accuracy = (100.0 * np.sum(_predictions == _labels) / predictions.shape[0])
return accuracy, cm
def visualize_filter_map(self, tensor, *, how_many, display_size, name):
print(tensor.get_shape())  # note the parentheses; without them this prints the bound method, not the shape
filter_map = tensor[-1]  # visualize the last sample in the batch
print(filter_map.get_shape())
filter_map = tf.transpose(filter_map, perm=[2, 0, 1])
print(filter_map.get_shape())
filter_map = tf.reshape(filter_map, (how_many, display_size, display_size, 1))
print(how_many)
self.test_summaries.append(tf.image_summary(name, tensor=filter_map, max_images=how_many))
dp.py
# For the Python 2 players
from __future__ import print_function, division
# Third-party libraries
import tensorflow as tf
from sklearn.metrics import confusion_matrix
import numpy as np
# Our own module
import load
train_samples, train_labels = load._train_samples, load._train_labels
test_samples, test_labels = load._test_samples, load._test_labels
print('Training set', train_samples.shape, train_labels.shape)
print(' Test set', test_samples.shape, test_labels.shape)
image_size = load.image_size
num_labels = load.num_labels
num_channels = load.num_channels
def get_chunk(samples, labels, chunkSize):
'''
Iterator/Generator: get a batch of data
This function is a generator, yielding chunkSize samples at a time
Use it in a for loop, just like the range() function
'''
if len(samples) != len(labels):
raise Exception('Length of samples and labels must equal')
stepStart = 0 # initial step
i = 0
while stepStart < len(samples):
stepEnd = stepStart + chunkSize
if stepEnd <= len(samples): # <= so an exact-fit final chunk is kept; partial chunks are still dropped
yield i, samples[stepStart:stepEnd], labels[stepStart:stepEnd]
i += 1
stepStart = stepEnd
class Network():
def __init__(self, num_hidden, batch_size, conv_depth, patch_size, pooling_scale):
'''
@num_hidden: number of nodes in the hidden (fully connected) layer
@batch_size: we process data in batches to save memory; this is the number of samples per batch
'''
self.batch_size = batch_size
self.test_batch_size = 500
# Hyper Parameters
self.num_hidden = num_hidden
self.patch_size = patch_size # size of the convolution window
self.conv1_depth = conv_depth
self.conv2_depth = conv_depth
self.conv3_depth = conv_depth
self.conv4_depth = conv_depth
self.last_conv_depth = self.conv4_depth
self.pooling_scale = pooling_scale
self.pooling_stride = self.pooling_scale # Max Pooling Stride
# Graph Related
self.graph = tf.Graph()
self.tf_train_samples = None
self.tf_train_labels = None
self.tf_test_samples = None
self.tf_test_labels = None
self.tf_test_prediction = None
# Summary statistics
self.merged = None
self.train_summaries = []
self.test_summaries = []
# Initialization
self.define_graph()
self.session = tf.Session(graph=self.graph)
self.writer = tf.train.SummaryWriter('./board', self.graph)
def define_graph(self):
'''
Define the computation graph
'''
with self.graph.as_default():
# This only defines the placeholders in the graph
with tf.name_scope('inputs'):
self.tf_train_samples = tf.placeholder(
tf.float32, shape=(self.batch_size, image_size, image_size, num_channels), name='tf_train_samples'
)
self.tf_train_labels = tf.placeholder(
tf.float32, shape=(self.batch_size, num_labels), name='tf_train_labels'
)
self.tf_test_samples = tf.placeholder(
tf.float32, shape=(self.test_batch_size, image_size, image_size, num_channels), name='tf_test_samples'
)
with tf.name_scope('conv1'):
conv1_weights = tf.Variable(
tf.truncated_normal([self.patch_size, self.patch_size, num_channels, self.conv1_depth], stddev=0.1))
conv1_biases = tf.Variable(tf.zeros([self.conv1_depth]))
with tf.name_scope('conv2'):
conv2_weights = tf.Variable(
tf.truncated_normal([self.patch_size, self.patch_size, self.conv1_depth, self.conv2_depth], stddev=0.1))
conv2_biases = tf.Variable(tf.constant(0.1, shape=[self.conv2_depth]))
with tf.name_scope('conv3'):
conv3_weights = tf.Variable(
tf.truncated_normal([self.patch_size, self.patch_size, self.conv2_depth, self.conv3_depth], stddev=0.1))
conv3_biases = tf.Variable(tf.constant(0.1, shape=[self.conv3_depth]))
with tf.name_scope('conv4'):
conv4_weights = tf.Variable(
tf.truncated_normal([self.patch_size, self.patch_size, self.conv3_depth, self.conv4_depth], stddev=0.1))
conv4_biases = tf.Variable(tf.constant(0.1, shape=[self.conv4_depth]))
# fully connected layer 1
with tf.name_scope('fc1'):
down_scale = self.pooling_scale ** 2 # squared because we pool twice, each time with stride pooling_scale
fc1_weights = tf.Variable(
tf.truncated_normal(
[(image_size // down_scale) * (image_size // down_scale) * self.last_conv_depth, self.num_hidden], stddev=0.1))
fc1_biases = tf.Variable(tf.constant(0.1, shape=[self.num_hidden]))
self.train_summaries.append(tf.histogram_summary('fc1_weights', fc1_weights))
self.train_summaries.append(tf.histogram_summary('fc1_biases', fc1_biases))
# fully connected layer 2 --> output layer
with tf.name_scope('fc2'):
fc2_weights = tf.Variable(tf.truncated_normal([self.num_hidden, num_labels], stddev=0.1), name='fc2_weights')
fc2_biases = tf.Variable(tf.constant(0.1, shape=[num_labels]), name='fc2_biases')
self.train_summaries.append(tf.histogram_summary('fc2_weights', fc2_weights))
self.train_summaries.append(tf.histogram_summary('fc2_biases', fc2_biases))
# Now define the operations in the graph
def model(data, train=True):
'''
@data: original inputs
@return: logits
'''
with tf.name_scope('conv1_model'):
with tf.name_scope('convolution'):
conv1 = tf.nn.conv2d(data, filter=conv1_weights, strides=[1, 1, 1, 1], padding='SAME')
addition = conv1 + conv1_biases
hidden = tf.nn.relu(addition)
if not train:
# transpose the output of the activation into images
# the conv1 relu output has shape (batch_size, 32, 32, conv1_depth)
# conv1_depth filter maps from this convolution, i.e. conv1_depth grayscale images
# image size is 32x32
# only the last sample in the batch is used as the record
filter_map = hidden[-1]
filter_map = tf.transpose(filter_map, perm=[2, 0, 1])
filter_map = tf.reshape(filter_map, (self.conv1_depth, 32, 32, 1))
self.test_summaries.append(tf.image_summary('conv1_relu', tensor=filter_map, max_images=self.conv1_depth))
with tf.name_scope('conv2_model'):
with tf.name_scope('convolution'):
conv2 = tf.nn.conv2d(hidden, filter=conv2_weights, strides=[1, 1, 1, 1], padding='SAME')
addition = conv2 + conv2_biases
hidden = tf.nn.relu(addition)
hidden = tf.nn.max_pool(
hidden,
ksize=[1,self.pooling_scale,self.pooling_scale,1],
strides=[1,self.pooling_stride,self.pooling_stride,1],
padding='SAME')
with tf.name_scope('conv3_model'):
with tf.name_scope('convolution'):
conv3 = tf.nn.conv2d(hidden, filter=conv3_weights, strides=[1, 1, 1, 1], padding='SAME')
addition = conv3 + conv3_biases
hidden = tf.nn.relu(addition)
with tf.name_scope('conv4_model'):
with tf.name_scope('convolution'):
conv4 = tf.nn.conv2d(hidden, filter=conv4_weights, strides=[1, 1, 1, 1], padding='SAME')
addition = conv4 + conv4_biases
hidden = tf.nn.relu(addition)
# if not train:
# filter_map = hidden[-1]
# filter_map = tf.transpose(filter_map, perm=[2, 0, 1])
# filter_map = tf.reshape(filter_map, (self.conv4_depth, 16, 16, 1))
# tf.image_summary('conv4_relu', tensor=filter_map, max_images=self.conv4_depth)
hidden = tf.nn.max_pool(
hidden,
ksize=[1,self.pooling_scale,self.pooling_scale,1],
strides=[1,self.pooling_stride,self.pooling_stride,1],
padding='SAME')
# fully connected layer 1
shape = hidden.get_shape().as_list()
reshape = tf.reshape(hidden, [shape[0], shape[1] * shape[2] * shape[3]])
with tf.name_scope('fc1_model'):
fc1_model = tf.matmul(reshape, fc1_weights) + fc1_biases
hidden = tf.nn.relu(fc1_model)
# fully connected layer 2
with tf.name_scope('fc2_model'):
return tf.matmul(hidden, fc2_weights) + fc2_biases
# Training computation.
logits = model(self.tf_train_samples)
with tf.name_scope('loss'):
self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, self.tf_train_labels))
self.train_summaries.append(tf.scalar_summary('Loss', self.loss))
# Optimizer.
with tf.name_scope('optimizer'):
self.optimizer = tf.train.GradientDescentOptimizer(0.0001).minimize(self.loss)
# Predictions for the training, validation, and test data.
with tf.name_scope('train'):
self.train_prediction = tf.nn.softmax(logits, name='train_prediction')
with tf.name_scope('test'):
self.test_prediction = tf.nn.softmax(model(self.tf_test_samples, train=False), name='test_prediction')
self.merged_train_summary = tf.merge_summary(self.train_summaries)
self.merged_test_summary = tf.merge_summary(self.test_summaries)
def run(self):
'''
Runs training and testing in the Session
'''
# private function
def print_confusion_matrix(confusionMatrix):
print('Confusion Matrix:')
for i, line in enumerate(confusionMatrix):
print(line, line[i]/np.sum(line))
a = 0
for i, column in enumerate(np.transpose(confusionMatrix, (1, 0))):
a += (column[i]/np.sum(column))*(np.sum(column)/26000) # 26000 = test samples evaluated (full 500-sample batches only)
print(column[i]/np.sum(column),)
print('\n',np.sum(confusionMatrix), a)
with self.session as session:
tf.initialize_all_variables().run()
### Training
print('Start Training')
for i, samples, labels in get_chunk(train_samples, train_labels, chunkSize=self.batch_size):
_, l, predictions, summary = session.run(
[self.optimizer, self.loss, self.train_prediction, self.merged_train_summary],
feed_dict={self.tf_train_samples: samples, self.tf_train_labels: labels}
)
self.writer.add_summary(summary, i)
# labels are the ground-truth labels
accuracy, _ = self.accuracy(predictions, labels)
if i % 50 == 0:
print('Minibatch loss at step %d: %f' % (i, l))
print('Minibatch accuracy: %.1f%%' % accuracy)
###
### Testing
accuracies = []
confusionMatrices = []
for i, samples, labels in get_chunk(test_samples, test_labels, chunkSize=self.test_batch_size):
result, summary = session.run(
[self.test_prediction, self.merged_test_summary],
feed_dict={self.tf_test_samples: samples}
)
# result = self.test_prediction.eval()
self.writer.add_summary(summary, i)
accuracy, cm = self.accuracy(result, labels, need_confusion_matrix=True)
accuracies.append(accuracy)
confusionMatrices.append(cm)
print('Test Accuracy: %.1f%%' % accuracy)
print(' Average Accuracy:', np.average(accuracies))
print('Standard Deviation:', np.std(accuracies))
print_confusion_matrix(np.add.reduce(confusionMatrices))
###
def accuracy(self, predictions, labels, need_confusion_matrix=False):
'''
Compute the prediction accuracy (per-class precision/recall can be read from the confusion matrix)
@return: accuracy and confusionMatrix as a tuple
'''
_predictions = np.argmax(predictions, 1)
_labels = np.argmax(labels, 1)
cm = confusion_matrix(_labels, _predictions) if need_confusion_matrix else None
# == is overloaded for numpy array
accuracy = (100.0 * np.sum(_predictions == _labels) / predictions.shape[0])
return accuracy, cm
if __name__ == '__main__':
net = Network(num_hidden=16, batch_size=64, patch_size=3, conv_depth=16, pooling_scale=2)
net.run()
main.py
if __name__ == '__main__':
import load
from dp_refined_api import Network
train_samples, train_labels = load._train_samples, load._train_labels
test_samples, test_labels = load._test_samples, load._test_labels
print('Training set', train_samples.shape, train_labels.shape)
print(' Test set', test_samples.shape, test_labels.shape)
image_size = load.image_size
num_labels = load.num_labels
num_channels = load.num_channels
def get_chunk(samples, labels, chunkSize):
'''
Iterator/Generator: get a batch of data
This function is a generator, yielding chunkSize samples at a time
Use it in a for loop, just like the range() function
'''
if len(samples) != len(labels):
raise Exception('Length of samples and labels must equal')
stepStart = 0 # initial step
i = 0
while stepStart < len(samples):
stepEnd = stepStart + chunkSize
if stepEnd <= len(samples): # <= so an exact-fit final chunk is kept; partial chunks are still dropped
yield i, samples[stepStart:stepEnd], labels[stepStart:stepEnd]
i += 1
stepStart = stepEnd
net = Network(train_batch_size=64, test_batch_size=500, pooling_scale=2)
net.define_inputs(
train_samples_shape=(64, image_size, image_size, num_channels),
train_labels_shape=(64, num_labels),
test_samples_shape=(500, image_size, image_size, num_channels)
)
#
net.add_conv(patch_size=3, in_depth=num_channels, out_depth=16, activation='relu', pooling=False, name='conv1')
net.add_conv(patch_size=3, in_depth=16, out_depth=16, activation='relu', pooling=True, name='conv2')
net.add_conv(patch_size=3, in_depth=16, out_depth=16, activation='relu', pooling=False, name='conv3')
net.add_conv(patch_size=3, in_depth=16, out_depth=16, activation='relu', pooling=True, name='conv4')
# 4 = two pooling steps, each halving height and width
# 16 = conv4 out_depth
net.add_fc(in_num_nodes=(image_size // 4) * (image_size // 4) * 16, out_num_nodes=16, activation='relu', name='fc1')
net.add_fc(in_num_nodes=16, out_num_nodes=10, activation=None, name='fc2')
net.define_model()
net.run(get_chunk, train_samples, train_labels, test_samples, test_labels)
else:
raise Exception('main.py: Should Not Be Imported!!! Must Run by "python main.py"')