1. Why use TensorFlow?
What TensorFlow does is free you from detail-level questions such as "how do I read in a pile of images", "how do I handle grayscale pixel values", or "how do I normalize a vector", and from worrying about all kinds of bugs creeping into your model; you can therefore concentrate on building an excellent model and on improving its performance, pushing its accuracy higher, making it generalize better, and so on.
Heavy concepts, light code.
2. Key characteristics of TensorFlow
To do efficient numerical computing in Python, we usually rely on libraries such as NumPy, which implement expensive operations like matrix multiplication outside Python in another language. Unfortunately, every switch back from that external computation into Python still incurs significant overhead. If the external computation runs on a GPU, the overhead is even larger, and in a distributed setting still more resources go into shuttling data around.
TensorFlow also pushes its heavy computation outside Python, but it goes a step further to avoid that overhead. Rather than running each expensive operation independently, TensorFlow lets us first describe an entire graph of interacting operations and then run them all together outside Python. (A similar execution style can be seen in quite a few machine learning libraries.)
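A minimal sketch of this describe-then-run style, using the same TF 1.x API as the code below: the tf.constant and tf.matmul calls only add nodes to the graph, and nothing is computed until sess.run hands the whole graph off at once.

import tensorflow as tf

# building the graph: these lines describe the computation but run nothing
a = tf.constant([[1.0, 2.0]])    # 1x2 matrix
b = tf.constant([[3.0], [4.0]])  # 2x1 matrix
product = tf.matmul(a, b)        # a symbolic node, not a value yet

# executing the whole graph outside Python in one call
with tf.Session() as sess:
    print(sess.run(product))     # [[11.]]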
3. Implementing MNIST digit recognition with TensorFlow
import tensorflow as tf
import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# input images as N*784 matrix
x = tf.placeholder("float", [None, 784])
# weight
W = tf.Variable(tf.zeros([784, 10]))
# bias
b = tf.Variable(tf.zeros([10]))
# softmax predict
y = tf.nn.softmax(tf.matmul(x, W) + b)
# labels
y_ = tf.placeholder("float", [None, 10])
cross_entropy = -tf.reduce_sum(y_*tf.log(y))
# train setting: learning rate 0.01, loss function cross entropy
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
# initialize
init = tf.global_variables_initializer()
# create a session
sess = tf.Session()
sess.run(init)
# training: 1000 steps of mini-batch stochastic gradient descent
for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
# evaluation: compare predicted classes against the true labels on the test set
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
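One caveat: -tf.reduce_sum(y_*tf.log(y)) is numerically fragile, since tf.log(0) yields -inf if the softmax ever saturates. A safer sketch (my variant, not part of the tutorial) computes the loss from the raw logits with tf.nn.softmax_cross_entropy_with_logits, which fuses the softmax and the log internally; because this loss is averaged over the batch rather than summed, a larger learning rate (e.g. 0.5) is commonly paired with it:

logits = tf.matmul(x, W) + b
# fused, numerically stable softmax + cross entropy, averaged over the batch
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)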
Appendix:
URL of the MNIST handwritten digit recognition tutorial from the TensorFlow Chinese community:
http://www.tensorfly.cn/tfdoc/tutorials/mnist_beginners.html
input_data.py can no longer be downloaded from that page; I found it elsewhere.
Source: https://blog.csdn.net/akadiao/article/details/78351021 — thanks to the original author.
#!/usr/bin/python
# coding:utf-8
# Functions for downloading and reading MNIST data
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import tensorflow.python.platform
import numpy
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
# Download the data from Yann LeCun's website if it is not already present
def maybe_download(filename, work_directory):
    if not os.path.exists(work_directory):
        os.mkdir(work_directory)
    filepath = os.path.join(work_directory, filename)
    # If the file is missing locally, fetch it from the source site
    if not os.path.exists(filepath):
        filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)
        statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    return filepath
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder('>')
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
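# Note (illustration, not part of the original file): the '>' byte order is
# needed because the IDX files store their 32-bit header integers big-endian:
#   >>> numpy.frombuffer(b'\x00\x00\x08\x03',
#   ...                  dtype=numpy.dtype(numpy.uint32).newbyteorder('>'))[0]
#   2051    # the image-file magic number checked in extract_images below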
# Extract the images into a 4-D uint8 numpy array [index, y, x, depth]
def extract_images(filename):
    print('Extracting', filename)
    with gzip.open(filename) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError('Invalid magic number %d in MNIST image file: %s' % (magic, filename))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
# Convert class labels from scalars to one-hot vectors
def dense_to_one_hot(labels_dense, num_classes=10):
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
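# A quick check of dense_to_one_hot (illustration, not part of the original
# file): row i gets a 1 in column labels_dense[i]; the flat-index trick
# writes all the ones in a single vectorized assignment.
#   >>> dense_to_one_hot(numpy.array([1, 0, 9]))
#   array([[0., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
#          [1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
#          [0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]])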
# Extract the labels into a 1-D uint8 numpy array [index]
def extract_labels(filename, one_hot=False):
    print('Extracting', filename)
    with gzip.open(filename) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError('Invalid magic number %d in MNIST label file: %s' % (magic, filename))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return dense_to_one_hot(labels)
        return labels
# The DataSet class
# The one_hot arg is only used when fake_data is true
# `dtype` can be either `uint8`, to leave the input as `[0, 255]`, or `float32`, to rescale into `[0, 1]`
class DataSet(object):
    def __init__(self, images, labels, fake_data=False, one_hot=False, dtype=tf.float32):
        dtype = tf.as_dtype(dtype).base_dtype
        if dtype not in (tf.uint8, tf.float32):
            raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert images.shape[0] == labels.shape[0], ('images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
            self._num_examples = images.shape[0]
            # Reshape from [num examples, rows, columns, depth] to [num examples, rows*columns] (assuming depth == 1)
            assert images.shape[3] == 1
            images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == tf.float32:
                # Rescale from [0, 255] to [0.0, 1.0]
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed
    # Return the next `batch_size` examples from this data set
    def next_batch(self, batch_size, fake_data=False):
        if fake_data:
            fake_image = [1] * 784
            if self.one_hot:
                fake_label = [1] + [0] * 9
            else:
                fake_label = 0
            return [fake_image for _ in xrange(batch_size)], [fake_label for _ in xrange(batch_size)]
        start = self._index_in_epoch
        self._index_in_epoch += batch_size
        # Finished an epoch
        if self._index_in_epoch > self._num_examples:
            # Reshuffle the data
            self._epochs_completed += 1
            perm = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm)
            self._images = self._images[perm]
            self._labels = self._labels[perm]
            # Start the next epoch
            start = 0
            self._index_in_epoch = batch_size
            assert batch_size <= self._num_examples
        end = self._index_in_epoch
        return self._images[start:end], self._labels[start:end]
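# Usage sketch (illustration, not part of the original file), assuming
# `mnist` came from read_data_sets below with one_hot=True:
#   >>> batch_xs, batch_ys = mnist.train.next_batch(100)
#   >>> batch_xs.shape, batch_ys.shape
#   ((100, 784), (100, 10))
# Once the cursor passes num_examples, the data is reshuffled and
# epochs_completed increments, so each epoch sees a new example order.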
# Read the training, validation and test data sets
def read_data_sets(train_dir, fake_data=False, one_hot=False, dtype=tf.float32):
    class DataSets(object):
        pass
    data_sets = DataSets()
    # If fake_data is true, return fake data sets
    if fake_data:
        def fake():
            return DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype)
        data_sets.train = fake()
        data_sets.validation = fake()
        data_sets.test = fake()
        return data_sets
    # File names of the training and test data
    TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
    TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
    TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
    TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
    VALIDATION_SIZE = 5000
    # Download and read the training and test data
    local_file = maybe_download(TRAIN_IMAGES, train_dir)
    train_images = extract_images(local_file)
    local_file = maybe_download(TRAIN_LABELS, train_dir)
    train_labels = extract_labels(local_file, one_hot=one_hot)
    local_file = maybe_download(TEST_IMAGES, train_dir)
    test_images = extract_images(local_file)
    local_file = maybe_download(TEST_LABELS, train_dir)
    test_labels = extract_labels(local_file, one_hot=one_hot)
    # Take the first 5000 examples as validation data
    validation_images = train_images[:VALIDATION_SIZE]
    validation_labels = train_labels[:VALIDATION_SIZE]
    # Everything after the first 5000 becomes training data
    train_images = train_images[VALIDATION_SIZE:]
    train_labels = train_labels[VALIDATION_SIZE:]
    # Build the train, validation and test DataSets
    data_sets.train = DataSet(train_images, train_labels, dtype=dtype)
    data_sets.validation = DataSet(validation_images, validation_labels, dtype=dtype)
    data_sets.test = DataSet(test_images, test_labels, dtype=dtype)
    return data_sets
The downloaded dataset is split into two parts: a 60,000-row training set (mnist.train) and a 10,000-row test set (mnist.test). As the code above shows, read_data_sets then carves the first 5,000 training examples off into a validation set (mnist.validation), leaving 55,000 examples for actual training.
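A quick sanity check of those splits (a sketch; the shapes follow from VALIDATION_SIZE = 5000 above):

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
print(mnist.train.images.shape)       # (55000, 784)
print(mnist.validation.images.shape)  # (5000, 784)
print(mnist.test.images.shape)        # (10000, 784)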