Complete working code for distributed TensorFlow is hard to find online, so the full code is posted here. It comes from the book 《深入理解tensorflow架构设计与实现》.
A minimal distributed example
distribute.py:
import tensorflow.compat.v1 as tf  # the book targets TF 1.x; compat.v1 keeps this runnable on TF 2.x
tf.disable_v2_behavior()
import time

flags = tf.app.flags
flags.DEFINE_string("ps_hosts", "", "comma-separated list of ip:port")
flags.DEFINE_string("worker_hosts", "", "comma-separated list of ip:port")
flags.DEFINE_string("job_name", "", "ps/worker")
flags.DEFINE_integer("task_index", 0, "index of this task within its job")

def main(_):
    workers = flags.FLAGS.worker_hosts.split(",")
    ps = flags.FLAGS.ps_hosts.split(",")
    cluster_spec = tf.train.ClusterSpec({
        "worker": workers,
        "ps": ps
    })
    print("cluster_spec:", cluster_spec, workers, ps)
    server = tf.train.Server(cluster_spec, job_name=flags.FLAGS.job_name, task_index=flags.FLAGS.task_index)
    if flags.FLAGS.job_name == "ps":
        # parameter servers only serve variables; they block here forever
        print("ps running...")
        server.join()
        return
    is_chief = (flags.FLAGS.task_index == 0)
    # a shared counter pinned explicitly to the first parameter server
    with tf.device("/job:ps/task:0/cpu:0"):
        global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name="global_step")
        gadd = tf.assign_add(global_step, 1)
    # replica_device_setter places variables on the ps job and ops on this worker
    with tf.device(
        tf.train.replica_device_setter(
            worker_device="/job:worker/task:%d/cpu:%d" % (flags.FLAGS.task_index, 0),
            ps_device="/job:ps/cpu:0",
            cluster=cluster_spec
        )
    ):
        a = tf.Variable(0, dtype=tf.int32, name="a")
        c = a * a
        adderA = tf.assign_add(a, c)
        printA = tf.Print(a, [a], "a=")
        b = tf.Variable(0, dtype=tf.int32, name="b")
        adderB = tf.assign_add(b, 1)
        printB = tf.Print(b, [b], "b=")
        train_op = [a, b, adderA, adderB]
    sess_config = tf.ConfigProto(
        allow_soft_placement=True,
        log_device_placement=True,
        device_filters=["/job:ps", "/job:worker/task:%d" % flags.FLAGS.task_index]
    )
    print("worker running...")
    svr = tf.train.Supervisor(logdir="./supervisor_log", is_chief=is_chief,
                              init_op=tf.global_variables_initializer(),
                              local_init_op=tf.local_variables_initializer(),
                              global_step=global_step)
    sess = svr.prepare_or_wait_for_session(server.target, config=sess_config)
    for i in range(10):
        print("run:", i, sess.run(train_op + [gadd]))
        time.sleep(2)
    print("finish, a,b=", sess.run(train_op[:2]))

if __name__ == "__main__":
    tf.app.run()
'''
python distribute.py --ps_hosts=127.0.0.1:1111,127.0.0.1:2222 --worker_hosts=127.0.0.1:3333,127.0.0.1:4444 --job_name=ps --task_index=0 &
python distribute.py --ps_hosts=127.0.0.1:1111,127.0.0.1:2222 --worker_hosts=127.0.0.1:3333,127.0.0.1:4444 --job_name=ps --task_index=1 &
python distribute.py --ps_hosts=127.0.0.1:1111,127.0.0.1:2222 --worker_hosts=127.0.0.1:3333,127.0.0.1:4444 --job_name=worker --task_index=0 &
python distribute.py --ps_hosts=127.0.0.1:1111,127.0.0.1:2222 --worker_hosts=127.0.0.1:3333,127.0.0.1:4444 --job_name=worker --task_index=1 &
'''
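To sanity-check that the cluster actually came up, you can point a plain session at any worker's gRPC target and list the devices it sees. A minimal sketch (not from the book), assuming the ports used above:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

# Connect to worker 0 started above; list_devices() reports the devices
# registered in the cluster that this session can reach.
with tf.Session("grpc://127.0.0.1:3333") as sess:
    for dev in sess.list_devices():
        print(dev.name)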
Distributed training with a model
dist.py:
# -*- coding: utf-8 -*-
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
#tf version: 2.5.0-rc3
import time
import math
#from tensorflow.examples.tutorials.mnist import input_data
import input_data
#1.2.1
flags = tf.app.flags
flags.DEFINE_string("ps_hosts", "", "comma-separated list of ip:port")
flags.DEFINE_string("worker_hosts", "", "comma-separated list of ip:port")
flags.DEFINE_string("job_name", "", "ps/worker")
flags.DEFINE_integer("task_index", 0, "index of this task within its job")
flags.DEFINE_string("data_dir", "./data/mnist_data", "Directory for storing mnist data")
flags.DEFINE_string("train_dir", "./supervisor_log", "Directory for storing checkpoints")
flags.DEFINE_integer("replicas_to_aggregate", None, "Number of replicas to aggregate before a parameter update is applied (sync_replicas mode only; default: num_workers)")
flags.DEFINE_integer("hidden_units", 100, "Number of units in the hidden layer of the NN")
flags.DEFINE_integer("train_steps", 200, "Number of (global) training steps to perform")
flags.DEFINE_integer("batch_size", 100, "Training batch size")
flags.DEFINE_float("learning_rate", 0.01, "Learning rate")
flags.DEFINE_bool("sync_replicas", False, "Use the sync_replicas (synchronized replicas) mode, wherein parameter updates from workers are aggregated before being applied, to avoid stale gradients")

IMAGE_PIXELS = 28
FLAGS = flags.FLAGS

def main(_):
    if len(FLAGS.job_name) == 0:
        raise ValueError("job_name cannot be empty")
    workers = FLAGS.worker_hosts.split(",")
    ps = FLAGS.ps_hosts.split(",")
    cluster_spec = tf.train.ClusterSpec({
        "worker": workers,
        "ps": ps
    })
    print("cluster_spec:", cluster_spec, workers, ps)
    server = tf.train.Server(cluster_spec, job_name=FLAGS.job_name, task_index=FLAGS.task_index)
    if FLAGS.job_name == "ps":
        print("ps running...")
        server.join()
        return
    mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
    is_chief = (FLAGS.task_index == 0)
    num_workers = len(workers)
    with tf.device(
        tf.train.replica_device_setter(
            worker_device="/job:worker/task:%d/cpu:%d" % (FLAGS.task_index, 0),
            ps_device="/job:ps/cpu:0",
            cluster=cluster_spec
        )
    ):
        global_step = tf.Variable(0, trainable=False, name="global_step")
        # Build the model: [batch_size, IMAGE_PIXELS*IMAGE_PIXELS] -> [IMAGE_PIXELS*IMAGE_PIXELS, FLAGS.hidden_units] -> [FLAGS.hidden_units, 10]
        hide_w = tf.Variable(tf.truncated_normal([IMAGE_PIXELS * IMAGE_PIXELS, FLAGS.hidden_units], stddev=1.0 / IMAGE_PIXELS), name="hide_w")
        hide_b = tf.Variable(tf.zeros([FLAGS.hidden_units]), name="hide_b")
        sm_w = tf.Variable(tf.truncated_normal([FLAGS.hidden_units, 10], stddev=1.0 / math.sqrt(FLAGS.hidden_units)), name="sm_w")
        sm_b = tf.Variable(tf.zeros([10]), name="sm_b")
        x = tf.placeholder(dtype=tf.float32, shape=[None, IMAGE_PIXELS * IMAGE_PIXELS], name="x")
        y_ = tf.placeholder(dtype=tf.float32, shape=[None, 10], name="y_")
        hide_lin = tf.nn.xw_plus_b(x, hide_w, hide_b)
        hid = tf.nn.relu(hide_lin, name="hide1")
        y = tf.nn.softmax(tf.nn.xw_plus_b(hid, sm_w, sm_b))
        cross_entropy = -tf.reduce_sum(y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0)))
        opt = tf.train.AdamOptimizer(FLAGS.learning_rate)
        # Synchronous and asynchronous training use different optimizers
        if FLAGS.sync_replicas:
            if FLAGS.replicas_to_aggregate is None:
                print("replicas_to_aggregate is None, set to", num_workers)
                replicas_to_aggregate = num_workers
            else:
                replicas_to_aggregate = FLAGS.replicas_to_aggregate
            opt = tf.train.SyncReplicasOptimizer(opt, replicas_to_aggregate=replicas_to_aggregate, total_num_replicas=num_workers, name="mnist_sync_replicas")
        train_op = opt.minimize(cross_entropy, global_step=global_step)
        # Synchronous training needs extra initialization ops
        if FLAGS.sync_replicas:
            local_init_op = opt.local_step_init_op
            if is_chief:
                local_init_op = opt.chief_init_op
            ready_for_local_init_op = opt.ready_for_local_init_op
            chief_queue_runner = opt.get_chief_queue_runner()
            sync_init_op = opt.get_init_tokens_op()
        init_op = tf.global_variables_initializer()
    # Create the Supervisor; it manages the saver, the coordinator, and variable initialization
    if FLAGS.sync_replicas:  # in sync mode, start the other workers first and the chief worker last
        sv = tf.train.Supervisor(is_chief=is_chief,
                                 logdir=FLAGS.train_dir,
                                 init_op=init_op,
                                 local_init_op=local_init_op,
                                 ready_for_local_init_op=ready_for_local_init_op,
                                 recovery_wait_secs=1,  # keep this small
                                 global_step=global_step)
    else:
        sv = tf.train.Supervisor(is_chief=is_chief,
                                 logdir=FLAGS.train_dir,
                                 init_op=init_op,
                                 global_step=global_step)
    sess_config = tf.ConfigProto(
        allow_soft_placement=True,
        log_device_placement=False,
        device_filters=["/job:ps", "/job:worker/task:%d" % FLAGS.task_index]
    )
    if is_chief:
        print("Worker %d: Initializing session..." % FLAGS.task_index)
    else:
        print("Worker %d: Waiting for session to be initialized..." % FLAGS.task_index)
    sess = sv.prepare_or_wait_for_session(server.target, config=sess_config)
    # Only the chief initializes the sync tokens and starts the queue runner
    if is_chief and FLAGS.sync_replicas:
        print("init sync op and queue")
        sess.run(sync_init_op)
        sv.start_queue_runners(sess, [chief_queue_runner])
    begin_time = time.time()
    print("Training begins @ %f" % begin_time)
    local_step = 0
    try:
        while not sv.should_stop():  # training loop
            batch_xs, batch_ys = mnist.train.next_batch(FLAGS.batch_size)
            train_feed = {x: batch_xs, y_: batch_ys}
            _, step = sess.run([train_op, global_step], feed_dict=train_feed)
            local_step += 1
            # note: the extra sess.run below recomputes the loss just for logging
            print("%f: Worker %d: training step %d done (global_step: %d) loss=%.4f" %
                  (time.time(), FLAGS.task_index, local_step, step, sess.run(cross_entropy, feed_dict=train_feed)))
            if step > FLAGS.train_steps:
                break
    except tf.errors.OutOfRangeError:
        print("out of range")
    finally:
        print("finish")
    end_time = time.time()
    train_time = end_time - begin_time
    print("Training elapsed time %fs" % train_time)
    val_feed = {x: mnist.validation.images, y_: mnist.validation.labels}
    val_xent = sess.run(cross_entropy, feed_dict=val_feed)
    step = sess.run(global_step)
    print("After %d training step(s), validation cross entropy = %g" % (step, val_xent))

if __name__ == "__main__":
    tf.app.run()
'''
Asynchronous training:
python dist.py --ps_hosts=127.0.0.1:1111,127.0.0.1:2222 --worker_hosts=127.0.0.1:3333,127.0.0.1:4444 --job_name=ps --task_index=0 &
python dist.py --ps_hosts=127.0.0.1:1111,127.0.0.1:2222 --worker_hosts=127.0.0.1:3333,127.0.0.1:4444 --job_name=ps --task_index=1 &
python dist.py --ps_hosts=127.0.0.1:1111,127.0.0.1:2222 --worker_hosts=127.0.0.1:3333,127.0.0.1:4444 --job_name=worker --task_index=0 --train_steps=1000
python dist.py --ps_hosts=127.0.0.1:1111,127.0.0.1:2222 --worker_hosts=127.0.0.1:3333,127.0.0.1:4444 --job_name=worker --task_index=1 --train_steps=1000
output:
...
1627614680.971620: Worker 0: training step 1000 done (global_step: 1443)
1627614680.974967: Worker 0: training step 1001 done (global_step: 1445)
Training elapsed time 3.548831s
After 1450 training step(s), validation cross entropy = 623.179
Synchronous training:
python dist.py --ps_hosts=127.0.0.1:1111,127.0.0.1:2222 --worker_hosts=127.0.0.1:3333,127.0.0.1:4444 --job_name=ps --task_index=0 &
python dist.py --ps_hosts=127.0.0.1:1111,127.0.0.1:2222 --worker_hosts=127.0.0.1:3333,127.0.0.1:4444 --job_name=ps --task_index=1 &
Start worker 1 first, then worker 0:
python dist.py --ps_hosts=127.0.0.1:1111,127.0.0.1:2222 --worker_hosts=127.0.0.1:3333,127.0.0.1:4444 --job_name=worker --task_index=1 --train_steps=5000 --sync_replicas=true
python dist.py --ps_hosts=127.0.0.1:1111,127.0.0.1:2222 --worker_hosts=127.0.0.1:3333,127.0.0.1:4444 --job_name=worker --task_index=0 --train_steps=5000 --sync_replicas=true
'''
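dist.py only reports validation cross entropy. If you also want accuracy, two extra ops next to the model definition are enough. A sketch (the `accuracy` op is my addition, not part of the book's code):

# alongside the model definition in dist.py:
correct = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

# after training, next to the cross-entropy evaluation:
# print("validation accuracy =", sess.run(accuracy, feed_dict=val_feed))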
input_data.py (the file must be named input_data.py to match `import input_data` in dist.py):
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for downloading and reading MNIST data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import tensorflow.python.platform
import numpy
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
def maybe_download(filename, work_directory):
  """Download the data from Yann's website, unless it's already here."""
  if not os.path.exists(work_directory):
    os.mkdir(work_directory)
  filepath = os.path.join(work_directory, filename)
  if not os.path.exists(filepath):
    filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)
    statinfo = os.stat(filepath)
    print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
  return filepath


def _read32(bytestream):
  dt = numpy.dtype(numpy.uint32).newbyteorder('>')
  return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


def extract_images(filename):
  """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
  print('Extracting', filename)
  with gzip.open(filename) as bytestream:
    magic = _read32(bytestream)
    if magic != 2051:
      raise ValueError(
          'Invalid magic number %d in MNIST image file: %s' %
          (magic, filename))
    num_images = _read32(bytestream)
    rows = _read32(bytestream)
    cols = _read32(bytestream)
    buf = bytestream.read(rows * cols * num_images)
    data = numpy.frombuffer(buf, dtype=numpy.uint8)
    data = data.reshape(num_images, rows, cols, 1)
    return data


def dense_to_one_hot(labels_dense, num_classes=10):
  """Convert class labels from scalars to one-hot vectors."""
  num_labels = labels_dense.shape[0]
  index_offset = numpy.arange(num_labels) * num_classes
  labels_one_hot = numpy.zeros((num_labels, num_classes))
  labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
  return labels_one_hot


def extract_labels(filename, one_hot=False):
  """Extract the labels into a 1D uint8 numpy array [index]."""
  print('Extracting', filename)
  with gzip.open(filename) as bytestream:
    magic = _read32(bytestream)
    if magic != 2049:
      raise ValueError(
          'Invalid magic number %d in MNIST label file: %s' %
          (magic, filename))
    num_items = _read32(bytestream)
    buf = bytestream.read(num_items)
    labels = numpy.frombuffer(buf, dtype=numpy.uint8)
    if one_hot:
      return dense_to_one_hot(labels)
    return labels


class DataSet(object):

  def __init__(self, images, labels, fake_data=False, one_hot=False,
               dtype=tf.float32):
    """Construct a DataSet.

    one_hot arg is used only if fake_data is true. `dtype` can be either
    `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into
    `[0, 1]`.
    """
    dtype = tf.as_dtype(dtype).base_dtype
    if dtype not in (tf.uint8, tf.float32):
      raise TypeError('Invalid image dtype %r, expected uint8 or float32' %
                      dtype)
    if fake_data:
      self._num_examples = 10000
      self.one_hot = one_hot
    else:
      assert images.shape[0] == labels.shape[0], (
          'images.shape: %s labels.shape: %s' % (images.shape,
                                                 labels.shape))
      self._num_examples = images.shape[0]
      # Convert shape from [num examples, rows, columns, depth]
      # to [num examples, rows*columns] (assuming depth == 1)
      assert images.shape[3] == 1
      images = images.reshape(images.shape[0],
                              images.shape[1] * images.shape[2])
      if dtype == tf.float32:
        # Convert from [0, 255] -> [0.0, 1.0].
        images = images.astype(numpy.float32)
        images = numpy.multiply(images, 1.0 / 255.0)
    self._images = images
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0

  @property
  def images(self):
    return self._images

  @property
  def labels(self):
    return self._labels

  @property
  def num_examples(self):
    return self._num_examples

  @property
  def epochs_completed(self):
    return self._epochs_completed

  def next_batch(self, batch_size, fake_data=False):
    """Return the next `batch_size` examples from this data set."""
    if fake_data:
      fake_image = [1] * 784
      if self.one_hot:
        fake_label = [1] + [0] * 9
      else:
        fake_label = 0
      return [fake_image for _ in xrange(batch_size)], [
          fake_label for _ in xrange(batch_size)]
    start = self._index_in_epoch
    self._index_in_epoch += batch_size
    if self._index_in_epoch > self._num_examples:
      # Finished epoch
      self._epochs_completed += 1
      # Shuffle the data
      perm = numpy.arange(self._num_examples)
      numpy.random.shuffle(perm)
      self._images = self._images[perm]
      self._labels = self._labels[perm]
      # Start next epoch
      start = 0
      self._index_in_epoch = batch_size
      assert batch_size <= self._num_examples
    end = self._index_in_epoch
    return self._images[start:end], self._labels[start:end]


def read_data_sets(train_dir, fake_data=False, one_hot=False, dtype=tf.float32):
  class DataSets(object):
    pass
  data_sets = DataSets()
  if fake_data:
    def fake():
      return DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype)
    data_sets.train = fake()
    data_sets.validation = fake()
    data_sets.test = fake()
    return data_sets
  TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
  TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
  TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
  TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
  VALIDATION_SIZE = 5000
  local_file = maybe_download(TRAIN_IMAGES, train_dir)
  train_images = extract_images(local_file)
  local_file = maybe_download(TRAIN_LABELS, train_dir)
  train_labels = extract_labels(local_file, one_hot=one_hot)
  local_file = maybe_download(TEST_IMAGES, train_dir)
  test_images = extract_images(local_file)
  local_file = maybe_download(TEST_LABELS, train_dir)
  test_labels = extract_labels(local_file, one_hot=one_hot)
  validation_images = train_images[:VALIDATION_SIZE]
  validation_labels = train_labels[:VALIDATION_SIZE]
  train_images = train_images[VALIDATION_SIZE:]
  train_labels = train_labels[VALIDATION_SIZE:]
  data_sets.train = DataSet(train_images, train_labels, dtype=dtype)
  data_sets.validation = DataSet(validation_images, validation_labels,
                                 dtype=dtype)
  data_sets.test = DataSet(test_images, test_labels, dtype=dtype)
  return data_sets
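For reference, this is how dist.py consumes the module; the counts follow from the standard MNIST split (60000 training images, of which VALIDATION_SIZE=5000 are held out, plus 10000 test images):

import input_data

# Downloads the four gzip files into the directory on first use.
mnist = input_data.read_data_sets("./data/mnist_data", one_hot=True)
print(mnist.train.num_examples)       # 55000
print(mnist.validation.num_examples)  # 5000
print(mnist.test.num_examples)        # 10000
batch_xs, batch_ys = mnist.train.next_batch(100)
print(batch_xs.shape, batch_ys.shape)  # (100, 784) (100, 10)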
A single-machine version based on Supervisor
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

a = tf.Variable(0, dtype=tf.int32)
adder = tf.assign_add(a, 1)
# With no explicit init_op, the Supervisor initializes variables itself and
# restores/saves checkpoints under logdir automatically.
svr = tf.train.Supervisor(logdir="../data/supervisor_log")
with svr.managed_session() as sess:
    steps = 0
    while not svr.should_stop():
        sess.run(adder)
        steps += 1
        if steps >= 10:
            break
    print("a =", sess.run(a))