TensorFlow 1.x distributed training on MNIST

Complete, runnable examples are hard to find online, so the full code is given here directly. It is adapted from 《深入理解tensorflow架构设计与实现》.

A simple distributed example

distribute.py:

import tensorflow as tf
import time
flags = tf.app.flags
flags.DEFINE_string("ps_hosts", "", "comma sep ip:port")
flags.DEFINE_string("worker_hosts", "", "comma sep ip:port")
flags.DEFINE_string("job_name", "", "ps/worker")
flags.DEFINE_integer("task_index", 0, "iths hosts")

   
def main(_):
    workers = flags.FLAGS.worker_hosts.split(",")
    ps = flags.FLAGS.ps_hosts.split(",")

    cluster_spec = tf.train.ClusterSpec({
        "worker":workers,
        "ps":ps
    })
    print("cluster_spec:", cluster_spec, workers, ps)
    server = tf.train.Server(cluster_spec, job_name=flags.FLAGS.job_name, task_index=flags.FLAGS.task_index)
    if flags.FLAGS.job_name == "ps":
        print("ps running...")
        server.join()
        return
    is_chief = (flags.FLAGS.task_index == 0)
    with tf.device("/job:ps/task:0/cpu:0"):
        global_step = tf.Variable(0, tf.int32, name="global_step")
        gadd = tf.assign_add(global_step, 1)
    with tf.device(
        tf.train.replica_device_setter(
            worker_device="/job:worker/task:%d/cpu:%d" % (flags.FLAGS.task_index, 0),
            ps_device="/job:ps/cpu:0",
            cluster=cluster_spec
        )
    ):
        a = tf.Variable(0, dtype=tf.int32, name="a")
        c = a * a  # note: a starts at 0, so assign_add(a, a*a) never changes it
        adderA = tf.assign_add(a, c)
        printA = tf.Print(a, [a], "a=")
        
        b = tf.Variable(0, dtype=tf.int32, name="b")
        adderB = tf.assign_add(b, 1)
        printB = tf.Print(b, [b], "b=")
        train_op = [a, b, adderA, adderB]
        
        sess_config = tf.ConfigProto(
            allow_soft_placement=True,
            log_device_placement=True,
            device_filters = ["/job:ps", "/job:worker/task:%d" % flags.FLAGS.task_index]
        )
        print("worker running...")
        svr = tf.train.Supervisor(logdir="./supervisor_log", is_chief=is_chief, init_op=tf.global_variables_initializer(),
            local_init_op=tf.local_variables_initializer(),
            global_step=global_step)
        sess = svr.prepare_or_wait_for_session(server.target, config=sess_config)
        for i in range(10):
            print("run:", i, sess.run(train_op + [gadd]))
            time.sleep(2)
        print("finish, a,b=", sess.run(train_op[:2]))
if __name__ == "__main__":
    tf.app.run()
'''
python distribute.py  --ps_hosts=127.0.0.1:1111,127.0.0.1:2222 --worker_hosts=127.0.0.1:3333,127.0.0.1:4444 --job_name=ps --task_index=0 &
python distribute.py  --ps_hosts=127.0.0.1:1111,127.0.0.1:2222 --worker_hosts=127.0.0.1:3333,127.0.0.1:4444 --job_name=ps --task_index=1 &
python distribute.py  --ps_hosts=127.0.0.1:1111,127.0.0.1:2222 --worker_hosts=127.0.0.1:3333,127.0.0.1:4444 --job_name=worker --task_index=0 &
python distribute.py  --ps_hosts=127.0.0.1:1111,127.0.0.1:2222 --worker_hosts=127.0.0.1:3333,127.0.0.1:4444 --job_name=worker --task_index=1 &

'''
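To see where replica_device_setter actually puts things, you can build the graph without starting any server and inspect each node's device attribute. A minimal sketch (same two-PS/two-worker cluster as above; by default variables are placed round-robin across the ps tasks, while ops stay on the worker device):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

cluster = tf.train.ClusterSpec({
    "ps": ["127.0.0.1:1111", "127.0.0.1:2222"],
    "worker": ["127.0.0.1:3333", "127.0.0.1:4444"],
})
with tf.device(tf.train.replica_device_setter(
        worker_device="/job:worker/task:0/cpu:0", cluster=cluster)):
    v1 = tf.Variable(0, name="v1")
    v2 = tf.Variable(0, name="v2")
    total = v1 + v2
print(v1.device)     # /job:ps/task:0
print(v2.device)     # /job:ps/task:1
print(total.device)  # /job:worker/task:0/cpu:0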

Distributed training with a model

dist.py

# -*- coding: utf-8 -*-
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
#tf version: 2.5.0-rc3
import time
import math
#from tensorflow.examples.tutorials.mnist import input_data
import input_data
#1.2.1
flags = tf.app.flags
flags.DEFINE_string("ps_hosts", "", "comma sep ip:port")
flags.DEFINE_string("worker_hosts", "", "comma sep ip:port")
flags.DEFINE_string("job_name", "", "ps/worker")
flags.DEFINE_integer("task_index", 0, "iths hosts")
flags.DEFINE_string("data_dir", "./data/mnist_data", "Directory for storing mnist data")
flags.DEFINE_string("train_dir", "./supervisor_log", "Director form storing checkpoint")
flags.DEFINE_integer("replicas_to_aggregate", None, "Number of replicas to aggregate before parameter update is applied (for sync_replicas mode only; default: num_workers)")
flags.DEFINE_integer("hidden_units", 100, "Number of units in hidden layers of the NN")
flags.DEFINE_integer("train_steps", 200, "Number of (global) training steps to perform")
flags.DEFINE_integer("batch_size", 100, "Training batch size")
flags.DEFINE_float("learning_rate", 0.01, "Learning rate")
flags.DEFINE_bool("sync_replicas", False, "Use the sync_replicas(Synchornized replicas)mode, where in the parameter server updates from workers are aggregated before applied to avoid stale gradient")
IMAGE_PIXELS = 28
FLAGS=flags.FLAGS
def main(_):
    if len(FLAGS.job_name) == 0:
        raise ValueError("job_name cannot be empty")
    workers = flags.FLAGS.worker_hosts.split(",")
    ps = flags.FLAGS.ps_hosts.split(",")

    cluster_spec = tf.train.ClusterSpec({
        "worker":workers,
        "ps":ps
    })
    print("cluster_spec:", cluster_spec, workers, ps)
    server = tf.train.Server(cluster_spec, job_name=flags.FLAGS.job_name, task_index=flags.FLAGS.task_index)
    if flags.FLAGS.job_name == "ps":
        print("ps running...")
        server.join()
        return
    mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
    is_chief = (flags.FLAGS.task_index == 0)
    num_workers = len(workers)
    with tf.device(
        tf.train.replica_device_setter(
            worker_device="/job:worker/task:%d/cpu:%d" % (flags.FLAGS.task_index, 0),
            ps_device="/job:ps/cpu:0",
            cluster=cluster_spec
        )
    ):
        global_step = tf.Variable(0, trainable=False, name="global_step")
        # Build the network: input [batch_size, IMAGE_PIXELS*IMAGE_PIXELS] -> hidden [IMAGE_PIXELS*IMAGE_PIXELS, FLAGS.hidden_units] -> output [FLAGS.hidden_units, 10]
        hide_w = tf.Variable(tf.truncated_normal([IMAGE_PIXELS*IMAGE_PIXELS, FLAGS.hidden_units], stddev = 1.0/IMAGE_PIXELS), name="hide_w")
        hide_b = tf.Variable(tf.zeros([FLAGS.hidden_units]), name="hide_b")
        sm_w = tf.Variable(tf.truncated_normal([FLAGS.hidden_units, 10], stddev=1.0/math.sqrt(FLAGS.hidden_units)), name="sm_w")
        sm_b = tf.Variable(tf.zeros([10]), name="sm_b")
        x = tf.placeholder(dtype=tf.float32, shape=[None, IMAGE_PIXELS * IMAGE_PIXELS], name="x")
        y_ = tf.placeholder(dtype=tf.float32, shape=[None, 10], name="y_")
        hide_lin = tf.nn.xw_plus_b(x, hide_w, hide_b)
        hid = tf.nn.relu(hide_lin, name="hide1")
        y = tf.nn.softmax(tf.nn.xw_plus_b(hid, sm_w, sm_b))
        cross_entropy = -tf.reduce_sum(y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0)))
        opt = tf.train.AdamOptimizer(FLAGS.learning_rate)

        #同步训练&异步训练优化器不同
        if FLAGS.sync_replicas:
            if FLAGS.replicas_to_aggregate is None:
                print("replicas_to_aggregate is None, set to", num_workers)
                replicas_to_aggregate = num_workers
            else:
                replicas_to_aggregate = FLAGS.replicas_to_aggregate
            opt = tf.train.SyncReplicasOptimizer(opt, replicas_to_aggregate=replicas_to_aggregate, total_num_replicas=num_workers, name="mnist_sync_replicas")

        train_op = opt.minimize(cross_entropy, global_step=global_step)
        # Synchronous training requires extra initialization ops
        if FLAGS.sync_replicas:
            local_init_op = opt.local_step_init_op
            if is_chief:
                local_init_op = opt.chief_init_op
            ready_for_local_init_op = opt.ready_for_local_init_op
            chief_queue_runner = opt.get_chief_queue_runner()
            sync_init_op = opt.get_init_tokens_op()
        init_op = tf.global_variables_initializer()
        
        # Create the Supervisor; it manages the saver, the coordinator, and variable initialization
        if FLAGS.sync_replicas: # in sync mode, start the non-chief workers first and the chief worker last
            sv = tf.train.Supervisor(is_chief=is_chief,
                                        logdir=FLAGS.train_dir,
                                        init_op=init_op,
                                        local_init_op=local_init_op,
                                        ready_for_local_init_op=ready_for_local_init_op,
                                        recovery_wait_secs=1, # keep this small so recovery does not stall
                                        global_step=global_step
            )
        else:
            sv = tf.train.Supervisor(is_chief=is_chief,
                                        logdir=FLAGS.train_dir,
                                        init_op=init_op,
                                        global_step=global_step
            )
        sess_config = tf.ConfigProto(
            allow_soft_placement=True,
            log_device_placement=False,
            device_filters = ["/job:ps", "/job:worker/task:%d" % FLAGS.task_index]
        )
        if is_chief:
            print("Worker %d: Initializing session..." % FLAGS.task_index)
        else:
            print("Worker %d: Waiting form session to be initialized..." % FLAGS.task_index)

        sess = sv.prepare_or_wait_for_session(server.target, config=sess_config)
        
        # Only the chief runs the sync init op and starts the queue runners
        if is_chief and FLAGS.sync_replicas:
            print("init sync op and queue")
            sess.run(sync_init_op)
            sv.start_queue_runners(sess, [chief_queue_runner])
        begin_time = time.time()
        print("Training begins @ %f" % begin_time)
        local_step = 0
        try:
            while not sv.should_stop(): # training loop
                batch_xs, batch_ys = mnist.train.next_batch(FLAGS.batch_size)
                train_feed = {x:batch_xs, y_:batch_ys}
                # fetch the loss in the same run instead of paying for a second forward pass
                _, step, loss = sess.run([train_op, global_step, cross_entropy], feed_dict=train_feed)
                local_step += 1
                print("%f: Worker %d: training step %d done (global_step: %d) loss=%.4f" %
                      (time.time(), FLAGS.task_index, local_step, step, loss))
                if step > FLAGS.train_steps:
                    break
        except tf.errors.OutOfRangeError:
            print("out of range")
        finally:
            print("finish")

        end_time = time.time()
        train_time = end_time - begin_time
        print("Training elapsed time %fs" % train_time)
        val_feed = {x:mnist.validation.images, y_:mnist.validation.labels}
        val_xent = sess.run(cross_entropy, feed_dict=val_feed)
        step = sess.run(global_step)
        print("After %d training step(s), validation cross entropy = %g" % (step, val_xent))

if __name__ == "__main__":
    tf.app.run()
'''
Asynchronous training:
python dist.py  --ps_hosts=127.0.0.1:1111,127.0.0.1:2222 --worker_hosts=127.0.0.1:3333,127.0.0.1:4444 --job_name=ps --task_index=0 &
python dist.py  --ps_hosts=127.0.0.1:1111,127.0.0.1:2222 --worker_hosts=127.0.0.1:3333,127.0.0.1:4444 --job_name=ps --task_index=1 &
python dist.py  --ps_hosts=127.0.0.1:1111,127.0.0.1:2222 --worker_hosts=127.0.0.1:3333,127.0.0.1:4444 --job_name=worker --task_index=0 --train_steps=1000
python dist.py  --ps_hosts=127.0.0.1:1111,127.0.0.1:2222 --worker_hosts=127.0.0.1:3333,127.0.0.1:4444 --job_name=worker --task_index=1 --train_steps=1000

output:
...
1627614680.971620: Worker 0: training step 1000 done (global_step: 1443)
1627614680.974967: Worker 0: training step 1001 done (global_step: 1445)
Training elapsed time 3.548831s
After 1450 training step(s), validation cross entropy = 623.179

Synchronous training:
python dist.py  --ps_hosts=127.0.0.1:1111,127.0.0.1:2222 --worker_hosts=127.0.0.1:3333,127.0.0.1:4444 --job_name=ps --task_index=0 &
python dist.py  --ps_hosts=127.0.0.1:1111,127.0.0.1:2222 --worker_hosts=127.0.0.1:3333,127.0.0.1:4444 --job_name=ps --task_index=1 &
Start worker 1 first, then worker 0 (the chief):
python dist.py  --ps_hosts=127.0.0.1:1111,127.0.0.1:2222 --worker_hosts=127.0.0.1:3333,127.0.0.1:4444 --job_name=worker --task_index=1 --train_steps=5000 --sync_replicas=true
python dist.py  --ps_hosts=127.0.0.1:1111,127.0.0.1:2222 --worker_hosts=127.0.0.1:3333,127.0.0.1:4444 --job_name=worker --task_index=0 --train_steps=5000 --sync_replicas=true
'''
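tf.train.Supervisor is deprecated in later 1.x releases; the same training loop can be written with tf.train.MonitoredTrainingSession, which handles chief/non-chief coordination and wraps the SyncReplicasOptimizer bookkeeping in a hook. A sketch of how the session part of dist.py could look (same flags, graph, opt, and train_op as above):

hooks = [tf.train.StopAtStepHook(last_step=FLAGS.train_steps)]
if FLAGS.sync_replicas:
    # replaces the manual sync_init_op / chief_queue_runner handling above
    hooks.append(opt.make_session_run_hook(is_chief))
with tf.train.MonitoredTrainingSession(master=server.target,
                                       is_chief=is_chief,
                                       checkpoint_dir=FLAGS.train_dir,
                                       hooks=hooks,
                                       config=sess_config) as sess:
    while not sess.should_stop():
        batch_xs, batch_ys = mnist.train.next_batch(FLAGS.batch_size)
        sess.run(train_op, feed_dict={x: batch_xs, y_: batch_ys})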

input_data.py (dist.py imports it as input_data, so use this file name):

# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for downloading and reading MNIST data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import tensorflow.python.platform
import numpy
from six.moves import urllib
from six.moves import xrange  # pylint: disable=redefined-builtin
import tensorflow as tf
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
def maybe_download(filename, work_directory):
  """Download the data from Yann's website, unless it's already here."""
  if not os.path.exists(work_directory):
    os.mkdir(work_directory)
  filepath = os.path.join(work_directory, filename)
  if not os.path.exists(filepath):
    filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)
    statinfo = os.stat(filepath)
    print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
  return filepath
def _read32(bytestream):
  dt = numpy.dtype(numpy.uint32).newbyteorder('>')
  return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
def extract_images(filename):
  """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
  print('Extracting', filename)
  with gzip.open(filename) as bytestream:
    magic = _read32(bytestream)
    if magic != 2051:
      raise ValueError(
          'Invalid magic number %d in MNIST image file: %s' %
          (magic, filename))
    num_images = _read32(bytestream)
    rows = _read32(bytestream)
    cols = _read32(bytestream)
    buf = bytestream.read(rows * cols * num_images)
    data = numpy.frombuffer(buf, dtype=numpy.uint8)
    data = data.reshape(num_images, rows, cols, 1)
    return data
def dense_to_one_hot(labels_dense, num_classes=10):
  """Convert class labels from scalars to one-hot vectors."""
  num_labels = labels_dense.shape[0]
  index_offset = numpy.arange(num_labels) * num_classes
  labels_one_hot = numpy.zeros((num_labels, num_classes))
  labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
  return labels_one_hot
def extract_labels(filename, one_hot=False):
  """Extract the labels into a 1D uint8 numpy array [index]."""
  print('Extracting', filename)
  with gzip.open(filename) as bytestream:
    magic = _read32(bytestream)
    if magic != 2049:
      raise ValueError(
          'Invalid magic number %d in MNIST label file: %s' %
          (magic, filename))
    num_items = _read32(bytestream)
    buf = bytestream.read(num_items)
    labels = numpy.frombuffer(buf, dtype=numpy.uint8)
    if one_hot:
      return dense_to_one_hot(labels)
    return labels
class DataSet(object):
  def __init__(self, images, labels, fake_data=False, one_hot=False,
               dtype=tf.float32):
    """Construct a DataSet.
    one_hot arg is used only if fake_data is true.  `dtype` can be either
    `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into
    `[0, 1]`.
    """
    dtype = tf.as_dtype(dtype).base_dtype
    if dtype not in (tf.uint8, tf.float32):
      raise TypeError('Invalid image dtype %r, expected uint8 or float32' %
                      dtype)
    if fake_data:
      self._num_examples = 10000
      self.one_hot = one_hot
    else:
      assert images.shape[0] == labels.shape[0], (
          'images.shape: %s labels.shape: %s' % (images.shape,
                                                 labels.shape))
      self._num_examples = images.shape[0]
      # Convert shape from [num examples, rows, columns, depth]
      # to [num examples, rows*columns] (assuming depth == 1)
      assert images.shape[3] == 1
      images = images.reshape(images.shape[0],
                              images.shape[1] * images.shape[2])
      if dtype == tf.float32:
        # Convert from [0, 255] -> [0.0, 1.0].
        images = images.astype(numpy.float32)
        images = numpy.multiply(images, 1.0 / 255.0)
    self._images = images
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0
  @property
  def images(self):
    return self._images
  @property
  def labels(self):
    return self._labels
  @property
  def num_examples(self):
    return self._num_examples
  @property
  def epochs_completed(self):
    return self._epochs_completed
  def next_batch(self, batch_size, fake_data=False):
    """Return the next `batch_size` examples from this data set."""
    if fake_data:
      fake_image = [1] * 784
      if self.one_hot:
        fake_label = [1] + [0] * 9
      else:
        fake_label = 0
      return [fake_image for _ in xrange(batch_size)], [
          fake_label for _ in xrange(batch_size)]
    start = self._index_in_epoch
    self._index_in_epoch += batch_size
    if self._index_in_epoch > self._num_examples:
      # Finished epoch
      self._epochs_completed += 1
      # Shuffle the data
      perm = numpy.arange(self._num_examples)
      numpy.random.shuffle(perm)
      self._images = self._images[perm]
      self._labels = self._labels[perm]
      # Start next epoch
      start = 0
      self._index_in_epoch = batch_size
      assert batch_size <= self._num_examples
    end = self._index_in_epoch
    return self._images[start:end], self._labels[start:end]
def read_data_sets(train_dir, fake_data=False, one_hot=False, dtype=tf.float32):
  class DataSets(object):
    pass
  data_sets = DataSets()
  if fake_data:
    def fake():
      return DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype)
    data_sets.train = fake()
    data_sets.validation = fake()
    data_sets.test = fake()
    return data_sets
  TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
  TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
  TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
  TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
  VALIDATION_SIZE = 5000
  local_file = maybe_download(TRAIN_IMAGES, train_dir)
  train_images = extract_images(local_file)
  local_file = maybe_download(TRAIN_LABELS, train_dir)
  train_labels = extract_labels(local_file, one_hot=one_hot)
  local_file = maybe_download(TEST_IMAGES, train_dir)
  test_images = extract_images(local_file)
  local_file = maybe_download(TEST_LABELS, train_dir)
  test_labels = extract_labels(local_file, one_hot=one_hot)
  validation_images = train_images[:VALIDATION_SIZE]
  validation_labels = train_labels[:VALIDATION_SIZE]
  train_images = train_images[VALIDATION_SIZE:]
  train_labels = train_labels[VALIDATION_SIZE:]
  data_sets.train = DataSet(train_images, train_labels, dtype=dtype)
  data_sets.validation = DataSet(validation_images, validation_labels,
                                 dtype=dtype)
  data_sets.test = DataSet(test_images, test_labels, dtype=dtype)
  return data_sets
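A quick sanity check of the loader; these shapes are what dist.py relies on (55000 = 60000 training images minus the 5000 held out for validation):

import input_data

mnist = input_data.read_data_sets("./data/mnist_data", one_hot=True)
print(mnist.train.images.shape)       # (55000, 784), float32 scaled to [0, 1]
print(mnist.train.labels.shape)       # (55000, 10), one-hot
print(mnist.validation.images.shape)  # (5000, 784)
xs, ys = mnist.train.next_batch(100)  # xs: (100, 784), ys: (100, 10)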

A single-machine version based on Supervisor

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

a = tf.Variable(0, dtype=tf.int32)
adder = tf.assign_add(a, 1)
svr = tf.train.Supervisor(logdir="../data/supervisor_log")
with svr.managed_session() as sess:
    steps = 0
    while not svr.should_stop():
        sess.run(adder)
        steps += 1
        if steps >= 10:
            break
    print("a =", sess.run(a))
