Configuring an Aliyun Docker registry mirror, downloading the TensorFlow image, dataset preparation and addresses, and the mnist_cnn.py example


john@john-wang:~/tf2$ docker run -it --rm -v $PWD:/tmp -w /tmp tensorflow/tensorflow

Log in to Aliyun and open the Container Registry console:
https://account.aliyun.com/login/login.htm
https://cr.console.aliyun.com/cn-hangzhou/new
(account name: starwang119)
The dedicated accelerator address is listed under:
https://cr.console.aliyun.com/cn-hangzhou/instances/mirrors
For users running a Docker client newer than version 1.10.0:

You can enable the accelerator by editing the daemon configuration file /etc/docker/daemon.json:

sudo mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://rr0r4ivk.mirror.aliyuncs.com"]
}
EOF

john@john-wang:/$ sudo systemctl daemon-reload
john@john-wang:/$ sudo systemctl restart docker

john@john-virtual-machine:/etc/docker$ docker search nginx
docker pull xilinx/vitis-ai:latest
john@john-wang:/$ docker pull tensorflow/tensorflow:latest-gpu-jupyter
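Note that the GPU image only sees the GPU if the host has the NVIDIA Container Toolkit installed and the container is started with the --gpus flag; a quick check (the one-liner just prints the GPUs visible to TensorFlow):

docker run --gpus all -it --rm tensorflow/tensorflow:latest-gpu-jupyter python -c "import tensorflow as tf; print(tf.config.list_physical_devices('GPU'))"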

The dedicated accelerator address shown in the Aliyun console (here https://kitieeae.mirror.aliyuncs.com) is then used in the official steps below:

  1. Install or upgrade the Docker client
    Docker client version 1.10.0 or later is recommended; see the docker-ce documentation.

  2. Configure the registry mirror
    For Docker clients newer than 1.10.0:

You can enable the accelerator by editing the daemon configuration file /etc/docker/daemon.json:

# sudo mkdir -p /etc/docker
sudo vim /etc/docker/daemon.json 
{
  "registry-mirrors": ["https://kitieeae.mirror.aliyuncs.com"]
}
sudo systemctl daemon-reload
sudo systemctl restart docker
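
After restarting, you can confirm the mirror is active from the daemon info (the grep just trims the output to the relevant section):

john@john-wang:/$ docker info | grep -A 1 "Registry Mirrors"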

Download page and images
https://www.tensorflow.org/install/docker#examples_using_cpu-only_images
john@john-wang:/$ docker run -it --rm tensorflow/tensorflow:latest-jupyter
john@john-wang:/$ docker run -it --rm tensorflow/tensorflow
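
For the Jupyter image, also publish port 8888 so the notebook server started inside the container is reachable from a browser on the host:

john@john-wang:/$ docker run -it --rm -p 8888:8888 tensorflow/tensorflow:latest-jupyter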

Start the container with the current directory mounted as the working directory:
To run a TensorFlow program developed on the host machine within a container, mount the host directory and change the container's working directory (-v hostDir:containerDir -w workDir):
john@john-wang:~/tf2$ docker run -it --rm -v $PWD:/tmp -w /tmp tensorflow/tensorflow
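
With the directory mounted, a script saved on the host (for example the mnist_cnn.py listing below) runs inside the container directly:

john@john-wang:~/tf2$ docker run -it --rm -v $PWD:/tmp -w /tmp tensorflow/tensorflow python ./mnist_cnn.py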


Suppressing warnings
Python warnings:
import warnings
warnings.filterwarnings("ignore")

TensorFlow warnings
In a Python environment, set TensorFlow's log output level by adding the following two lines to the Python file; they must run before tensorflow is imported to take effect:
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

TF_CPP_MIN_LOG_LEVEL has four settings:
TF_CPP_MIN_LOG_LEVEL = 0  # default: all messages are shown
TF_CPP_MIN_LOG_LEVEL = 1  # INFO messages are filtered out
TF_CPP_MIN_LOG_LEVEL = 2  # INFO and WARNING messages are filtered out
TF_CPP_MIN_LOG_LEVEL = 3  # INFO, WARNING, and ERROR messages are filtered out
So when TensorFlow prints warnings you do not want to see, set the level to 2 or 3 as shown above.
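
Putting the two together, a minimal script header that silences both kinds of warnings (note the environment variable is set before tensorflow is imported):

import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # must be set before importing tensorflow

import warnings
warnings.filterwarnings("ignore")  # silence Python-level warnings

import tensorflow as tf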

Dataset preparation

import numpy as np
import tensorflow as tf

# numpy arrays
x = np.arange(0, 10)
# second array, filled in here so the zip below has two datasets to pair
y = np.arange(1, 11)
# create dataset objects from the arrays, convert to tensor
dx = tf.data.Dataset.from_tensor_slices(x)
dy = tf.data.Dataset.from_tensor_slices(y)
# zip the two datasets together, repeat indefinitely, batch in threes
dcomb = tf.data.Dataset.zip((dx, dy)).repeat().batch(3)
# create a Python iterator over the dataset
iterator = iter(dcomb)
# pull 15 batches with next()
for i in range(15):
    data = next(iterator)
    print(data)

The program above turns NumPy arrays into a zipped, repeating, batched dataset consumed through a Python iterator.
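
As a side note, the same pairing can be built without zip by passing a tuple straight to from_tensor_slices; a minimal sketch using the same arrays:

dcomb = tf.data.Dataset.from_tensor_slices((x, y)).repeat().batch(3)
for batch_x, batch_y in dcomb.take(5):
    print(batch_x.numpy(), batch_y.numpy())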

mnist_cnn.py example

import tensorflow as tf
import os
from tensorflow import keras
import numpy as np
import datetime as dt

# TF1-era eager-execution imports; eager mode is on by default in TF2, so they stay commented out:
# import tensorflow.contrib.eager as tfe
# tfe.enable_eager_execution()

(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
xlen = len(x_test)
x_test = tf.Variable(x_test)
x_test = tf.cast(x_test, tf.float32)
x_test = x_test / 255.0
# print(f"x_test {x_test.shape}")
# x_test = tf.reshape(x_test, (xlen, 28, 28, 1))
x_test = tf.reshape(x_test, (len(x_test), 28, 28, 1))

# TensorBoard log directory; the original hard-coded an absolute path, a relative one is more portable
STORE_PATH = './TensorBoard'

def get_batch(x_data, y_data, batch_size):
    idxs = np.random.randint(0, len(y_data), batch_size)
#   print(f"idxs = {idxs}")
    return x_data[idxs,:,:], y_data[idxs]


class ConvLayer(tf.keras.layers.Layer):
    def __init__(self, activation, input_channels, output_channels, window_size, pool_size, filt_stride, pool_stride,
                 initializer=tf.keras.initializers.he_normal()):
        super(ConvLayer, self).__init__()
        self.initializer = initializer
        self.activation = activation
        self.input_channels = input_channels
        self.output_channels = output_channels
        self.window_size = window_size
        self.pool_size = pool_size
        self.filt_stride = filt_stride
        self.pool_stride = pool_stride
        self.w = self.add_weight(shape=(window_size[0], window_size[1], input_channels, output_channels),
                                 initializer=self.initializer,
                                 trainable=True)
        self.b = self.add_weight(shape=(output_channels,), initializer=tf.zeros_initializer, trainable=True)

    def call(self, inputs):
        filt_stride = [1, self.filt_stride[0], self.filt_stride[1], 1]
        out_layer = tf.nn.conv2d(inputs, self.w, filt_stride, padding='SAME')
        # add the bias
        out_layer += self.b
        out_layer = self.activation(out_layer)
        pool_shape = [1, self.pool_size[0], self.pool_size[1], 1]
        pool_strides = [1, self.pool_stride[0], self.pool_stride[1], 1]
        out_layer = tf.nn.max_pool(out_layer, ksize=pool_shape, strides=pool_strides, padding='SAME')
        return out_layer

def loss_fn(logits, labels):
    return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels))

model = tf.keras.Sequential([
    ConvLayer(tf.nn.relu, 1, 32, [5, 5], [2, 2], [1, 1], [2, 2]),
    ConvLayer(tf.nn.relu, 32, 64, [5, 5], [2, 2], [1, 1], [2, 2]),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(300, activation=tf.nn.relu, kernel_initializer=tf.keras.initializers.he_normal()),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(10, activation=None)
])

optimizer = tf.keras.optimizers.Adam()
iterations = 5  # a quick smoke test; use around 5000 for real training
batch_size = 32
train_writer = tf.summary.create_file_writer(STORE_PATH + f"/MNIST_CNN_{dt.datetime.now().strftime('%d%m%Y%H%M')}")
for i in range(iterations):
    batch_x, batch_y = get_batch(x_train, y_train, batch_size=batch_size)
    # create tensors
    batch_x = tf.Variable(batch_x)
    batch_y = tf.Variable(batch_y)
    batch_y = tf.cast(batch_y, tf.int32)
    # get the images in the right format
    batch_x = tf.cast(batch_x, tf.float32)
    batch_x = batch_x / 255.0
#    print(f"batch_x = {batch_x.shape}")
    batch_x = tf.reshape(batch_x, (batch_size, 28, 28, 1))
    with tf.GradientTape() as tape:
        logits = model(batch_x)
#        print(f"logits = {logits[0]}")
        loss = loss_fn(logits, batch_y)  # softmax cross-entropy: p = softmax(logits), loss = batch mean of -log p[label]
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    if i % 1 == 0:  # log every iteration; raise the modulus for longer runs
        max_idxs = tf.argmax(logits, axis=1)
        print(f"logits = {logits[0]}")
        print(f"logits = {logits.shape}")
        print(max_idxs)
        print(max_idxs.numpy())
        print(batch_y.numpy())
        train_acc = np.sum(max_idxs.numpy() == batch_y.numpy()) / len(batch_y)
        test_logits = model(x_test, training=False)
        max_idxs = tf.argmax(test_logits, axis=1)
        test_acc = np.sum(max_idxs.numpy() == y_test) / len(y_test)
        print(f"Iter: {i}, loss={loss:.3f}, train accuracy={train_acc * 100:.3f}%, test accuracy={test_acc * 100:.3f}%")
        with train_writer.as_default():
            tf.summary.scalar('loss', loss, step=i)
            tf.summary.scalar('train_accuracy', train_acc, step=i)
            tf.summary.scalar('test_accuracy', test_acc, step=i)

# determine the test accuracy
logits = model(x_test, training=False)
max_idxs = tf.argmax(logits, axis=1)
acc = np.sum(max_idxs.numpy() == y_test) / len(y_test)
print("Final test accuracy is {:.2f}%".format(acc * 100))

Splits and slicing
https://www.tensorflow.org/datasets/splits
(cat_train, cat_valid, cat_test), info = tfds.load('cats_vs_dogs', split=['train[:80%]', 'train[80%:90%]', 'train[-10%:]'], with_info=True, as_supervised=True)
train_10_80pct_ds = tfds.load('mnist', split='train[:10%]+train[-80%:]')
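
A minimal sketch of consuming one of those splits (tensorflow_datasets is a separate pip package; the 128x128 resize is an arbitrary choice here, since cats_vs_dogs images vary in size):

import tensorflow as tf
import tensorflow_datasets as tfds

(cat_train, cat_valid, cat_test), info = tfds.load(
    'cats_vs_dogs',
    split=['train[:80%]', 'train[80%:90%]', 'train[-10%:]'],
    with_info=True, as_supervised=True)

def preprocess(image, label):
    # fixed shape and [0, 1] scaling so the images can be batched
    image = tf.image.resize(image, (128, 128)) / 255.0
    return image, label

train_ds = cat_train.map(preprocess).batch(32)
for images, labels in train_ds.take(1):
    print(images.shape, labels.numpy()[:5])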

Dataset addresses
https://www.tensorflow.org/datasets/catalog/cats_vs_dogs
https://www.tensorflow.org/datasets/api_docs/python/tfds/load
