Two TensorFlow program structure template drafts

Simple one

#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# @Time    : 2018/8/3 10:47
# @File    : tensorflow_template.py
# @Author  : yusisc (yusisc@gmail.com)

import numpy as np
import tensorflow as tf


# model
def model(xin):
    """Forward graph: a single 32-unit fully-connected layer.

    Args:
        xin: input tensor of shape (batch, features).

    Returns:
        The dense layer's output tensor (logits), shape (batch, 32).
    """
    return tf.layers.dense(xin, 32)


# hyper-parameters
batch_size = 20000
learning_rate = 0.001
epoch_N = 1000

# placeholder
xx = tf.placeholder(tf.float32, shape=[None, 784])
yy = tf.placeholder(tf.float32, shape=[None])

# loss
# NOTE(review): tf.nn.softmax_cross_entropy_with_logits expects `labels` to
# match the shape of `logits` ([None, 32] from model()), but yy is [None] —
# labels presumably need one-hot encoding. TODO confirm against the real data.
loss = tf.nn.softmax_cross_entropy_with_logits(
    labels=yy,
    logits=model(xx)
)

# optimizer, train_op
# Fix: use the `learning_rate` hyper-parameter instead of a hard-coded 0.001,
# so editing the value at the top of the file actually takes effect.
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss)
init_op = tf.global_variables_initializer()

# writer
writer = tf.summary.FileWriter(logdir='logs')

with tf.Session() as sess:
    sess.run(init_op)
    # Write the graph definition for TensorBoard before training starts.
    writer.add_graph(graph=sess.graph)
    writer.flush()

    # Placeholder dataset: 100 rows x 11 columns of uniform noise.
    data = np.random.rand(100, 11)

    # NOTE(review): with batch_size = 20000 and only 100 rows,
    # iter_let_N == 0, so the inner loop body never runs and no training
    # happens — verify batch_size against the real dataset size.
    iter_let_N = int(data.shape[0] / batch_size)

    for epoch_idx in range(epoch_N):
        for iter_let_idx in range(iter_let_N):
            # NOTE(review): column 0 is fed as x (but xx expects 784 features)
            # and columns 1: as y — the x/y slices look swapped; confirm.
            batch_x, batch_y = \
                data[iter_let_idx * batch_size:(iter_let_idx + 1) * batch_size, 0],\
                data[iter_let_idx * batch_size:(iter_let_idx + 1) * batch_size, 1:]
            _, loss_val = sess.run([train_op, loss],
                                   feed_dict={xx: batch_x,
                                              yy: batch_y})
            print(f'the loss_val of epoch: iter_let_idx ({epoch_idx:5d}:{iter_let_idx:5d}) is {loss_val}')

"""
1. `tf.placeholder` for store epoch_idx.
2. `tf.placeholder` for store `dropout_prob`?
3. Built-in epoch_idx in TensorFlow?
"""

Make use of multiple GPUs

#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# @Time    : 2018/8/3 11:57
# @File    : tensorflow_template_multiGPUs.py
# @Author  : yusisc (yusisc@gmail.com)

import numpy as np
import tensorflow as tf
from tensorflow.python.client import device_lib


def count_available_gpus():
    """Return the number of GPU devices visible to TensorFlow.

    Also prints the count and the device names for quick inspection.
    """
    gpu_names = [
        dev.name
        for dev in device_lib.list_local_devices()
        if dev.device_type == 'GPU'
    ]
    gpu_N = len(gpu_names)
    print(f'{gpu_N} GPUs are detected : {gpu_names}')
    return gpu_N

# model
def model(xin, keep_prob=1.0, reuse=False):
    """Single 32-unit dense layer under a reusable variable scope.

    Args:
        xin: input tensor.
        keep_prob: accepted but currently unused — presumably intended for a
            dropout layer that was never added; TODO confirm before relying
            on it.
        reuse: when True, reuse the 'dense1' scope's variables (needed so
            every GPU tower after the first shares one set of weights).

    Returns:
        Output tensor of the dense layer.
    """
    x = xin
    with tf.variable_scope('dense1', reuse=reuse):
        x = tf.layers.dense(x, 32, reuse=reuse)

    yout = x
    return yout


# hyper-parameters
batch_size = 20000
learning_rate = 0.001
epoch_N = 1000
gpu_N = count_available_gpus()

# placeholder
xx = tf.placeholder(tf.float32, shape=[None, 784])
yy = tf.placeholder(tf.float32, shape=[None])

# loss with multiple GPUs: one tower per GPU, all sharing variables
losses = []
# NOTE(review): tf.split requires the batch dimension to be evenly divisible
# by gpu_N — confirm batch_size % gpu_N == 0 for the real data.
xx_split = tf.split(xx, gpu_N)
yy_split = tf.split(yy, gpu_N)
for gpu_id in range(gpu_N):
    with tf.device(tf.DeviceSpec(device_type='GPU', device_index=gpu_id)):
        # Reuse variables on every tower after the first so all GPUs share
        # one set of weights.
        with tf.variable_scope(tf.get_variable_scope(), reuse=(gpu_id > 0)):
            # NOTE(review): labels (rank-1 split of yy) and logits
            # ([?, 32] from model()) have different shapes —
            # softmax_cross_entropy_with_logits expects them to match;
            # labels presumably need one-hot encoding. TODO confirm.
            loss_temp = tf.nn.softmax_cross_entropy_with_logits(
                labels=yy_split[gpu_id],
                logits=model(xx_split[gpu_id], reuse=gpu_id > 0)
            )
            losses.append(loss_temp)
# Average the per-example losses of all towers into one scalar.
loss = tf.reduce_mean(tf.concat(losses, axis=0))

# optimizer, train_op
# Fix: use the `learning_rate` hyper-parameter instead of a hard-coded 0.001,
# so editing the value at the top of the file actually takes effect.
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss)
init_op = tf.global_variables_initializer()

# writer
writer = tf.summary.FileWriter(logdir='logs')

with tf.Session() as sess:
    sess.run(init_op)
    # Write the graph definition for TensorBoard before training starts.
    writer.add_graph(graph=sess.graph)
    writer.flush()

    # Placeholder dataset: 100 rows x 11 columns of uniform noise.
    data = np.random.rand(100, 11)

    # NOTE(review): with batch_size = 20000 and only 100 rows,
    # iter_let_N == 0, so the inner loop body never runs and no training
    # happens — verify batch_size against the real dataset size.
    iter_let_N = int(data.shape[0] / batch_size)

    for epoch_idx in range(epoch_N):
        for iter_let_idx in range(iter_let_N):
            # NOTE(review): column 0 is fed as x (but xx expects 784 features)
            # and columns 1: as y — the x/y slices look swapped; confirm.
            batch_x, batch_y = \
                data[iter_let_idx * batch_size:(iter_let_idx + 1) * batch_size, 0],\
                data[iter_let_idx * batch_size:(iter_let_idx + 1) * batch_size, 1:]
            _, loss_val = sess.run([train_op, loss],
                                   feed_dict={xx: batch_x,
                                              yy: batch_y})
            print(f'the loss_val of epoch: iter_let_idx ({epoch_idx:5d}:{iter_let_idx:5d}) is {loss_val}')

ref:

TensorFlow-Multi-GPUs/many-GPUs-MNIST.py at master · golbin/TensorFlow-Multi-GPUs
https://github.com/golbin/TensorFlow-Multi-GPUs/blob/master/many-GPUs-MNIST.py

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值