大部分内容都来自于tf官方教程并经过本人实际测试,官方地址:https://www.tensorflow.org/tutorials/distribute/keras
方式一:
这种方式其实是混杂了custom training loops 和 keras model 两种方式,更纯粹的是只使用 custom training loops 或者 keras 方式去训练。
import tensorflow as tf
from nets.single_posenet import singlePosenet
from configs.spn_config import params
from dataset.dataset import get_dataset
import os
import time
if __name__ == '__main__':
# Control console log verbosity; '1' filters out INFO-level TensorFlow C++ log messages.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
# Limit which physical GPUs this process can enumerate (ids 0 and 1 here).
os.environ['CUDA_VISIBLE_DEVICES'] = '0, 1'
visible_gpus = tf.config.experimental.list_physical_devices('GPU')
print('Visible devices : ', visible_gpus)
# Logical GPU ids to train on; these index into the visible set configured above.
gpu_ids = [0,1]
devices = ['/device:GPU:{}'.format(i) for i in gpu_ids]
print ('Used devices: ', devices)
gpu_nums = len(gpu_ids)
# MirroredStrategy does synchronous data-parallel training across the listed devices.
strategy = tf.distribute.MirroredStrategy(devices=devices)
print (strategy.num_replicas_in_sync)
# Model and optimizer must be created inside the strategy scope so that their
# variables are mirrored (replicated) onto every participating GPU.
with strategy.scope():
if params['finetune'] is not None:
# Resume training from a previously saved full Keras model.
model = tf.keras.models.load_model(params['finetune'])
print('Successfully restore pretained model from {}'.format(params['finetune']))
else:
# Build the model from scratch: image input -> singlePosenet outputs.
# NOTE(review): assumes params provides 'height', 'width' and 'num_joints'
# keys — confirm against configs/spn_config.py.
inputs = tf.keras.Input(shape=(params['height'], params['width'], 3),name='modelInput')
outputs = singlePosenet(inputs, outc=params['num_joints'] + 1, training=True)
model = tf.keras.Model(inputs, outputs)
optimizer = tf.optimizers.Adam(learning_rate=3e-4)
# Dataset creation, also under the strategy scope. The snippet is truncated
# here — the next line is cut off mid-identifier (presumably a
# strategy.experimental_distribute_dataset call; original source is incomplete).
with strategy.scope():
dataset = get_dataset(gpu_nums)
dist_