建立分布式训练(将判别器与生成器的损失计算分别放置到两块 GPU 上):
# Compute the GAN losses and build one optimizer per network,
# pinning the discriminator graph to GPU 0 and the generator graph to GPU 1.
# NOTE(review): D_logit_real / D_logit_fake / theta_D / theta_G are defined
# elsewhere in the file; presumably logits tensors and variable lists — confirm.
with tf.device('/gpu:0'):
    # Discriminator objective: label real samples 1 and generated samples 0.
    D_real_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            logits=D_logit_real,
            labels=tf.ones_like(D_logit_real)))
    D_fake_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            logits=D_logit_fake,
            labels=tf.zeros_like(D_logit_fake)))
    D_loss = D_fake_loss + D_real_loss
    # Minimize over the discriminator's variables only.
    D_solver = tf.train.AdamOptimizer().minimize(D_loss, var_list=theta_D)

with tf.device('/gpu:1'):
    # Generator objective: make the discriminator label generated samples 1.
    G_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(
            logits=D_logit_fake,
            labels=tf.ones_like(D_logit_fake)))
    # Minimize over the generator's variables only.
    G_solver = tf.train.AdamOptimizer().minimize(G_loss, var_list=theta_G)
分布式训练的运行结果: