# --- Semi-supervised GAN losses (feature matching for G; supervised
# --- cross-entropy + real/fake discrimination for D) and optimizers.

# Forward passes. discriminator_mlp returns (class logits, intermediate
# feature activations used for feature matching).
generated_images = model.generator_mlp(sample)
num_out = NUM_CLASSES
D_fake, fake_match = model.discriminator_mlp(generated_images, training, num_out=num_out)
D_real_lab, _ = model.discriminator_mlp(batch_images_labeled, training, num_out=num_out)
D_real_unlab, real_match_unlab = model.discriminator_mlp(batch_images_unlabeled, training, num_out=num_out)

# Generator loss: feature matching — minimize the squared distance between
# the batch-mean discriminator features of fake and real (unlabeled) data,
# instead of directly maximizing D's fake probability.
fake_match = tf.reduce_mean(fake_match, axis=0)
real_match_unlab = tf.reduce_mean(real_match_unlab, axis=0)
loss_G = tf.reduce_mean(tf.square(fake_match - real_match_unlab))

# Supervised head: cross-entropy on the labeled batch.
# NOTE(review): argmax implies batch_labels is one-hot — confirm with caller.
class_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=D_real_lab, labels=tf.argmax(batch_labels, axis=1))
class_loss = tf.reduce_mean(class_loss)

# Unsupervised head: D(x) = Z(x) / (Z(x) + 1) with Z(x) = sum_k exp(logit_k).
# FIX: the previous tf.reduce_sum(tf.exp(logits)) overflows to inf for
# float32 logits >~88, turning p into inf/inf = NaN and poisoning all
# gradients. Use the stable identity Z/(Z+1) = sigmoid(logsumexp(logits)),
# which is mathematically equivalent and finite for any logit magnitude.
p_unlab = tf.sigmoid(tf.reduce_logsumexp(D_real_unlab, axis=1))
p_fake = tf.sigmoid(tf.reduce_logsumexp(D_fake, axis=1))
# Real unlabeled samples should be classified "real" (label 1), fakes "fake" (0).
real_unlab_loss = tf.losses.log_loss(labels=tf.ones_like(p_unlab), predictions=p_unlab)
fake_loss = tf.losses.log_loss(labels=tf.zeros_like(p_fake), predictions=p_fake)
# 0.5 weight on the supervised term balances it against the two GAN terms.
loss_D = .5 * class_loss + real_unlab_loss + fake_loss

# Separate Adam optimizers over each network's variables, selected by the
# 'generator' / 'discriminator' substrings in the variable scope names.
g_vars = [var for var in tf.trainable_variables() if 'generator' in var.name]
d_vars = [var for var in tf.trainable_variables() if 'discriminator' in var.name]
opt_G = tf.train.AdamOptimizer(learn_rate, beta1=0.5).minimize(loss_G, var_list=g_vars)
opt_D = tf.train.AdamOptimizer(learn_rate, beta1=0.5).minimize(loss_D, var_list=d_vars)
opts = [opt_D, opt_G]