Loss Functions of Generative Networks

This post collects how several open-source medical-image generative models implement their training losses, one repository per section.

1. medSynthesis

The discriminator is trained with a plain binary cross-entropy (BCE) loss: real outputs are pushed toward the real label, generated outputs toward the fake label, and the two terms are summed.

criterion_bce = nn.BCELoss()

loss_real = criterion_bce(outputD_real, real_label)
loss_real.backward()

loss_fake = criterion_bce(outputD_fake, fake_label)
loss_fake.backward()

lossD = loss_real + loss_fake

The generator's adversarial term appears in two variants: a BCE form and a WGAN-style form.

# Variant 1: BCE on the discriminator's sigmoid output
outputD_real = netD(labels)
outputD = netD(outputG)
outputD = torch.sigmoid(outputD)
lossG_D = opt.lambda_AD*criterion_bce(outputD, real_label)

# Variant 2: WGAN-style -- maximize the critic's mean score on fakes
outputD_fake = netD(outputG)
lossG_D = opt.lambda_AD*outputD_fake.mean()
lossG_D.backward(mone)  # mone is a -1 tensor, so this ascends the critic score
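
In full image-to-image training the adversarial term is combined with a supervised reconstruction term. A minimal, hedged sketch of that combination, reusing the names above (the L1 pairing and the weights are illustrative assumptions, not medSynthesis's exact recipe):

import torch
import torch.nn as nn

criterion_bce = nn.BCELoss()
criterion_l1 = nn.L1Loss()

def generator_loss(netG, netD, inputs, targets, real_label,
                   lambda_AD=0.05, lambda_L1=1.0):
    # Hypothetical combined objective: adversarial term + voxel-wise L1
    outputG = netG(inputs)
    outputD = torch.sigmoid(netD(outputG))
    loss_adv = criterion_bce(outputD, real_label)  # fool the discriminator
    loss_rec = criterion_l1(outputG, targets)      # stay close to ground truth
    return lambda_AD*loss_adv + lambda_L1*loss_rec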

2. HA-GAN

HA-GAN's discriminator scores a high-resolution sub-volume crop together with a low-resolution view of the whole volume, and is trained with BCE on raw logits:

loss_f = nn.BCEWithLogitsLoss()
loss_mse = nn.L1Loss()  # note: despite the name, this is an L1 criterion in the original code

# Real branch: high-res crop plus the downsampled full volume
y_real_pred = D(real_images_crop, real_images_small, crop_idx)
d_real_loss = loss_f(y_real_pred, real_labels)

# Fake branch: G emits both resolutions for the same crop index
fake_images, fake_images_small = G(noise, crop_idx=crop_idx, class_label=None)
y_fake_pred = D(fake_images, fake_images_small, crop_idx)
d_fake_loss = loss_f(y_fake_pred, fake_labels)

d_loss = d_real_loss + d_fake_loss
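
The generator step is the mirror image: score fresh fakes and label them real, so minimizing the same BCE pushes the discriminator's logits on fakes upward. A hedged sketch reusing the names above (HA-GAN also trains an encoder with a reconstruction loss, omitted here):

fake_images, fake_images_small = G(noise, crop_idx=crop_idx, class_label=None)
y_fake_g = D(fake_images, fake_images_small, crop_idx)
g_loss = loss_f(y_fake_g, real_labels)  # fakes labeled real for the G update
g_loss.backward()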

3. 3dbraingen

3dbraingen follows an alpha-WGAN-GP recipe with four networks: a generator G, an encoder E, an image critic D, and a code discriminator CD acting on the latent space.

G = Generator(noise=latent_dim)
CD = Code_Discriminator(code_size=latent_dim, num_units=4096)
D = Discriminator(is_dis=True)
E = Discriminator(out_class=latent_dim, is_dis=False)  # encoder built on the critic backbone

criterion_bce = nn.BCELoss()
criterion_l1 = nn.L1Loss()
criterion_mse = nn.MSELoss()


_eps = 1e-15  # numerical floor inside the norm (undefined in the excerpt; value assumed)

def calc_gradient_penalty(model, x, x_gen, w=10):
    """WGAN-GP gradient penalty on random interpolates of real and generated samples."""
    assert x.size() == x_gen.size(), "real and sampled sizes do not match"
    alpha_size = tuple((len(x), *(1,)*(x.dim()-1)))
    alpha = torch.rand(alpha_size, device=x.device)
    x_hat = (x.data*alpha + x_gen.data*(1-alpha)).requires_grad_(True)

    def eps_norm(x):
        x = x.view(len(x), -1)
        return (x*x + _eps).sum(-1).sqrt()

    def bi_penalty(x):
        return (x - 1)**2

    # Gradient of the critic's summed output w.r.t. the interpolates
    grad_xhat = torch.autograd.grad(model(x_hat).sum(), x_hat,
                                    create_graph=True, only_inputs=True)[0]

    # Penalize the gradient norm's deviation from 1
    penalty = w*bi_penalty(eps_norm(grad_xhat)).mean()
    return penalty


# Encode real volumes, reconstruct them, and also decode a random prior sample
z_hat = E(real_images).view(_batch_size, -1)
x_hat = G(z_hat)
x_rand = G(z_rand)

# Critic loss: push real scores up, reconstruction and prior-sample scores down,
# with a gradient penalty for each fake branch
x_loss2 = -2*D(real_images).mean() + D(x_hat).mean() + D(x_rand).mean()
gradient_penalty_r = calc_gradient_penalty(D, real_images.data, x_rand.data)
gradient_penalty_h = calc_gradient_penalty(D, real_images.data, x_hat.data)

loss2 = x_loss2 + gradient_penalty_r + gradient_penalty_h
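
For completeness, a hedged sketch of the matching generator/encoder objective in an alpha-WGAN setup, reusing the names above (the sign convention follows WGAN; the L1 weight of 10 is an illustrative assumption, not necessarily the repo's value):

# Hypothetical G/E step: raise the critic's scores on both fake branches,
# fool the code discriminator, and reconstruct the input under L1.
d_fake = D(x_hat).mean() + D(x_rand).mean()
c_loss = CD(z_hat).mean()
l1 = criterion_l1(x_hat, real_images)
loss1 = -d_fake - c_loss + 10*l1  # weight 10 assumed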


4. MEGAN

MEGAN wraps its adversarial objective in a GANLoss helper configured as a least-squares GAN (LSGAN), which swaps the usual BCE for an MSE on the discriminator's outputs:

criterion_GAN = GANLoss(device=device, use_lsgan=True)
criterion_L1 = nn.L1Loss()

# Inside GANLoss.__init__: LSGAN uses MSE, a vanilla GAN uses BCE
if use_lsgan:
    self.loss = nn.MSELoss()
else:
    self.loss = nn.BCELoss()

# setup optimizer
optimizer_G = optim.Adam(net_G.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
optimizer_D = optim.Adam(net_D.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))



# Fake branch: detach all inputs so no gradients flow back into the generator
fake_D = net_D.forward((HREP.detach(), HRP_fake.detach(), LRP.detach()))
loss_d_fake = criterion_GAN(fake_D.view(batch_size, -1), False)

real_D = net_D.forward((HREP, HRP, LRP))
loss_d_real = criterion_GAN(real_D.view(batch_size, -1), True)

# Combined loss, halved so D does not learn twice as fast as G
loss_d = (loss_d_fake + loss_d_real) * 0.5
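
A hedged sketch of the matching generator step under the same LSGAN criterion (the L1 term and its weight lambda_L1 are illustrative pix2pix-style assumptions, not MEGAN's documented values):

fake_D = net_D.forward((HREP, HRP_fake, LRP))  # no detach: gradients reach G
loss_g_gan = criterion_GAN(fake_D.view(batch_size, -1), True)
loss_g_l1 = criterion_L1(HRP_fake, HRP)  # supervise the prediction against the target
loss_g = loss_g_gan + lambda_L1*loss_g_l1  # lambda_L1: assumed weight, e.g. 100

optimizer_G.zero_grad()
loss_g.backward()
optimizer_G.step()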


5. 3DStyleGAN

3DStyleGAN keeps StyleGAN2's non-saturating logistic discriminator loss with an R1 gradient penalty on real samples, extended to 5-D volume tensors. The loss is selected by name in the training config:

D_loss = EasyDict(func_name='training.loss.D_logistic_r1')


def D_logistic_r1(G, D, opt, training_set, minibatch_size, reals, labels, gamma=10.0):
    _ = opt, training_set

    latents = tf.random.normal([minibatch_size] + G.input_shapes[0][1:])

    fake_images_out = G.get_output_for(latents, labels, is_training=True)

    real_scores_out = D.get_output_for(reals, labels, is_training=True)

    fake_scores_out = D.get_output_for(fake_images_out, labels, is_training=True)
    real_scores_out = autosummary('Loss/scores/real', real_scores_out)
    fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out)

    # Non-saturating logistic loss
    loss = tf.nn.softplus(fake_scores_out)   # -log(1 - sigmoid(fake_scores_out))
    loss += tf.nn.softplus(-real_scores_out) # -log(sigmoid(real_scores_out))
    loss = autosummary('Loss/D_Loss', loss)

    with tf.name_scope('GradientPenalty'):
        # R1: squared gradient norm of the real scores w.r.t. the real volumes,
        # summed over all non-batch axes of the 5-D tensor
        real_grads = tf.gradients(tf.reduce_sum(real_scores_out), [reals])[0]
        gradient_penalty = tf.reduce_sum(tf.square(real_grads), axis=[1, 2, 3, 4])
        gradient_penalty = autosummary('Loss/gradient_penalty', gradient_penalty)

        reg = gradient_penalty * (gamma * 0.5)
        reg = autosummary('Loss/D_Reg', reg)

    return loss, reg
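
For comparison with the PyTorch snippets above, the same objective as a hedged PyTorch sketch (function and variable names are mine, not 3DStyleGAN's):

import torch
import torch.nn.functional as F

def d_logistic_r1(D, reals, fakes, gamma=10.0):
    reals = reals.requires_grad_(True)
    real_scores = D(reals)
    fake_scores = D(fakes)

    # Non-saturating logistic loss
    loss = F.softplus(fake_scores).mean() + F.softplus(-real_scores).mean()

    # R1 penalty: squared gradient norm of the real scores w.r.t. real inputs
    grads = torch.autograd.grad(real_scores.sum(), reals, create_graph=True)[0]
    r1 = grads.pow(2).flatten(1).sum(1).mean()
    return loss + (gamma * 0.5) * r1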

6. med-ddpm

Unlike the GANs above, med-ddpm is a denoising diffusion model, so there is no adversarial game at all: training minimizes a per-voxel distance between the noise injected at a random timestep and the noise the network predicts.
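
A minimal hedged sketch of that objective (the interface follows the common GaussianDiffusion pattern that med-ddpm builds on; the L1 default and all names here are assumptions, not the repo's exact code):

import torch
import torch.nn.functional as F

def p_losses(denoise_fn, x_start, t, alphas_cumprod, loss_type='l1'):
    """Predict the injected noise at diffusion step t."""
    noise = torch.randn_like(x_start)
    a_bar = alphas_cumprod[t].view(-1, *([1] * (x_start.dim() - 1)))
    # Forward diffusion: x_t = sqrt(a_bar)*x_0 + sqrt(1 - a_bar)*noise
    x_noisy = a_bar.sqrt() * x_start + (1 - a_bar).sqrt() * noise
    predicted = denoise_fn(x_noisy, t)
    if loss_type == 'l1':
        return F.l1_loss(predicted, noise)   # L1 variant (assumed default)
    return F.mse_loss(predicted, noise)      # classic DDPM MSE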
