Combining Multiple Loss Functions in Neural Network Training

Joint training with multiple loss functions

loss_fn_1 (cross-entropy) is used to optimize the network weights, while loss_fn_2 (a center loss) is used to optimize the center vectors, so each loss gets its own optimizer.
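
The CenterLoss used below is not shown in this post. As a reference, a minimal sketch of such a module (assuming a plain squared-distance center loss with the CenterLoss(num_classes, feat_dim, size_average=True) signature used here) could look like this; the key point is that the class centers are learnable parameters of the loss module itself, separate from the network weights:

import torch
from torch import nn

class CenterLoss(nn.Module):
    """Minimal sketch: mean squared distance between each sample's feature
    vector and the learnable center of its class."""
    def __init__(self, num_classes, feat_dim, size_average=True):
        super().__init__()
        # one learnable center per class -- these are the "center vectors"
        # that the second optimizer will update
        self.centers = nn.Parameter(torch.randn(num_classes, feat_dim))
        self.size_average = size_average

    def forward(self, features, labels):
        batch_centers = self.centers[labels]  # (N, feat_dim), one center per sample
        loss = (features - batch_centers).pow(2).sum()
        if self.size_average:
            loss = loss / features.size(0)
        return loss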

# Assumes torch, nn and time are imported and that the network ("weights"),
# device, lr and train_transforms_loader are defined earlier in the script.
optim_1 = torch.optim.AdamW(weights.parameters(), lr=lr, weight_decay=1e-4)
loss_fn_1 = nn.CrossEntropyLoss().to(device)
loss_fn_2 = CenterLoss(6, 6, size_average=True).to(device)
# optim_2 updates the center vectors, which live in the CenterLoss module,
# not in the network, so it takes loss_fn_2.parameters().
optim_2 = torch.optim.RMSprop(loss_fn_2.parameters(), lr=5e-3)
loss_rate = 5e-3
epochs = 15
index_train = 1
index_test = 1
acc = 0.0
start_time = time.time()
metrics_list = []
end_loss = 0

for epoch in range(epochs):
    print(f"开始第{epoch}轮训练")
    weights.train()
    for data in train_transforms_loader:
        img, label = data
        img = img.to(device)
        label = label.to(device)
        output = weights(img)
        optim_1.zero_grad()
        optim_2.zero_grad()
        loss_1 = loss_fn_1(output, label)
        loss_2 = loss_fn_2(output, label)
        result_loss = loss_1 + loss_rate * loss_2
        # writer.add_scalar('train_loss', result_loss.item(), index_train)
        result_loss.backward()
        optim_1.step()

        # The joint backward left loss_rate * grad(loss_2) on the centers;
        # undo that factor so the centers are updated by the unweighted
        # center loss, then step their optimizer.
        for param in loss_fn_2.parameters():
            param.grad.data *= (1. / loss_rate)
        optim_2.step()
        if index_train % 10 == 0:
            print(f'index_train {index_train}')
        index_train += 1
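
A note on the gradient handling above: the joint backward writes grad(loss_1) + loss_rate * grad(loss_2) into the network weights and loss_rate * grad(loss_2) into the centers, so rescaling the center gradients by 1 / loss_rate before optim_2.step() lets the centers follow the unweighted center loss while the weights still see the down-weighted term. This is the recipe used by common PyTorch center-loss reference implementations.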

If all of the loss terms are used to optimize the network weights, a single optimizer is sufficient:

optim_1 = torch.optim.AdamW(weights.parameters(), lr=lr, weight_decay=1e-4)
loss_fn_1 = nn.CrossEntropyLoss().to(device)
loss_fn_2 = CenterLoss(6, 6, size_average=True).to(device)
loss_rate = 5e-3
epochs = 15
index_train = 1
index_test = 1
acc = 0.0
start_time = time.time()
metrics_list = []
end_loss = 0

for epoch in range(epochs):
    print(f"开始第{epoch}轮训练")
    weights.train()
    for data in train_transforms_loader:
        img, label = data
        img = img.to(device)
        label = label.to(device)
        output = weights(img)
        optim_1.zero_grad()
        loss_1 = loss_fn_1(output, label)
        loss_2 = loss_fn_2(output, label)
        result_loss = loss_1 + loss_rate * loss_2
        # writer.add_scalar('train_loss', result_loss.item(), index_train)
        result_loss.backward()
        optim_1.step()
        if index_train % 10 == 0:
            print(f'index_train {index_train}')
        index_train += 1
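
The single-optimizer case generalizes to any number of loss terms: compute each term, take a weighted sum, and run one backward()/step(). A small sketch of that pattern (the helper name combine_losses and the weight values are illustrative, not from the original code):

def combine_losses(loss_dict, weight_dict):
    """Weighted sum of several scalar loss tensors."""
    return sum(weight_dict[name] * loss for name, loss in loss_dict.items())

# Inside the training loop, replacing the explicit sum above:
# losses = {"ce": loss_fn_1(output, label), "center": loss_fn_2(output, label)}
# result_loss = combine_losses(losses, {"ce": 1.0, "center": loss_rate})
# optim_1.zero_grad()
# result_loss.backward()
# optim_1.step()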