Visualizing PyTorch Learning Rate (LR) Schedulers in Python

1.torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max, eta_min=0, last_epoch=-1, verbose=False)

[Figure: CosineAnnealingLR learning-rate curve]

Implementation code:

import torch
import torch.nn as nn
import matplotlib.pyplot as plt

initial_lr = 0.1
epochs = 100

# Define a minimal model (just enough to give the optimizer parameters)
class model(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=3, kernel_size=3)

    def forward(self, x):
        pass

if __name__ == '__main__':
    net = model()
    optimizer = torch.optim.Adam(net.parameters(), lr=initial_lr)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=5)
    print("Initial learning rate:", optimizer.defaults['lr'])

    lr_list = []  # record the learning rate at each epoch
    for epoch in range(epochs):
        # train
        optimizer.zero_grad()
        optimizer.step()
        # print("LR at epoch %d: %f" % (epoch, optimizer.param_groups[0]['lr']))
        lr_list.append(optimizer.param_groups[0]['lr'])
        scheduler.step()

    # Plot how the learning rate changes
    plt.plot(list(range(epochs)), lr_list)
    plt.xlabel("epoch")
    plt.ylabel("lr")
    plt.title("CosineAnnealingLR")
    plt.show()
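For comparison, the curve above can be reproduced directly from the closed-form cosine formula in the PyTorch docs; a minimal sketch (the helper name is mine, and eta_min=0 matches the call above):

import math

def cosine_annealing_lr(epoch, initial_lr, T_max, eta_min=0.0):
    # eta_t = eta_min + (eta_max - eta_min) * (1 + cos(pi * epoch / T_max)) / 2
    # The cosine has period 2 * T_max, which is why the plot oscillates with
    # T_max=5: the LR falls for 5 epochs, then rises for 5 epochs.
    return eta_min + (initial_lr - eta_min) * (1 + math.cos(math.pi * epoch / T_max)) / 2

print(cosine_annealing_lr(0, 0.1, T_max=5))  # 0.1
print(cosine_annealing_lr(5, 0.1, T_max=5))  # ~0.0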

2.torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0, T_mult=1, eta_min=0, last_epoch=-1, verbose=False)

T_mult = 1

[Figure: CosineAnnealingWarmRestarts learning-rate curve, T_mult = 1]

T_mult = 2

[Figure: CosineAnnealingWarmRestarts learning-rate curve, T_mult = 2]

scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=5, T_mult=1)
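With T_mult=1 every cycle has the same length T_0, so the schedule restarts every 5 epochs; with T_mult=2 each cycle is twice as long as the previous one (T_i = T_0 * T_mult ** i, per the docs). A small sketch built only on that rule (the helper name is mine) prints where the restarts land:

def restart_epochs(T_0, T_mult, total_epochs):
    # Epochs at which the scheduler resets the LR back to its maximum
    restarts, t, T_i = [], 0, T_0
    while t + T_i <= total_epochs:
        t += T_i
        restarts.append(t)
        T_i *= T_mult
    return restarts

print(restart_epochs(T_0=5, T_mult=1, total_epochs=50))  # [5, 10, 15, ..., 50]
print(restart_epochs(T_0=5, T_mult=2, total_epochs=50))  # [5, 15, 35]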

3.torch.optim.lr_scheduler.StepLR(optimizer, step_size, gamma=0.1, last_epoch=-1, verbose=False)

[Figure: StepLR learning-rate curve]

scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=30)
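StepLR simply multiplies the LR by gamma every step_size epochs; the staircase above is equivalent to this closed form (helper name is mine):

def step_lr(epoch, initial_lr, step_size, gamma=0.1):
    # The LR is decayed by a factor of gamma once every step_size epochs
    return initial_lr * gamma ** (epoch // step_size)

print(step_lr(29, 0.1, step_size=30))  # 0.1
print(step_lr(30, 0.1, step_size=30))  # ~0.01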

4.torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones, gamma=0.1, last_epoch=-1, verbose=False)

[Figure: MultiStepLR learning-rate curve, milestones=[30, 80]]

scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[30,80])
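MultiStepLR does the same, but decays at the given milestone epochs instead of at a fixed interval; equivalently (helper name is mine):

from bisect import bisect_right

def multi_step_lr(epoch, initial_lr, milestones, gamma=0.1):
    # The exponent is the number of milestones already passed
    return initial_lr * gamma ** bisect_right(sorted(milestones), epoch)

print(multi_step_lr(29, 0.1, [30, 80]))  # 0.1
print(multi_step_lr(30, 0.1, [30, 80]))  # ~0.01
print(multi_step_lr(80, 0.1, [30, 80]))  # ~0.001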

5.torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma, last_epoch=-1, verbose=False)

[Figure: ExponentialLR learning-rate curve]

scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma = 0.95)
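ExponentialLR decays the LR by gamma after every single epoch, i.e. lr = initial_lr * gamma ** epoch (helper name is mine):

def exponential_lr(epoch, initial_lr, gamma=0.95):
    # The LR is multiplied by gamma after every epoch
    return initial_lr * gamma ** epoch

print(exponential_lr(10, 0.1))  # ~0.0599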

6.torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr, max_lr, step_size_up=2000, step_size_down=None, mode='triangular', gamma=1.0, scale_fn=None, scale_mode='cycle', cycle_momentum=True, base_momentum=0.8, max_momentum=0.9, last_epoch=-1, verbose=False)

mode = 'triangular'

[Figure: CyclicLR learning-rate curve, mode = 'triangular']

import torch
import torch.nn as nn
import matplotlib.pyplot as plt

from utils.transform import test_preprocess  # project-specific preprocessing transform
from torchvision import datasets

initial_lr = 0.1
epochs = 10

# Path to the dataset
TEST_PATH = r'D:\home_image\test'
data = datasets.ImageFolder(root=TEST_PATH, transform=test_preprocess)
# The original read num_workers from a project config; 0 keeps it self-contained
data_loader = torch.utils.data.DataLoader(data, batch_size=25, num_workers=0)

# Define a minimal model (just enough to give the optimizer parameters)
class model(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=3, kernel_size=3)

    def forward(self, x):
        pass

if __name__ == '__main__':
    net = model()
    optimizer = torch.optim.SGD(net.parameters(), lr=initial_lr)
    scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=0.01, max_lr=0.1,
                                                  mode='triangular',
                                                  step_size_up=10, cycle_momentum=True)
    print("Initial learning rate:", optimizer.defaults['lr'])

    lr_list = []  # record the learning rate at every iteration
    for epoch in range(epochs):
        for batch in data_loader:
            # train
            optimizer.zero_grad()
            optimizer.step()
            # print("LR at epoch %d: %f" % (epoch, optimizer.param_groups[0]['lr']))
            lr_list.append(optimizer.param_groups[0]['lr'])
            scheduler.step()  # CyclicLR is stepped once per batch, not per epoch

    # Plot how the learning rate changes
    plt.plot(list(range(epochs * len(data_loader))), lr_list)
    plt.xlabel("iteration")
    plt.ylabel("lr")
    plt.title("CyclicLR-triangular")
    plt.show()

mode = 'triangular2'

[Figure: CyclicLR learning-rate curve, mode = 'triangular2']

scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=0.01, max_lr=0.1,
                                              mode='triangular2',
                                              step_size_up=10, cycle_momentum=True)

mode = 'exp_range', gamma = 0.98

[Figure: CyclicLR learning-rate curve, mode = 'exp_range', gamma = 0.98]

scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=0.01, max_lr=0.1,
                                              mode='exp_range', gamma=0.98,
                                              step_size_up=10, cycle_momentum=True)
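The three modes differ only in how the triangle's peak amplitude is scaled over time. A sketch of the scale factors, following the formulas in the docs (function and argument names are mine; cycle starts at 1, iterations is the batch index):

def cyclic_scale(mode, cycle, iterations, gamma=1.0):
    if mode == 'triangular':   # constant amplitude in every cycle
        return 1.0
    if mode == 'triangular2':  # amplitude halved after each cycle
        return 1.0 / (2 ** (cycle - 1))
    if mode == 'exp_range':    # amplitude decays a little every iteration
        return gamma ** iterations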

7.torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr, total_steps=None, epochs=None, steps_per_epoch=None, pct_start=0.3, anneal_strategy='cos', cycle_momentum=True, base_momentum=0.85, max_momentum=0.95, div_factor=25.0, final_div_factor=10000.0, last_epoch=-1, verbose=False)

[Figure: OneCycleLR learning-rate curve]

epochs = 20

scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01,
                                                steps_per_epoch=len(data_loader), epochs=epochs)
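Like CyclicLR, OneCycleLR is stepped once per batch rather than once per epoch; a minimal loop sketch, reusing net, optimizer and data_loader from the CyclicLR script above:

lr_list = []
for epoch in range(epochs):
    for batch in data_loader:
        optimizer.zero_grad()
        optimizer.step()
        lr_list.append(optimizer.param_groups[0]['lr'])
        scheduler.step()  # exactly epochs * len(data_loader) steps in total

plt.plot(lr_list)
plt.xlabel("iteration")
plt.ylabel("lr")
plt.title("OneCycleLR")
plt.show()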
