class torch.optim.lr_scheduler.MultiStepLR

Reference: class torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones, gamma=0.1, last_epoch=-1, verbose=False)
Companion code download: 测试学习率调度器.zip
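
MultiStepLR multiplies the learning rate of every parameter group by gamma each time the epoch counter reaches one of the milestones; between milestones the rate stays constant. Below is a minimal sketch of that behavior (the Linear model and SGD optimizer are placeholders for illustration only, not part of the experiment further down):

# Minimal sketch: MultiStepLR multiplies the LR by gamma at each milestone.
import torch

model = torch.nn.Linear(1, 1)                        # placeholder model
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[2, 4], gamma=0.1)

for epoch in range(6):
    # get_last_lr() reports the LR most recently computed by the scheduler:
    # epochs 0-1 -> 0.1, epochs 2-3 -> 0.01, epochs 4-5 -> 0.001
    print(epoch, scheduler.get_last_lr())
    optimizer.step()    # in real training this follows loss.backward()
    scheduler.step()    # advance the epoch counter; decay happens at milestones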

Experiment code:

# torch.optim.lr_scheduler.MultiStepLR

import matplotlib.pyplot as plt
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
import random
import os

# Fix every relevant RNG so the run is reproducible.
seed = 20200910
os.environ['PYTHONHASHSEED'] = str(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)  # if you are using multi-GPU.
np.random.seed(seed)  # Numpy module.
random.seed(seed)  # Python random module.
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True

class Dataset4cxq(Dataset):
    def __init__(self, length):
        self.length = length

    def __len__(self):
        return self.length
        
    def __getitem__(self, index):
        # Only integer indices are supported; DataLoader samplers pass ints.
        if not isinstance(index, int):
            raise TypeError('Invalid index type, exiting...')

        if index >= self.length or index < -self.length:
            raise IndexError('Index out of range, exiting...')
        if index < 0:
            index += self.length

        # One random Celsius sample and its exact Fahrenheit counterpart.
        Celsius = torch.randn(1, 1, dtype=torch.float).item()
        Fahrenheit = 32.0 + 1.8 * Celsius
        return Celsius, Fahrenheit
    
        
def collate_fn4cxq(batch):
    # Collate a list of (Celsius, Fahrenheit) float pairs into two 1-D tensors.
    list_c = []
    list_f = []
    for c, f in batch:
        list_c.append(c)
        list_f.append(f)
    return torch.tensor(list_c), torch.tensor(list_f)



if __name__ == "__main__":
    my_dataset = Dataset4cxq(32)
    dataloader4cxq = torch.utils.data.DataLoader(
        dataset=my_dataset,
        batch_size=8,
        drop_last=True,
        shuffle=True,
        collate_fn=collate_fn4cxq,
    )

    print('Creating model'.center(80, '-'))
    model = torch.nn.Linear(in_features=1, out_features=1, bias=True)
    model.cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
    # Learn the Celsius-to-Fahrenheit conversion: Fahrenheit = 32 + 1.8 * Celsius
    model.train()
    cost_function = torch.nn.MSELoss()
    epochs = 10001  # the original run used 100001
    print('\n')
    print('Training model'.center(80, '-'))
    list4delta = list()
    list4epoch = list()

    # Multiply the learning rate by gamma=0.7 at each milestone epoch.
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[500, 1000, 2000, 3000, 4000, 5000], gamma=0.7)
    
    for epoch in range(epochs):
        total_loss = 0.0
        for cnt, data in enumerate(dataloader4cxq, 0):
            Celsius, Fahrenheit = data
            Celsius, Fahrenheit = Celsius.cuda().view(-1, 1), Fahrenheit.cuda().view(-1, 1)
            output = model(Celsius)
            loss = cost_function(output, Fahrenheit)
            total_loss += loss.item()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        scheduler.step()  # step the scheduler once per epoch, after the optimizer
            
        if epoch % 100 == 0:
            list4delta.append(total_loss)
            list4epoch.append(epoch)

        if epoch % 500 == 0:
            info = '\nepoch:{0:>6}/{1:<6}\t'.format(epoch, epochs)
            for k, v in model.state_dict().items():
                info += str(k) + ':' + '{0:<.18f}'.format(v.item()) + '\t'
            print(info)

    fig, ax = plt.subplots()
    ax.plot(list4epoch, list4delta, 'r.-', markersize=8)
    ax.set_title("Visualization For My Model's Errors")
    plt.show()
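
With the milestones above, the learning rate starts at 0.01 and is multiplied by 0.7 at epochs 500, 1000, 2000, 3000, 4000 and 5000, ending near 0.01 * 0.7 ** 6 ≈ 0.00118. A quick stand-alone check of that schedule (dummy parameter; same lr/milestones/gamma as the experiment):

# Stand-alone check of the schedule used above: lr = 0.01 * 0.7 ** k after k milestones.
import torch

param = torch.zeros(1, requires_grad=True)           # dummy parameter
opt = torch.optim.Adam([param], lr=0.01)
sch = torch.optim.lr_scheduler.MultiStepLR(opt, milestones=[500, 1000, 2000, 3000, 4000, 5000], gamma=0.7)

for epoch in range(5000):
    opt.step()
    sch.step()
    if epoch + 1 in (500, 1000, 2000, 3000, 4000, 5000):
        # roughly 0.007, 0.0049, 0.00343, 0.002401, 0.0016807, 0.00117649
        print(epoch + 1, sch.get_last_lr())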

Console output:

Windows PowerShell
Copyright (C) Microsoft Corporation. All rights reserved.

Try the new cross-platform PowerShell https://aka.ms/pscore6

Loading personal and system profiles took 785 ms.
(base) PS C:\Users\chenxuqi\Desktop\News4cxq\测试学习率调度器> conda activate pytorch_1.7.1_cu102
(pytorch_1.7.1_cu102) PS C:\Users\chenxuqi\Desktop\News4cxq\测试学习率调度器>  & 'D:\Anaconda3\envs\pytorch_1.7.1_cu102\python.exe' 'c:\Users\chenxuqi\.vscode\extensions\ms-python.python-2021.1.502429796\pythonFiles\lib\python\debugpy\launcher' '49956' '--' 'c:\Users\chenxuqi\Desktop\News4cxq\测试学习率调度器\test09.py'
---------------------------------Creating model---------------------------------


---------------------------------Training model---------------------------------

epoch:     0/10001      weight:0.953605473041534424     bias:1.016030073165893555       

epoch:   500/10001      weight:1.896140336990356445     bias:18.054914474487304688      

epoch:  1000/10001      weight:1.829766988754272461     bias:26.775539398193359375      

epoch:  1500/10001      weight:1.798476338386535645     bias:30.794200897216796875      

epoch:  2000/10001      weight:1.799080491065979004     bias:31.971813201904296875

epoch:  2500/10001      weight:1.799995541572570801     bias:31.999830245971679688

epoch:  3000/10001      weight:1.800000548362731934     bias:31.999938964843750000

epoch:  3500/10001      weight:1.799999237060546875     bias:31.999967575073242188

epoch:  4000/10001      weight:1.800000667572021484     bias:31.999988555908203125

epoch:  4500/10001      weight:1.800000309944152832     bias:31.999994277954101562

epoch:  5000/10001      weight:1.799999833106994629     bias:31.999998092651367188

epoch:  5500/10001      weight:1.799999952316284180     bias:32.000000000000000000

epoch:  6000/10001      weight:1.799994111061096191     bias:32.000000000000000000

epoch:  6500/10001      weight:1.800000786781311035     bias:32.000000000000000000

epoch:  7000/10001      weight:1.799997925758361816     bias:31.999994277954101562

epoch:  7500/10001      weight:1.799974083900451660     bias:31.999967575073242188

epoch:  8000/10001      weight:1.800000190734863281     bias:32.000000000000000000

epoch:  8500/10001      weight:1.799630641937255859     bias:32.000358581542968750

epoch:  9000/10001      weight:1.800010323524475098     bias:32.000026702880859375

epoch:  9500/10001      weight:1.800014019012451172     bias:32.000003814697265625

epoch: 10000/10001      weight:1.800015211105346680     bias:32.000274658203125000

Result:

[Figure: training-loss curve over epochs, titled "Visualization For My Model's Errors"]
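
The run converges to weight ≈ 1.8 and bias ≈ 32, i.e. exactly the target conversion Fahrenheit = 32 + 1.8 * Celsius. A quick sanity check using the values from the epoch-10000 line above:

# Compare the learned linear map with the exact Celsius-to-Fahrenheit conversion.
weight, bias = 1.800015211105346680, 32.000274658203125000  # from the epoch-10000 log line
for c in (-40.0, 0.0, 100.0):
    print(c, weight * c + bias, 32.0 + 1.8 * c)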
