【Python】Bayesian Hyperparameter Optimization

bayes_opt

https://www.bilibili.com/video/BV1aa41167oS

import numpy as np
from sklearn.ensemble import RandomForestRegressor as RFR
from sklearn.model_selection import KFold, cross_validate
# Bayesian optimization based on the bayes_opt library
from bayes_opt import BayesianOptimization

1. Define the objective function

'''
1. The objective function must take concrete hyperparameter values as input, not a hyperparameter space.
2. bayes_opt passes every hyperparameter as a float; integers and strings are not supported, so string-valued
   hyperparameters cannot be passed in directly (see the categorical-encoding sketch after the objective function below).
3. bayes_opt can only maximize the objective. RMSE is better when smaller, so -RMSE is better when larger;
   metrics such as accuracy or AUC can be returned as they are.
'''
def bayesopt_objective(n_estimators,max_depth,max_features,min_impurity_decrease):
    # Define the estimator
    # Cast the hyperparameter inputs to the types the estimator expects
    reg = RFR(n_estimators=int(n_estimators)
             ,max_depth=int(max_depth)
             ,max_features=int(max_features)
             ,min_impurity_decrease=min_impurity_decrease
             ,random_state=1412
             ,verbose=False # set as you prefer
             ,n_jobs=-1
             )
    # Define the loss: 5-fold cross-validation, returning the mean -RMSE
    cv = KFold(n_splits=5, shuffle=True, random_state=1412)
    validation_loss = cross_validate(reg
                                    ,X,y
                                    ,scoring='neg_root_mean_squared_error'
                                    ,cv=cv
                                    ,verbose=False
                                    ,n_jobs=-1
                                    ,error_score='raise' # default is nan; 'raise' surfaces the cause of any error
                                    )
    # The quantity being optimized is the mean cross-validated score
    return np.mean(validation_loss['test_score'])
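
Note 2 above means string-valued hyperparameters cannot be passed to the objective directly. A common workaround, shown here as a minimal sketch (the parameter name criterion_idx and the option list are illustrative, not part of the original code), is to let bayes_opt sample a float and map it to a category inside the objective:

criterion_options = ['squared_error', 'absolute_error']  # hypothetical option list

def bayesopt_objective_with_criterion(n_estimators, max_depth, criterion_idx):
    # bayes_opt passes criterion_idx as a float; int() truncates it to a valid list index
    criterion = criterion_options[int(criterion_idx)]
    reg = RFR(n_estimators=int(n_estimators)
             ,max_depth=int(max_depth)
             ,criterion=criterion
             ,random_state=1412
             ,n_jobs=-1
             )
    cv = KFold(n_splits=5, shuffle=True, random_state=1412)
    loss = cross_validate(reg, X, y
                         ,scoring='neg_root_mean_squared_error'
                         ,cv=cv, n_jobs=-1)
    return np.mean(loss['test_score'])

# The matching bound in the parameter space would be e.g.
# 'criterion_idx': (0, len(criterion_options) - 1)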

2. Define the parameter space

'''
Each range is a closed interval (both endpoints can be sampled).
Only lower and upper bounds are given, with no step size, so values are sampled as floats.
The bayes_opt parameter space is therefore denser and typically needs more iterations.
'''
param_grid_simple = {'n_estimators':(80,100)
                    ,'max_depth':(10,25)
                    ,'max_features':(10,20)
                    ,'min_impurity_decrease':(0,1)
                    }

3. The optimization loop

'''
Randomness is involved, e.g. the initial observation points and the candidate points used when maximizing
the acquisition function are drawn at random, so the optimization run itself cannot be reproduced: every run
will differ. The best hyperparameter combination can be reproduced, though: record the best parameters and
best score from the optimizer instance; the cross-validation splits already use a fixed random seed.
'''
def param_bayes_opt(init_points, n_iter):
    # Instantiate the optimizer
    opt = BayesianOptimization(bayesopt_objective # objective function to be optimized
                              ,param_grid_simple # parameter space
                              ,random_state=1412 # random seed (the run itself still cannot be fully reproduced)
                              )
    # Run the optimizer; bayes_opt only supports maximization
    opt.maximize(init_points=init_points # number of initial observation points to sample
                 ,n_iter=n_iter # total number of observations/iterations
                 )
    # After optimization, extract the best parameters and the best score
    param_best = opt.max['params']
    score_best = opt.max['target']
    print('\n\n','best_params: ',param_best,
         '\n\n','best_cvscore: ',score_best)
    
    return param_best, score_best

4. Define a validation function (optional)

'''
The objective function fixes the random seed and the cross-validation splits are fixed,
so the best score reported by the Bayesian optimizer must equal the score returned by this validation.
'''
def bayes_opt_validation(params_best):
    # Rebuild the estimator from the best parameters returned by the optimizer
    reg = RFR(n_estimators=int(params_best['n_estimators'])
             ,max_depth=int(params_best['max_depth'])
             ,max_features=int(params_best['max_features'])
             ,min_impurity_decrease=params_best['min_impurity_decrease']
             ,random_state=1412
             ,verbose=False
             ,n_jobs=-1
             )
    cv = KFold(n_splits=5, shuffle=True, random_state=1412)
    validation_loss = cross_validate(reg
                                    ,X,y
                                    ,scoring='neg_root_mean_squared_error'
                                    ,cv=cv
                                    ,verbose=False
                                    ,n_jobs=-1
                                    ,error_score='raise'
                                    )
    return np.mean(validation_loss['test_score'])

from sklearn.datasets import load_digits

data = load_digits()
X = data.data
y = data.target
print(X.shape,y.shape)
(1797, 64) (1797,)

5. Run the optimization

import time
start = time.time()
params_best, score_best = param_bayes_opt(20,280) # 20 initial points, 280 iterations in total
print('It takes %s minutes'%((time.time()-start)/60))
validation_score = bayes_opt_validation(params_best)
print('\n validation_score: ',validation_score)

    |   iter    |  target   | max_depth | max_fe... | min_im... | n_esti... |
    -------------------------------------------------------------------------
    | 1         | -1.357    | 23.2      | 17.52     | 0.06379   | 88.79     |
    | 2         | -2.735    | 14.8      | 17.61     | 0.9214    | 97.58     |
    | 3         | -1.941    | 15.86     | 15.56     | 0.2661    | 87.98     |
    | 4         | -1.384    | 14.05     | 16.84     | 0.06744   | 89.72     |
    | 5         | -2.726    | 18.71     | 19.17     | 0.9315    | 83.7      |
    | 6         | -2.483    | 17.7      | 19.58     | 0.7127    | 89.18     |
    | 7         | -2.154    | 14.21     | 12.62     | 0.3381    | 91.51     |
    | 8         | -2.559    | 23.23     | 10.89     | 0.6078    | 95.06     |
    | 9         | -2.776    | 14.89     | 14.0      | 0.9487    | 80.16     |
    | 10        | -1.258    | 11.52     | 12.58     | 0.03276   | 92.56     |
    | 11        | -1.99     | 13.14     | 13.31     | 0.2563    | 98.24     |
    | 12        | -2.263    | 17.94     | 11.48     | 0.3778    | 82.09     |
    | 13        | -2.594    | 16.02     | 17.03     | 0.7735    | 88.31     |
    | 14        | -2.345    | 13.92     | 15.04     | 0.529     | 93.66     |
    | 15        | -2.318    | 12.51     | 13.69     | 0.4482    | 99.9      |
    | 16        | -2.371    | 17.73     | 10.05     | 0.4143    | 82.79     |
    | 17        | -2.785    | 16.6      | 10.84     | 0.9134    | 88.37     |
    | 18        | -2.671    | 21.92     | 15.0      | 0.8219    | 85.86     |
    | 19        | -1.369    | 14.07     | 11.38     | 0.05068   | 91.53     |
    | 20        | -2.581    | 10.35     | 17.38     | 0.7624    | 99.19     |
    (iteration 10 was highlighted by bayes_opt as the best observation so far)
    


    ---------------------------------------------------------------------------

    StopIteration                             Traceback (most recent call last)

    File E:\Anaconda3\envs\DL\lib\site-packages\bayes_opt\bayesian_optimization.py:179, in BayesianOptimization.maximize(self, init_points, n_iter, acq, kappa, kappa_decay, kappa_decay_delay, xi, **gp_params)
        178 try:
    --> 179     x_probe = next(self._queue)
        180 except StopIteration:
    

    File E:\Anaconda3\envs\DL\lib\site-packages\bayes_opt\bayesian_optimization.py:25, in Queue.__next__(self)
         24 if self.empty:
    ---> 25     raise StopIteration("Queue is empty, no more objects to retrieve.")
         26 obj = self._queue[0]
    

    StopIteration: Queue is empty, no more objects to retrieve.

    
    During handling of the above exception, another exception occurred:
    

    TypeError                                 Traceback (most recent call last)

    Input In [53], in <cell line: 3>()
          1 import time
          2 start = time.time()
    ----> 3 params_best, score_best = param_bayes_opt(20,280) # 20初始观测点,迭代280次
          4 print('It takes %s minutes'%((time.time()-start)/60))
          5 validation_score = bayes_opt_validation(params_best)
    

    Input In [50], in param_bayes_opt(init_points, n_iter)
          9 opt = BayesianOptimization(bayesopt_objective # 待优化的目标函数
         10                           ,param_grid_simple # 参数空间
         11                           ,random_state=1412 # 随机数种子,虽然无法控制
         12                           )
         13 # 使用优化器,只支持最大化
    ---> 14 opt.maximize(init_points=init_points # 抽取多少个初始观测值
         15              ,n_iter=n_iter # 一共观测/迭代多少次
         16              )
         17 # 优化完成,取出最佳分数和最佳分数
         18 param_best = opt.max['params']
    

    File E:\Anaconda3\envs\DL\lib\site-packages\bayes_opt\bayesian_optimization.py:182, in BayesianOptimization.maximize(self, init_points, n_iter, acq, kappa, kappa_decay, kappa_decay_delay, xi, **gp_params)
        180 except StopIteration:
        181     util.update_params()
    --> 182     x_probe = self.suggest(util)
        183     iteration += 1
        185 self.probe(x_probe, lazy=False)
    

    File E:\Anaconda3\envs\DL\lib\site-packages\bayes_opt\bayesian_optimization.py:131, in BayesianOptimization.suggest(self, utility_function)
        128     self._gp.fit(self._space.params, self._space.target)
        130 # Finding argmax of the acquisition function.
    --> 131 suggestion = acq_max(
        132     ac=utility_function.utility,
        133     gp=self._gp,
        134     y_max=self._space.target.max(),
        135     bounds=self._space.bounds,
        136     random_state=self._random_state
        137 )
        139 return self._space.array_to_params(suggestion)
    

    File E:\Anaconda3\envs\DL\lib\site-packages\bayes_opt\util.py:65, in acq_max(ac, gp, y_max, bounds, random_state, n_warmup, n_iter)
         62     continue
         64 # Store it if better than previous minimum(maximum).
    ---> 65 if max_acq is None or -res.fun[0] >= max_acq:
         66     x_max = res.x
         67     max_acq = -res.fun[0]
    

    TypeError: 'float' object is not subscriptable

The run fails partway through: the acquisition-function maximization inside bayes_opt indexes `res.fun[0]`, which breaks when scipy returns a plain scalar there. This appears to be a known incompatibility between older bayes_opt releases and newer scipy versions; upgrading bayes_opt (or pinning an older scipy) is usually enough to let `opt.maximize` run to completion.

hyperopt

https://blog.csdn.net/u012735708/article/details/84820101

import numpy as np

from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestRegressor as RFR
from sklearn.ensemble import GradientBoostingRegressor as GBR
from sklearn.model_selection import cross_validate, KFold

# Import the optimization library
import hyperopt
from hyperopt import hp, fmin, tpe, Trials, partial
from hyperopt.early_stop import no_progress_loss
data = load_digits()
X = data.data
y = data.target
print(X.shape,y.shape)

    (1797, 64) (1797,)

1. Establish a benchmark

Run several algorithms on the current dataset and environment and use their scores to judge whether the optimization later on is actually an improvement.
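
A minimal benchmark sketch, assuming the same 5-fold CV and RMSE metric used throughout this post (the choice of baseline models here is only an example):

# Cross-validate a couple of default models so that the tuned scores
# later on have baselines to be compared against
cv = KFold(n_splits=5, shuffle=True, random_state=1412)
for name, model in [('RandomForest', RFR(random_state=1412, n_jobs=-1))
                   ,('GradientBoosting', GBR(random_state=1412))]:
    result = cross_validate(model, X, y
                           ,scoring='neg_root_mean_squared_error'
                           ,cv=cv, n_jobs=-1)
    print(name, 'RMSE:', abs(np.mean(result['test_score'])))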

2. Define the algorithm used for the init parameter

The best-performing algorithm from the benchmark

rf = RFR(n_estimators=89, max_depth=22, 
         max_features=14, min_impurity_decrease=0, 
         random_state=1412, verbose=False, n_jobs=-1)

3. Objective function

# Objective function
def hyper_objective(params):
    reg = GBR(n_estimators=int(params['n_estimators']) # int() enforces an integer value
             ,learning_rate=params['lr']
             ,criterion=params['criterion']
             ,loss=params['loss']
             ,max_depth=int(params['max_depth'])
             ,max_features=params['max_features']
             ,subsample=params['subsample']
             ,min_impurity_decrease=params['min_impurity_decrease']
             ,init=rf
             ,random_state=1412 # in GBR this controls feature subsampling only, not row subsampling
             ,verbose=False # print progress
             )
    cv = KFold(n_splits=5, shuffle=True, random_state=1412)
    validation_loss = cross_validate(reg
                                    ,X,y
                                    # negative root mean squared error; the smaller the absolute value, the better
                                    ,scoring='neg_root_mean_squared_error'
                                    ,cv=cv
                                    ,verbose=False
                                    ,n_jobs=-1
                                    ,error_score='raise'
                                    )
    # fmin minimizes the objective, so return a metric where smaller is better
    return np.mean(abs(validation_loss['test_score']))

4. Parameter space


param_grid_simple = {'n_estimators': hp.quniform('n_estimators',25,200,25)
                    ,'lr': hp.quniform('learning_rate',0.05,2.05,0.05)
                    ,'criterion': hp.choice('criterion',['friedman_mse','squared_error','mse','mae'])
                    ,'loss': hp.choice('loss',['squared_error','absolute_error','huber','quantile'])
                    ,'max_depth': hp.quniform('max_depth',2,30,2)
                    ,'subsample': hp.quniform('sub_sample',0.1,0.8,0.1)
                    ,'max_features': hp.choice('max_features',['log2','sqrt',16,32,64,'auto'])
                    ,'min_impurity_decrease': hp.quniform('min_impurity_decrease',0,5,1)
                    }

## Compute the size of the parameter space
len(range(25,200,25))*len(np.arange(0.05,2.05,0.05))*4*4*len(range(2,30,2))*len(np.arange(0.1,0.8,0.1))*6*len(range(0,5,1))

    13171200
    

5. Optimization function (the concrete optimization loop)

def param_hyperopt(max_evals=100):
    # Record the search history
    trials = Trials()
    # Early stopping: stop if the loss has not improved for 100 consecutive iterations
    early_stop_fn = no_progress_loss(100)
    # Run the surrogate-model-based search
    params_best = fmin(hyper_objective
                     ,space=param_grid_simple
                     ,algo=tpe.suggest # search algorithm
                     ,max_evals=max_evals # maximum number of evaluations
                     ,verbose=True
                     ,trials=trials
                     ,early_stop_fn=early_stop_fn
                     )
    # Print the best parameters; fmin prints the best loss automatically
    print('\n','\n','best params:',params_best,
         '\n')
    return params_best, trials
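
The partial imported above is typically used to configure the search algorithm, e.g. to control how many of the first evaluations are purely random before TPE's surrogate model takes over. A small sketch (the value 20 is arbitrary):

# The first n_startup_jobs evaluations are random; after that TPE's
# surrogate model drives the search
tpe_with_startup = partial(tpe.suggest, n_startup_jobs=20)

# Pass it to fmin in place of tpe.suggest, e.g.:
# params_best = fmin(hyper_objective, space=param_grid_simple,
#                    algo=tpe_with_startup, max_evals=100, trials=Trials())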
    

6. Validation function (optional)

def hyperopt_validation(params):
    reg = GBR(n_estimators=int(params['n_estimators']) # int() enforces an integer value
             ,learning_rate=params['lr']
             ,criterion=params['criterion']
             ,loss=params['loss']
             ,max_depth=int(params['max_depth'])
             ,max_features=params['max_features']
             ,subsample=params['subsample']
             ,min_impurity_decrease=params['min_impurity_decrease']
             ,init=rf
             ,random_state=1412 # in GBR this controls feature subsampling only, not row subsampling
             ,verbose=False # print progress
             )
    cv = KFold(n_splits=5, shuffle=True, random_state=1412)
    validation_loss = cross_validate(reg
                                    ,X,y
                                    # negative root mean squared error; the smaller the absolute value, the better
                                    ,scoring='neg_root_mean_squared_error'
                                    ,cv=cv
                                    ,verbose=False
                                    ,n_jobs=-1
                                    ,error_score='raise'
                                    )
    return np.mean(abs(validation_loss['test_score']))

7. Run the Bayesian optimizer

params_best, trials = param_hyperopt(30)
'''
    100%|████████████████████████████████████████████████| 30/30 [02:22<00:00,  4.76s/trial, best loss: 0.9290570901285291]
    
     
     best params: {'criterion': 1, 'learning_rate': 0.4, 'loss': 2, 'max_depth': 6.0, 'max_features': 3, 'min_impurity_decrease': 0.0, 'n_estimators': 175.0, 'sub_sample': 0.6000000000000001} 
'''
# Search a few more times: parameters that are chosen consistently can be fixed in the next search (by modifying the objective function),
# and parameter ranges can be narrowed further, centred around the last search's best values (see the refined-space sketch after the trials output below)
params_best
'''
    {'criterion': 1,
     'learning_rate': 0.4,
     'loss': 2,
     'max_depth': 6.0,
     'max_features': 3,
     'min_impurity_decrease': 0.0,
     'n_estimators': 175.0,
     'sub_sample': 0.6000000000000001}
'''
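
Note that hp.choice parameters come back as indices into their option lists (here 'criterion': 1, 'loss': 2, 'max_features': 3), and the labels ('learning_rate', 'sub_sample') differ from the dict keys used in the objective. hyperopt's space_eval maps the fmin result back to actual values keyed like the original space, which can then be fed to the validation function. A minimal sketch:

from hyperopt import space_eval

# Map index-valued hp.choice entries back to actual values;
# the result is keyed like param_grid_simple ('lr', 'subsample', ...)
actual_params = space_eval(param_grid_simple, params_best)
print(actual_params)

validation_score = hyperopt_validation(actual_params)
print('validation_score:', validation_score)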
# Trials records, for every evaluation, exactly which parameters were used and the corresponding return values
print('trials: ',trials.trials[:1])
'''
 trials:  [
 {'state': 2, 'tid': 0, 'spec': None, 
 'result': {'loss': 1.0419885867093803, 'status': 'ok'}, 
 'misc': {'tid': 0, 'cmd': ('domain_attachment', 'FMinIter_Domain'), 'workdir': None, 'idxs': {'criterion': [0], 'learning_rate': [0], 'loss': [0], 'max_depth': [0], 'max_features': [0], 'min_impurity_decrease': [0], 'n_estimators': [0], 'sub_sample': [0]}, 
 'vals': {'criterion': [2], 'learning_rate': [1.9000000000000001], 'loss': [1], 'max_depth': [10.0], 'max_features': [3], 'min_impurity_decrease': [3.0], 'n_estimators': [125.0], 'sub_sample': [0.5]}
 }, 
 'exp_key': None, 
 'owner': None, 
 'version': 0, 
 'book_time': datetime.datetime(2022, 8, 26, 0, 45, 22, 349000), 
 'refresh_time': datetime.datetime(2022, 8, 26, 0, 45, 25, 226000)
 } ]
'''
    
print('loss and status per iteration:',trials.results[:2])
print('loss per iteration:',trials.losses()[:2])
print('status per iteration:',trials.statuses()[:2])
'''
    loss and status per iteration: [{'loss': 1.0419885867093803, 'status': 'ok'}, {'loss': 0.9290570901285291, 'status': 'ok'}]
    loss per iteration: [1.0419885867093803, 0.9290570901285291]
    status per iteration: ['ok', 'ok']
'''
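
Following the note above, one possible next step (a sketch under the assumption that the first search's best values are roughly right; all bounds below are illustrative) is to narrow the space around the best point found and search again:

# A narrowed space centred on the first search's best values
# (n_estimators ~175, learning_rate ~0.4, max_depth ~6, subsample ~0.6);
# hp.choice values that were chosen consistently are fixed outright
param_grid_refined = {'n_estimators': hp.quniform('n_estimators',150,200,10)
                     ,'lr': hp.quniform('learning_rate',0.25,0.55,0.05)
                     ,'criterion': hp.choice('criterion',['squared_error'])
                     ,'loss': hp.choice('loss',['huber'])
                     ,'max_depth': hp.quniform('max_depth',4,10,1)
                     ,'subsample': hp.quniform('sub_sample',0.5,0.7,0.05)
                     ,'max_features': hp.choice('max_features',[16])
                     ,'min_impurity_decrease': hp.quniform('min_impurity_decrease',0,1,1)
                     }

# Point the search at the refined space and run again, e.g.:
# param_grid_simple = param_grid_refined
# params_best, trials = param_hyperopt(100)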