Datawhale Team Learning - Used Car Price Prediction, Task 04: Modeling and Parameter Tuning

This post walks through the modeling stage of the used-car price prediction task: importing tool packages, reading the data, and building and evaluating models (cross-validation, learning and validation curves). The tuning section then covers three strategies in detail: greedy tuning, grid search, and Bayesian optimization, all aimed at improving prediction accuracy.

1 Modeling and Parameter Tuning

Step 1: Import the tool packages
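The original post never shows the import list; a minimal set covering all of the code below (my assumption, not from the source) would be:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LinearRegression
from lightgbm import LGBMRegressor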
Step 2: Read the data
df_train = pd.read_csv('used_car_train_20200313.csv', sep=' ')
df_test = pd.read_csv('used_car_testA_20200313.csv', sep=' ')
  • A trick for reducing the memory the data occupies
def reduce_mem_usage(df):
    """
    Reduce the DataFrame's memory footprint by downcasting each column
    to the smallest dtype that can hold its value range.
    """
    start_mem = df.memory_usage().sum()  # memory usage before optimization, in bytes
    print('Memory usage of dataframe is {:.2f} bytes'.format(start_mem))
    
    for col in df.columns:  # for each column
        col_type = df[col].dtype  # the column's dtype
        
        if col_type != object:  # numeric columns
            c_min = df[col].min()  # smallest value in the column
            c_max = df[col].max()  # largest value in the column
            
            if str(col_type)[:3] == 'int':  # integer columns
                # np.iinfo(type) gives the machine limits of an integer type:
                # iinfo(np.int8)  --> iinfo(min=-128, max=127, dtype=int8)
                # iinfo(np.int16) --> iinfo(min=-32768, max=32767, dtype=int16)
                # iinfo(np.int32) --> iinfo(min=-2147483648, max=2147483647, dtype=int32)
                # iinfo(np.int64) --> iinfo(min=-9223372036854775808, max=9223372036854775807, dtype=int64)
                # If c_min > -128 and c_max < 127, downcast to np.int8, and so on.
                if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
                    df[col] = df[col].astype(np.int8)
                elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
                    df[col] = df[col].astype(np.int16)
                elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
                    df[col] = df[col].astype(np.int32)
                elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
                    df[col] = df[col].astype(np.int64)
            else:
                # np.finfo(dtype) gives the machine limits of a float type.
                if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
                    df[col] = df[col].astype(np.float16)
                elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
                    df[col] = df[col].astype(np.float32)
                else:
                    df[col] = df[col].astype(np.float64)
        else:  # object columns
            # The category dtype holds string-like data drawn from a fixed,
            # limited set of values, e.g. colors (red, green, blue),
            # sizes (large, medium, small), or geography (country, province).
            df[col] = df[col].astype('category')
    end_mem = df.memory_usage().sum()
    print('Memory usage after optimization is: {:.2f} bytes'.format(end_mem))  # memory after conversion
    print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))  # relative saving
    return df
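One caveat worth knowing: float16 has only a 10-bit mantissa (roughly 3 significant decimal digits) and a maximum of about 65504, so aggressive downcasting can silently round high-precision features. A quick check:
print(np.finfo(np.float16))
# finfo(resolution=0.001, min=-6.55040e+04, max=6.55040e+04, dtype=float16)  (approximately)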
  • Applying it
# Convert the training set
sample_train = reduce_mem_usage(df_train)
# Convert the test set
sample_test = reduce_mem_usage(df_test)
  • Before and after
Memory usage of dataframe is 37200128.00 bytes
Memory usage after optimization is: 10200232.00 bytes
Decreased by 72.6%
Memory usage of dataframe is 12000128.00 bytes
Memory usage after optimization is: 3200232.00 bytes
Decreased by 73.3%
  • Summary: the memory saving is substantial (over 70%), so from now on the conversion can be applied right when the data is read:
df_train = reduce_mem_usage(pd.read_csv('used_car_train_20200313.csv', sep=' '))
df_test = reduce_mem_usage(pd.read_csv('used_car_testA_20200313.csv', sep=' '))
Step 3: Modeling and evaluation strategy
1) Cross-validation
  • Data sets: training set (train set), validation set (validation set), test set (test set)
  • Cross validation is a method for assessing how stable a model is:
    • Split the data into n folds; in turn, use each fold as the test set and the remaining n-1 folds as the training set;
    • Compute the model's score n times to estimate its average performance;
    • A single train/test split can bias the result, so the average over n cross-validation rounds is a better measure of model quality.
from sklearn.model_selection import cross_val_score
# make_scorer turns a metric into a custom scoring function
from sklearn.metrics import mean_absolute_error, make_scorer  # MAE

# A closure: wraps a metric so it is computed on the log of the labels
def log_transfer(func):
    def wrapper(y, yhat):
        # np.nan_to_num guards against log() of non-positive predictions
        result = func(np.log(y), np.nan_to_num(np.log(yhat)))
        return result
    return wrapper
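The snippets below assume `model`, `train_X`, `train_y`, and `train_y_ln` already exist; they are never defined in this post. A minimal setup sketch (the column names and fill value are my assumptions based on the competition data):
# Hypothetical setup: numeric features only, target and ID dropped
feature_cols = [c for c in sample_train.columns
                if c not in ['price', 'SaleID'] and str(sample_train[c].dtype) != 'category']
train_X = sample_train[feature_cols].fillna(-1)
train_y = sample_train['price']
train_y_ln = np.log1p(train_y)  # the log(x+1)-transformed label used below
model = LinearRegression()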
# Five-fold cross-validation
scores = cross_val_score(model
                         , X=train_X
                         , y=train_y # raw, untransformed label
                         , verbose=1
                         , cv=5
                         , scoring=make_scorer(log_transfer(mean_absolute_error)) # scoring function
                        )
print(scores)
print('AVG: ', np.mean(scores))
# Linear regression, five-fold CV on the raw label (error about 1.48)

[1.46040217 1.49406311 1.50689246 1.4998853 1.46792771]
AVG: 1.48583415012742

# Five-fold cross-validation
scores = cross_val_score(model
                         , X=train_X
                         , y=train_y_ln # log(x+1)-transformed label
                         , verbose=1
                         , cv=5
                         , scoring=make_scorer(log_transfer(mean_absolute_error)) # scoring function
                        )
print(scores)
print('AVG: ', np.mean(scores))
# Linear regression, five-fold CV on the transformed label (error about 0.025)

[0.02482099 0.02514258 0.02525959 0.02488387 0.02523581]
AVG: 0.025068566209894654
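Note that a model fit on the log(x+1)-transformed label predicts log prices; final predictions must be mapped back to the original scale with np.expm1. A sketch (assuming `model` is fit on `train_y_ln`):
model.fit(train_X, train_y_ln)
pred_price = np.expm1(model.predict(train_X))  # invert the log(x+1) transform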

2) Learning curves and validation curves
# Learning curve and validation curve
from sklearn.model_selection import learning_curve, validation_curve
# ?learning_curve  (view the docstring in Jupyter)
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None, n_jobs=1, train_size=np.linspace(.1, 1.0, 5)):
    plt.figure()
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel('Training examples')
    plt.ylabel('score')
    train_sizes, train_scores, test_scores = learning_curve(estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_size, scoring=make_scorer(mean_absolute_error))
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    plt.grid()  # background grid
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1,
                     color="r")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std, alpha=0.1,
                     color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color='r',
             label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
             label="Cross-validation score")
    plt.legend(loc="best")
    return plt
plot_learning_curve(LinearRegression()
                    , 'Linear_model'
                    , train_X[:1000]
                    , train_y_ln[:1000]
                    , ylim=(0.0, 0.5)
                    , cv=5
                    , n_jobs=1)

[Figure: learning curve of the linear model (training vs. cross-validation MAE)]

Step 4: Model tuning strategies
## Parameter search space for LightGBM:

objective = ['regression', 'regression_l1', 'mape', 'huber', 'fair']

num_leaves = [3, 5, 10, 15, 20, 40, 55]
max_depth = [3, 5, 10, 15, 20, 40, 55]
bagging_fraction = []
feature_fraction = []
drop_rate = []
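The greedy tuning below starts from a hard-coded baseline of 0.143, i.e. the five-fold CV error of an LGBMRegressor with default parameters. A sketch of how that number might be obtained (the exact value depends on the features and the data split):
baseline = np.mean(cross_val_score(LGBMRegressor(), X=train_X, y=train_y_ln,
                                   cv=5, scoring=make_scorer(mean_absolute_error)))
print(baseline)  # about 0.143 in the original run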
1) Greedy tuning
# Tune one parameter at a time, keeping the best value found so far.
best_obj = dict()
for obj in objective:
    model = LGBMRegressor(objective=obj)
    score = np.mean(cross_val_score(model, X=train_X, y=train_y_ln, verbose=0, cv=5, scoring=make_scorer(mean_absolute_error)))
    best_obj[obj] = score

best_leaves = dict()
for leaves in num_leaves:
    # fix the objective at its best value, then search num_leaves
    model = LGBMRegressor(objective=min(best_obj.items(), key=lambda x: x[1])[0], num_leaves=leaves)
    score = np.mean(cross_val_score(model, X=train_X, y=train_y_ln, verbose=0, cv=5, scoring=make_scorer(mean_absolute_error)))
    best_leaves[leaves] = score

best_depth = dict()
for depth in max_depth:
    # fix objective and num_leaves, then search max_depth
    model = LGBMRegressor(objective=min(best_obj.items(), key=lambda x: x[1])[0],
                          num_leaves=min(best_leaves.items(), key=lambda x: x[1])[0],
                          max_depth=depth)
    score = np.mean(cross_val_score(model, X=train_X, y=train_y_ln, verbose=0, cv=5, scoring=make_scorer(mean_absolute_error)))
    best_depth[depth] = score
# 0.143 is the default-parameter baseline error
sns.lineplot(x=['0_initial', '1_turning_obj', '2_turning_leaves', '3_turning_depth'], y=[0.143, min(best_obj.values()), min(best_leaves.values()), min(best_depth.values())])

[Figure: CV error after each greedy tuning step]

2) Grid search
from sklearn.model_selection import GridSearchCV
# Exhaustively evaluate every combination in the grid with five-fold CV
parameters = {'objective': objective, 'num_leaves': num_leaves, 'max_depth': max_depth}
model = LGBMRegressor()
clf = GridSearchCV(model, parameters, cv=5)
clf = clf.fit(train_X, train_y_ln)
clf.best_params_
clf.best_score_
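To put the tuned model on the same scale as the other methods, one can refit with the best parameters and score it with the same MAE scorer (a sketch using the names defined above):
best_model = LGBMRegressor(**clf.best_params_)
score = np.mean(cross_val_score(best_model, X=train_X, y=train_y_ln,
                                cv=5, scoring=make_scorer(mean_absolute_error)))
print(score)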
3) Bayesian optimization
from bayes_opt import BayesianOptimization
# BayesianOptimization maximizes its target, so return 1 - MAE
def rf_cv(num_leaves, max_depth, subsample, min_child_samples):
    val = cross_val_score(
        LGBMRegressor(objective='regression_l1',
            num_leaves=int(num_leaves),  # continuous suggestions must be cast to int
            max_depth=int(max_depth),
            subsample=subsample,
            min_child_samples=int(min_child_samples)
        ),
        X=train_X, y=train_y_ln, verbose=0, cv=5, scoring=make_scorer(mean_absolute_error)
    ).mean()
    return 1 - val
rf_bo = BayesianOptimization(
    rf_cv,
    {
    'num_leaves': (2, 100),
    'max_depth': (2, 100),
    'subsample': (0.1, 1),
    'min_child_samples': (2, 100)
    }
)
rf_bo.maximize()  # run the optimization loop; rf_bo.max is empty without this
1 - rf_bo.max['target']  # convert the best target back to an MAE
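After the run, the best hyperparameter set can be read back from the optimizer (a short usage sketch):
print(rf_bo.max['params'])  # best num_leaves, max_depth, subsample, min_child_samples found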