Graduation thesis code

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
import time
from psmpy import PsmPy
from psmpy.plotting import *

df1 = pd.read_excel('./dolphinfs_fengjiaqi07/price_distance_data.xlsx')
df1['product_id'] = df1.groupby(['mt_shop_id', 'mt_product_id']).ngroup() + 1
arr1 = df1['spu_type'].unique()

# Map each raw spu_type value to an anonymized label 'spu0', 'spu1', ...
def myfunc1(x):
    for i in range(len(arr1)):
        if x == arr1[i]:
            return 'spu' + f'{i}'

df1['spu_type'] = df1['spu_type'].apply(myfunc1)
df1 = df1.iloc[:, [46,4,5,6,10,24,12,13,14,23,27,28,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44]]
df1.drop(df1.columns[0], axis=1, inplace=True)

Plot pie charts of the product distribution

# Draw one pie chart per categorical dimension (same pattern as the 2x2
# subplot version further below): share of products by SPU type, shop tier,
# rating, city tier, and brand flag.
varlist1 = ['spu_type', 'shop_type', 'mt_shop_star_intv', 'dim_city_rank', 'is_brand']
varlist2 = ['Product SPU distribution', 'Shop tier distribution', 'Product rating distribution',
            'Selling-city tier distribution', 'Brand product distribution']
for varstr1, varstr2 in zip(varlist1, varlist2):
    vardf1 = df1.groupby([varstr1])['product_id'].count()
    fig1 = plt.figure(figsize=(8, 8))
    ax1 = fig1.add_subplot(111)
    # autopct renders the percentages; pctdistance sets their distance from the center
    ax1.pie(vardf1, labels=vardf1.index, radius=1.2, autopct='%.1f%%', pctdistance=0.8)
    ax1.set_title(varstr2, fontsize=16)
    ax1.title.set_y(1.1)
    plt.legend()
    plt.show()

Get descriptive statistics for the products

df1.iloc[:, 1:4].describe()
df1.iloc[:, 9:27].describe()

# Fill missing values: count-type columns with 0, rate-type columns with their column mean
fill_values = {'shop_rec_uv_60d': 0, 'shop_rec_order_uv_60d': 0, 'review_60d_cnt': 0,
               'shop_rec_uvctr_60d': df1['shop_rec_uvctr_60d'].mean(),
               'shop_rec_click_order_rate_60d': df1['shop_rec_click_order_rate_60d'].mean(),
               'shop_all_uvcvr_60d': df1['shop_all_uvcvr_60d'].mean(),
               'positive_review_rate_60d': df1['positive_review_rate_60d'].mean(),
               'compete_region_product_num': df1['compete_region_product_num'].mean(),
               'compete_region_shop_num': df1['compete_region_shop_num'].mean(),
               'region_uvcvr_60d': df1['region_uvcvr_60d'].mean(),
               'region_intend_cover_rate_60d': df1['region_intend_cover_rate_60d'].mean(),
               'region_order_cover_rate_60d': df1['region_order_cover_rate_60d'].mean()}
df1.fillna(value=fill_values, inplace=True)

# Keep products with enough exposure and plausible delivery distances
df1 = df1[df1['intend_pv'] > 30]
df1 = df1[df1['distance_0_75per'] < 15000]
len(df1)

Explore how many distinct prices each product has

vardf1 = df1.groupby(['product_id'])['min_sales_price_d'].size()
vardf1.value_counts()

Pair up the observations of products that have had price changes

vardf1 = df1.iloc[:, [0, 1, 3]].copy()
vardf2 = pd.DataFrame(columns=['product_id', 'min_sales_price_d', 'distance_0_75per',
                               'after_sale_price', 'after_distance_0_75per'])
# Rank each product's observed prices, then pair every price with the next-lower one
vardf1['price_rank'] = vardf1.groupby(['product_id'])['min_sales_price_d'].rank()
for i in range(1, 7):
    df_tmp1 = vardf1[vardf1['price_rank'] == i + 1]          # higher (pre-change) price
    df_tmp2 = vardf1[vardf1['price_rank'] == i].copy()       # lower (post-change) price
    df_tmp2.rename(columns={
        'min_sales_price_d': 'after_sale_price',
        'distance_0_75per': 'after_distance_0_75per'
    }, inplace=True)
    df_tmp3 = pd.merge(df_tmp1, df_tmp2, on=['product_id'], how='inner')
    vardf2 = pd.concat([vardf2, df_tmp3], axis=0)

vardf2 = vardf2.iloc[:, 0:5]
vardf2.head()
df2 = pd.merge(df1, vardf2, on=['product_id', 'min_sales_price_d', 'distance_0_75per'], how='inner')

Transform the data: bucket by discount level and merge SPU types, city tiers, and star ratings

# Bucket the discount ratio (after-price / before-price) into five levels
def varfunc1(x):
    if x > 0.95:
        return '0.95~1'
    elif x > 0.85:
        return '0.85~0.95'
    elif x > 0.7:
        return '0.7~0.85'
    elif x > 0.5:
        return '0.5~0.7'
    else:
        return '0.0~0.5'

df2['discount_level'] = (df2['after_sale_price'] / df2['min_sales_price_d']).apply(varfunc1)
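As a quick sanity check on the bucketing (an added line, not in the original notebook), the bucket sizes can be inspected with:

df2['discount_level'].value_counts()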

# Merge sparse SPU categories into two umbrella groups
def func1(str1):
    if str1 in ['spu4', 'spu9', 'spu11', 'spu13']:
        return 'spu4'
    elif str1 in ['spu8', 'spu12', 'spu14', 'spu15', 'spu16']:
        return 'spu8'
    else:
        return str(str1)

df2['spu_type'] = df2['spu_type'].apply(func1)

# Merge the low city tiers into a single 'D及以下' (D and below) bucket
def func2(str1):
    if str1 in ['D', 'E', 'F', 'Z']:
        return 'D及以下'
    else:
        return str(str1)

df2['dim_city_rank'] = df2['dim_city_rank'].apply(func2)

# Merge star ratings below 4.0 into a single '3.5及以下' (3.5 and below) bucket
def func3(x):
    if x < 4.0:
        return '3.5及以下'
    else:
        return str(x)

df2['mt_shop_star_intv'] = df2['mt_shop_star_intv'].apply(func3)

varlist1 = ['spu_type', 'shop_type', 'mt_shop_star_intv', 'dim_city_rank']
varlist2 = ['Product SPU distribution', 'Shop tier distribution',
            'Product rating distribution', 'Selling-city tier distribution']

fig1, axes1 = plt.subplots(nrows=2, ncols=2, figsize=(12, 12))
for i, ax in enumerate(axes1.flatten()):
    varstr1 = varlist1[i]
    varstr2 = varlist2[i]
    vardf1 = df2.groupby([varstr1])['product_id'].count()
    # autopct renders the percentages; pctdistance sets their distance from the center
    ax.pie(vardf1, labels=vardf1.index, radius=1.2, autopct='%.1f%%', pctdistance=0.8)
    ax.set_title(varstr2, fontsize=16)
    ax.title.set_y(1.1)
    ax.legend()

plt.subplots_adjust(wspace=0.5)  # adjust spacing between axes
plt.show()

Select the unpaired products, split them into two groups, and match them with PSM

df3 = df1[~df1['product_id'].isin(df2['product_id'])].copy()
df3.reset_index(drop=True, inplace=True)
# Randomly assign a pseudo treatment/control label for propensity-score matching
np.random.seed(13)
df3['random_group'] = np.random.choice([0, 1], size=len(df3))
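A quick added check (not in the original notebook) that the random split is roughly balanced:

df3['random_group'].value_counts()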

# Apply the same category-merging helpers (func1 / func2 / func3) defined above
df3['spu_type'] = df3['spu_type'].apply(func1)
df3['dim_city_rank'] = df3['dim_city_rank'].apply(func2)
df3['mt_shop_star_intv'] = df3['mt_shop_star_intv'].apply(func3)

A worked example

vardf1 = df3[(df3['spu_type'] == 'spu0') & (df3['dim_city_rank'] == 'S')].copy()
vardf1.drop(['spu_type', 'dim_city_rank'], axis=1, inplace=True)
vardf1 = pd.get_dummies(vardf1)

# Fit a logistic propensity model, then 1:1 nearest-neighbour match on the logit
psm1 = PsmPy(vardf1, treatment='random_group', indx='product_id',
             exclude=['min_sales_price_d', 'distance_0_75per'])
psm1.logistic_ps(balance=False)
psm1.predicted_data
psm1.knn_matched(matcher='propensity_logit', replacement=False, caliper=0.01)

Plot the propensity scores

psm1.plot_match(Title='Matching Result', Ylabel='# of obs', Xlabel='propensity logit', names=['treatment', 'control'])
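Beyond the matching plot, covariate balance is worth checking. A hedged sketch, assuming the installed psmpy version exposes its effect-size utilities (recent releases do):

psm1.effect_size_plot()  # standardized mean differences before vs. after matching
psm1.effect_size         # the underlying effect-size table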

varlist1 = ['spu0', 'spu1', 'spu2', 'spu3', 'spu4', 'spu6', 'spu7', 'spu8', 'spu5', 'spu10']
varlist2 = ['C', 'B', 'A', 'D及以下', 'S']
match_df1 = pd.DataFrame(columns=['product_id', 'matched_ID'])

# Run PSM separately within each (spu_type, city-tier) stratum and collect the matched pairs
for varstr1 in varlist1:
    for varstr2 in varlist2:
        vardf1 = df3[(df3['spu_type'] == varstr1) & (df3['dim_city_rank'] == varstr2)].copy()
        vardf1.drop(['spu_type', 'dim_city_rank'], axis=1, inplace=True)
        vardf1 = pd.get_dummies(vardf1)
        psm1 = PsmPy(vardf1, treatment='random_group', indx='product_id',
                     exclude=['min_sales_price_d', 'distance_0_75per'])
        psm1.logistic_ps(balance=False)
        psm1.knn_matched(matcher='propensity_logit', replacement=False, caliper=0.01)
        vardf2 = psm1.matched_ids
        match_df1 = pd.concat([match_df1, vardf2], axis=0)

df4 = pd.merge(df3, match_df1, on='product_id', how='inner')

# Attach the matched partner's price and distance ...
df4 = pd.merge(df4, df3.loc[:, ['product_id', 'min_sales_price_d', 'distance_0_75per']],
               left_on='matched_ID', right_on='product_id')

# ... and the matched partner's full feature set (suffixed _y by the merge)
df4 = pd.merge(df4, df3, left_on='matched_ID', right_on='product_id')
df4.rename(columns={
    'min_sales_price_d_y': 'after_sale_price',
    'distance_0_75per_y': 'after_distance_0_75per',
    'min_sales_price_d_x': 'min_sales_price_d',
    'distance_0_75per_x': 'distance_0_75per'}, inplace=True)

# Orient each pair so that min_sales_price_d is the higher (pre-discount) price
def swap(row):
    if row['min_sales_price_d'] < row['after_sale_price']:
        row['min_sales_price_d'], row['after_sale_price'] = \
            row['after_sale_price'], row['min_sales_price_d']
        row['distance_0_75per'], row['after_distance_0_75per'] = \
            row['after_distance_0_75per'], row['distance_0_75per']
    return row

df4 = df4.apply(swap, axis=1)
# Reuse varfunc1 (defined above) to bucket the matched pair's implied discount ratio
df4['discount_level'] = (df4['after_sale_price'] / df4['min_sales_price_d']).apply(varfunc1)

from sklearn.model_selection import train_test_split
import lightgbm as lgb
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import make_scorer
from sklearn.model_selection import GridSearchCV
from bayes_opt import BayesianOptimization

Build the PSM-BO-LightGBM model

feature_list = ['price_gap', 'spu_type_y', 'shop_type_y', 'mt_shop_star_intv_y',
                'dim_city_rank_y', 'is_brand_y', 'shop_online_days_y', 'shop_product_num_y',
                'prd_online_days_y', 'shop_rec_uv_60d_y', 'shop_rec_uvctr_60d_y',
                'shop_rec_order_uv_60d_y', 'shop_rec_click_order_rate_60d_y',
                'shop_all_intend_uv_60d_y', 'shop_all_uvcvr_60d_y',
                'shop_mul_prd_intend_uv_60d_y', 'shop_mul_prd_uvcvr_60d_y',
                'review_60d_cnt_y', 'positive_review_rate_60d_y',
                'compete_region_product_num_y', 'compete_region_shop_num_y',
                'region_uvcvr_60d_y', 'region_intend_cover_rate_60d_y',
                'region_order_cover_rate_60d_y']

model_df1 = df4
X = model_df1.loc[:, feature_list]
X = pd.get_dummies(X)
# Target: change in the 75th-percentile order distance after the price change
Y = model_df1['after_distance_0_75per'] - model_df1['distance_0_75per']
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3, random_state=13)

# Build LightGBM Dataset objects
lgb_train = lgb.Dataset(X_train, Y_train)
lgb_eval = lgb.Dataset(X_test, Y_test, reference=lgb_train)

# Define the objective to optimize (first version)
def LGB_L1_bayesian(num_leaves, learning_rate, feature_fraction,
                    lambda_l1, lambda_l2, max_depth, bagging_fraction, bagging_freq):
    # LightGBM expects num_leaves, max_depth, and bagging_freq to be integers
    num_leaves = int(num_leaves)
    max_depth = int(max_depth)
    bagging_freq = int(bagging_freq)

    param = {
        'num_leaves': num_leaves,
        'learning_rate': learning_rate,
        'bagging_fraction': bagging_fraction,
        'bagging_freq': bagging_freq,
        'feature_fraction': feature_fraction,
        'lambda_l1': lambda_l1,
        'lambda_l2': lambda_l2,
        'max_depth': max_depth,
        'save_binary': True,
        'seed': 1337,
        'feature_fraction_seed': 1337,
        'bagging_seed': 1337,
        'drop_seed': 1337,
        'data_random_seed': 1337,
        'boosting_type': 'gbdt',
        'verbose': 1,
        'metric': 'mae',
        'boost_from_average': False,
        'objective': 'regression_l1',
    }

    num_round = 2000
    clf = lgb.train(param, lgb_train, num_round, valid_sets=[lgb_train, lgb_eval],
                    callbacks=[lgb.early_stopping(stopping_rounds=10)])
    predictions = clf.predict(X_test, num_iteration=clf.best_iteration)
    # Score rises as the held-out MAE falls
    score = 1 / (1 + mean_absolute_error(Y_test, predictions))
    return score

Define the objective function to optimize

def LGB_L1_bayesian(num_leaves, learning_rate, feature_fraction, lambda_l1,
                    lambda_l2, max_depth, bagging_fraction, bagging_freq):
    # LightGBM expects num_leaves, max_depth, and bagging_freq to be integers
    params = {
        'objective': 'regression',
        'metric': 'rmse',
        'num_leaves': int(num_leaves),
        'learning_rate': learning_rate,
        'feature_fraction': feature_fraction,
        'lambda_l1': lambda_l1,
        'lambda_l2': lambda_l2,
        'max_depth': int(max_depth),
        'bagging_fraction': bagging_fraction,
        'bagging_freq': int(bagging_freq),
    }

    num_round = 2000
    clf = lgb.train(params, lgb_train, num_round, valid_sets=[lgb_train, lgb_eval],
                    callbacks=[lgb.early_stopping(stopping_rounds=10)])
    predictions = clf.predict(X_test, num_iteration=clf.best_iteration)
    # Score rises as the held-out MAE falls
    score = 1 / (1 + mean_absolute_error(Y_test, predictions))
    return score

Set the parameter bounds

bounds_LGB_L1 = {
    'num_leaves': (20, 50),
    'learning_rate': (0.005, 0.1),
    'feature_fraction': (0.1, 1),
    'lambda_l1': (0, 10.0),
    'lambda_l2': (0, 10.0),
    'max_depth': (3, 15),
    'bagging_fraction': (0.2, 1),
    'bagging_freq': (1, 10),
}

Create the BayesianOptimization object

LGB_BO = BayesianOptimization(LGB_L1_bayesian, bounds_LGB_L1)

Run the optimization

LGB_BO.maximize(init_points=5, n_iter=10)  # set the number of initial points and iterations as needed

Inspect the best parameters

LGB_BO.max['params']
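The optimizer returns every parameter as a float, so before reusing LGB_BO.max['params'] for a final model the integer-valued ones need rounding. A minimal sketch (the final_params name is ours, not from the notebook):

best = LGB_BO.max['params']
final_params = {**best,
                'num_leaves': int(round(best['num_leaves'])),
                'max_depth': int(round(best['max_depth'])),
                'bagging_freq': int(round(best['bagging_freq'])),
                'objective': 'regression_l1',
                'metric': 'mae'}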

params = {
    'task': 'train',
    'boosting_type': 'gbdt',
    'objective': 'regression_l1',
    'metric': 'mae',
    'max_depth': 13,
    'num_leaves': 25,
    'min_child_samples': 25,
    'min_child_weight': 0.001,
    'learning_rate': 0.070,
    'feature_fraction': 0.517,
    'bagging_fraction': 0.988,
    'bagging_freq': 3,
    'verbose': -1,
    'reg_alpha': 0.001,
    'reg_lambda': 0.01,
    'lambda_l1': 4.263,
    'lambda_l2': 3.209
}
callback = [lgb.early_stopping(stopping_rounds=10, verbose=True)]

Train

m0 = lgb.train(params, lgb_train, num_boost_round=1000, valid_sets=[lgb_train, lgb_eval], callbacks=callback)
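As an added check (not in the original notebook), the tuned model's held-out error can be computed the same way the later models are evaluated:

Y_pred = m0.predict(X_test)
mean_absolute_error(Y_test, Y_pred)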

Compare LightGBM and BO-LightGBM

objective = ['regression_l2', 'regression_l1', 'quantile', 'mape']
metrics = ['l2', 'mae', 'quantile', 'mape']
metrics_test_data = pd.DataFrame(columns=['objective', 'metric', 'MAE'])
print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
      'starting the objective/metric evaluation sweep')
for i in objective:
    for k in metrics:
        # .size grows by 3 per appended row, giving a fresh row label each iteration
        size = metrics_test_data.size
        params = {
            'task': 'train',
            'boosting_type': 'gbdt',
            'objective': i,
            'metric': k,
            'max_depth': 7,
            'num_leaves': 31,
            'learning_rate': 0.1,
            'feature_fraction': 0.8,
            'bagging_fraction': 0.8,
            'bagging_freq': 5,
            'verbose': -1
        }
        callback = lgb.early_stopping(stopping_rounds=10, verbose=0)
        gbm = lgb.train(params, lgb_train, num_boost_round=2000,
                        valid_sets=lgb_eval, callbacks=[callback])
        Y_pred = gbm.predict(X_test)
        metrics_test_data.loc[size] = [i, k, mean_absolute_error(Y_test, Y_pred)]
        print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), i, '+', k, 'evaluated;',
              'best iteration is:', gbm.best_iteration)

metrics_test_data
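The scorer below relies on median_absolute_percentage_error, which is not a sklearn built-in and is not defined in this excerpt. A minimal sketch of what it presumably computes, judging by the name:

def median_absolute_percentage_error(y_true, y_pred):
    # hypothetical helper: median of the absolute relative errors
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    return float(np.median(np.abs((y_true - y_pred) / y_true)))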

neg_median_absolute_percentage_error = make_scorer(median_absolute_percentage_error,
                                                   greater_is_better=False)
# Grid search over tree depth and leaf count
print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), 'starting GridSearch over max_depth and num_leaves')
model_lgb = lgb.LGBMRegressor(objective='regression_l1',
                              metric='mae',
                              learning_rate=0.1,
                              subsample=0.8,
                              colsample_bytree=0.8,
                              subsample_freq=5)
params_test1 = {
    'max_depth': range(7, 11, 1),
    'num_leaves': range(10, 90, 10)
}
gsearch1 = GridSearchCV(estimator=model_lgb,
                        param_grid=params_test1,
                        scoring=neg_median_absolute_percentage_error,
                        cv=5,
                        verbose=1,
                        n_jobs=-1)
gsearch1.fit(X, Y)
print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), 'finished GridSearch over max_depth and num_leaves')
print('Best parameters found by grid search are:', gsearch1.best_params_)

# Grid search over the minimum-child constraints, reusing the scorer defined above
print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), 'starting GridSearch over min_child parameters')
model_lgb = lgb.LGBMRegressor(objective='regression_l1',
                              metric='mae',
                              learning_rate=0.1,
                              subsample=0.8,
                              colsample_bytree=0.8,
                              subsample_freq=5)
params_test3 = {
    'min_child_samples': [19, 20, 21, 22, 23, 24, 25],
    'min_child_weight': [0.001, 0.002]
}
gsearch1 = GridSearchCV(estimator=model_lgb,
                        param_grid=params_test3,
                        scoring=neg_median_absolute_percentage_error,
                        cv=5,
                        verbose=1,
                        n_jobs=-1)
gsearch1.fit(X, Y)
print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), 'finished GridSearch')
print('Best parameters found by grid search are:', gsearch1.best_params_)

# Grid search over the L1/L2 regularization strengths
print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), 'starting GridSearch over regularization')
model_lgb = lgb.LGBMRegressor(objective='regression_l1',
                              metric='mae',
                              learning_rate=0.1,
                              subsample=0.8,
                              colsample_bytree=0.8,
                              subsample_freq=5)
params_test5 = {
    'reg_alpha': [0, 0.001, 0.01, 0.03, 0.08, 0.3],
    'reg_lambda': [0, 0.001, 0.01, 0.03, 0.08, 0.3]
}
gsearch1 = GridSearchCV(estimator=model_lgb,
                        param_grid=params_test5,
                        scoring=neg_median_absolute_percentage_error,
                        cv=5,
                        verbose=1,
                        n_jobs=-1)
gsearch1.fit(X, Y)
print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), 'finished GridSearch')
print('Best parameters found by grid search are:', gsearch1.best_params_)

params = {
    'task': 'train',
    'boosting_type': 'gbdt',
    'objective': 'regression_l1',
    'metric': 'mae',
    'max_depth': 7,
    'num_leaves': 10,
    'min_child_samples': 25,
    'min_child_weight': 0.001,
    'learning_rate': 0.1,
    'feature_fraction': 0.8,
    'bagging_fraction': 0.8,
    'bagging_freq': 5,
    'verbose': -1,
    'reg_alpha': 0.001,
    'reg_lambda': 0.01
}
callback = [lgb.early_stopping(stopping_rounds=10, verbose=True)]
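The evaluation below calls regression_metrics, a helper not shown in this excerpt. A plausible minimal sketch, assuming it just reports the usual regression errors:

def regression_metrics(y_true, y_pred):
    # hypothetical stand-in: report MAE and RMSE on the given predictions
    mae = mean_absolute_error(y_true, y_pred)
    rmse = mean_squared_error(y_true, y_pred) ** 0.5
    print(f'MAE: {mae:.4f}  RMSE: {rmse:.4f}')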

Train

m1 = lgb.train(params, lgb_train, num_boost_round=1000, valid_sets=[lgb_train, lgb_eval], callbacks=callback)
# Predict on the held-out test set
Y_pred = m1.predict(X_test)
# Evaluate the model
regression_metrics(Y_test, Y_pred)

# outer_test_df1 is an external hold-out set prepared outside this excerpt
X = outer_test_df1.loc[:, feature_list]
X = pd.get_dummies(X)
Y = outer_test_df1['after_distance_0_75per'] - outer_test_df1['distance_0_75per']
# Predict on the external hold-out set
Y_pred = m1.predict(X)
# Evaluate the model
regression_metrics(Y, Y_pred)


Re-create the BayesianOptimization object and run a second search

LGB_BO = BayesianOptimization(LGB_L1_bayesian, bounds_LGB_L1)
LGB_BO.maximize(init_points=5, n_iter=10)  # set the number of initial points and iterations as needed

# Inspect the best score found
LGB_BO.max['target']

# Inspect the parameters that achieved it
LGB_BO.max['params']

params = {
    'task': 'train',
    'boosting_type': 'gbdt',
    'objective': 'regression_l1',
    'metric': 'mae',
    'max_depth': 9,
    'num_leaves': 6,
    'min_child_samples': 25,
    'min_child_weight': 0.001,
    'learning_rate': 0.082,
    'feature_fraction': 0.699,
    'bagging_fraction': 0.981,
    'bagging_freq': 3,
    'verbose': -1,
    'reg_alpha': 0.001,
    'reg_lambda': 0.01
}
callback = [lgb.early_stopping(stopping_rounds=10, verbose=True)]

Train

m1 = lgb.train(params, lgb_train, num_boost_round=1000, valid_sets=[lgb_train, lgb_eval], callbacks=callback)
# Predict on the held-out test set
Y_pred = m1.predict(X_test)
# Evaluate the model
regression_metrics(Y_test, Y_pred)
