数据清洗步骤

1. df_train_base.info()

2. df_train_base.duplicated().sum()

3. df_train_base.isnull().sum()

4. df_train_base['age'].fillna(df_train_base['age'].median(), inplace=True)

5. df_train_base.dropna(inplace= True)

6. df_train_base['name'].unique()

7.mapping + drop

 df_train_base['gender'].map({
    "Male":1,
    "Female":0
})
#决定删除
df_train_base.drop('number',axis=1,inplace=True)

8. df_train_base = df_train_base.reset_index(drop=True)  # 注意:reset_index 不是就地操作,需要赋值回变量

9. 类型转换:xxx = xxx.astype(int)

10. 数据统计

df_tr = df_train.groupby(['id'])['click'].count()
df_tx = df_train_trx.groupby(['id'])['amt'].sum()

截取 'A00001' → '00001'
df_train['id']= df_train['id'].replace('\D','', regex=True).astype(int)

时间探索
截取天数:2023-08-26 12:57:23
df_train['day']=[str(x)[10:13] for x in df_train['time']]
df_train['day']=df_train['day'].astype(int)
df_train.groupby(['id','day'])['day'].agg(['count'])

(此处原文为输出结果截图)

tim=df_train.groupby(['id','day'])['day'].agg(['count']).reset_index()

(此处原文为输出结果截图)

tim = tim.sort_values(['id','count'], ascending=False) #降序
#取出现频率最高(max(count))的那一行,实现类似于rank() over(order by count desc)
#最后drop调不需要的column - drop('count',axis=1,inplace=True)
df_train_time=df_train_time.groupby('id').first().reset_index() 

(此处原文为输出结果截图)

11. Merge

df_train = pd.merge(train_tmp1,df_tmp2,on=['id'])

12. AutoML --autogluon探索合适模型

from autogluon.tabular import TabularDataset, TabularPredictor
train_da = TabularDataset(df_train)
subsample_size = 50000
train_da = train_da.sample(n=subsample_size, random_state=40)
label = 'label'

save_path = 'agModels-predictClass'  # specifies folder to store trained models
predictor = TabularPredictor(label=label, path=save_path).fit(train_da)

13. 优化参数

from sklearn.model_selection import cross_val_score
from lightgbm import LGBMClassifier

scorel=[]
for i in range(0,200,10):
    rfc = LGBMClassifier(n_estimators=i+1,
                                 n_jobs=-1,
                                 random_state=90)
    score = cross_val_score(rfc,train_data,train_target,cv=10).mean()
    scorel.append(score)
print(max(scorel),(scorel.index(max(scorel))*10)+1)
plt.figure(figsize=[20,5])
plt.plot(range(1,201,10),scorel)

=============================================================
from sklearn.model_selection import GridSearchCV
from lightgbm import LGBMRegressor
param_grid = [
{'learning_rate': [0.01,0.03,0.05,0.02],
 'n_estimators':[25,50,75,100]
},
]
lightgbm_reg = LGBMRegressor()
grid_search = GridSearchCV(lightgbm_reg, param_grid, cv=10,
                          scoring='neg_mean_squared_error')
grid_search.fit(X_train, y_train)
print(grid_search.best_params_)
==============================================================
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestRegressor
param_grid = [
{'max_features': [12,18,24,32],
 'max_depth':[24,32,40],
 'min_samples_leaf':[18,24,32],
 'min_samples_split':[24,32,40]
},
]
forest_reg = RandomForestRegressor()
grid_search = GridSearchCV(forest_reg, param_grid, cv=10,
                          scoring='neg_mean_squared_error')
grid_search.fit(X_train, y_train)
print(grid_search.best_params_)

from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier(n_estimators=181, random_state=42) 
model.fit(X_train, y_train)
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值