Data Mining: HeartbeatClassification

This project comes from a Tianchi competition; this post is a study log.

Task 4: Modeling and Parameter Tuning, Part 1 (Modeling)

# Import packages and settings
import pandas as pd
import numpy as np
from sklearn.metrics import f1_score  # harmonic mean of precision and recall

import os
import seaborn as sns
import matplotlib.pyplot as plt

import warnings
warnings.filterwarnings("ignore")
# Memory-reduction helper, then read the data
def reduce_mem_usage(df):
    start_mem = df.memory_usage().sum() / 1024**2 
    print('Memory usage of dataframe is {:.2f} MB'.format(start_mem))
    
    for col in df.columns:
        col_type = df[col].dtype
        
        if col_type != object:
            c_min = df[col].min()
            c_max = df[col].max()
            if str(col_type)[:3] == 'int':
                if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
                    df[col] = df[col].astype(np.int8)
                elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
                    df[col] = df[col].astype(np.int16)
                elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
                    df[col] = df[col].astype(np.int32)
                elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
                    df[col] = df[col].astype(np.int64)  
            else:
                if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
                    df[col] = df[col].astype(np.float16)
                elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
                    df[col] = df[col].astype(np.float32)
                else:
                    df[col] = df[col].astype(np.float64)
        else:
            df[col] = df[col].astype('category')

    end_mem = df.memory_usage().sum() / 1024**2 
    print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
    print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
    
    return df
data = pd.read_csv('Train.csv')  # adjust the path to wherever Train.csv lives
data_list = []
for items in data.values:
    # each row is: id, comma-separated signal string, label
    data_list.append([items[0]] + [float(i) for i in items[1].split(',')] + [items[2]])

data = pd.DataFrame(np.array(data_list))
data.columns = ['id'] + ['s_' + str(i) for i in range(len(data_list[0]) - 2)] + ['label']
data = reduce_mem_usage(data)

Output:
Memory usage of dataframe is 157.93 MB
Memory usage after optimization is: 39.67 MB
Decreased by 74.9%
As you can see, the in-memory size of the data has dropped substantially.
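
A quick sanity check (my own addition, not in the original notebook) is to look at which dtypes the downcast actually chose, since float16 keeps only about three significant decimal digits and can in principle distort signal values:

# Confirm the dtypes chosen by reduce_mem_usage and the resulting footprint
print(data.dtypes.value_counts())
print('{:.2f} MB'.format(data.memory_usage().sum() / 1024**2))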

Simple Modeling

The modeling below does not construct any new features; it uses the raw features directly.
First, split the dataset for cross-validation; here we use 5-fold cross-validation.

from sklearn.model_selection import KFold
X_train = data.drop(['id', 'label'], axis=1)
y_train = data['label']

# 5-fold cross-validation
folds = 5
seed = 2021
kf = KFold(n_splits=folds, shuffle=True, random_state=seed)

n_splits is the number of folds.
shuffle controls whether samples are shuffled before splitting: with shuffle=False, two runs produce identical splits; with shuffle=True, they differ.
random_state is the random seed: with shuffle=True and a fixed random_state, two runs produce identical splits again.
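
A quick way to convince yourself of this behavior (my own illustration):

from sklearn.model_selection import KFold
import numpy as np

X_demo = np.arange(10).reshape(-1, 1)

# Same seed: the shuffled folds are identical across two independent splitters
kf_a = KFold(n_splits=5, shuffle=True, random_state=2021)
kf_b = KFold(n_splits=5, shuffle=True, random_state=2021)
for (_, val_a), (_, val_b) in zip(kf_a.split(X_demo), kf_b.split(X_demo)):
    assert (val_a == val_b).all()
print('identical splits with a fixed random_state')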

# LightGBM has no built-in f1-score metric, so we define a custom evaluation function
def f1_score_vali(preds, data_vali):
    labels = data_vali.get_label()
    preds = np.argmax(preds.reshape(4, -1), axis=0)
    # reshape(4, -1): fix the row count at 4 (one row per class) and infer the column count
    score_vali = f1_score(y_true=labels, y_pred=preds, average='macro')
    return 'f1_score', score_vali, True
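
To see why reshape(4, -1) followed by argmax(axis=0) yields one class per sample: the code above assumes LightGBM hands feval a flat score array grouped by class (all samples' scores for class 0, then class 1, and so on). A toy illustration of that assumed layout:

import numpy as np

# Flat scores for 3 samples x 4 classes, grouped class by class
flat = np.array([0.7, 0.1, 0.2,    # class 0 scores for samples 0, 1, 2
                 0.1, 0.8, 0.1,    # class 1
                 0.1, 0.05, 0.6,   # class 2
                 0.1, 0.05, 0.1])  # class 3

print(np.argmax(flat.reshape(4, -1), axis=0))  # [0 1 2]: one class per sample

Note that LightGBM 4.x instead passes feval a 2-D (n_samples, n_classes) array for multiclass objectives, in which case np.argmax(preds, axis=1) without the reshape is the right call.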

Modeling with LightGBM

LightGBM does not need to compute information gain over all samples (it subsamples via GOSS), and it has built-in feature bundling (EFB) to reduce dimensionality, so it is faster.

Consider a feature column [1, nan, 1, nan, 1] and a feature column [nan, 1, nan, 1, nan]: they are never non-null on the same row, so they can be merged into a single column [1, 2, 1, 2, 1].
EFB's goal is exactly to find such mutually exclusive features and bundle them together, as in the sketch below.
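
A toy sketch of that bundling idea (my own illustration; LightGBM does this internally on histogram bins, offsetting values so the bundled features stay distinguishable):

import numpy as np
import pandas as pd

a = pd.Series([1, np.nan, 1, np.nan, 1])
b = pd.Series([np.nan, 1, np.nan, 1, np.nan])

# Mutually exclusive: never non-null on the same row
assert not (a.notna() & b.notna()).any()

# Merge into one column, shifting b's values by an offset of 1
bundled = a.fillna(0) + (b + 1).fillna(0)
print(bundled.tolist())  # [1.0, 2.0, 1.0, 2.0, 1.0]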

No cross-validation is used at this point; we start with a simple holdout split.


"""对训练集数据进行划分,分成训练集和验证集,并进行相应的操作"""
from sklearn.model_selection import train_test_split
import lightgbm as lgb
# 数据集划分
X_train_split, X_val, y_train_split, y_val = train_test_split(X_train, y_train, test_size=0.2)
train_matrix = lgb.Dataset(X_train_split, label=y_train_split)
valid_matrix = lgb.Dataset(X_val, label=y_val)

params = {  # parameters shared by all weak learners
    "learning_rate": 0.1,
    "boosting": 'gbdt',
    "lambda_l2": 0.1,         # L2 regularization, added to reduce overfitting
    "max_depth": -1,          # max depth of each weak learner (tree); -1 means unlimited
    "num_leaves": 128,        # controls tree complexity; num_leaves <= 2^max_depth
    "bagging_fraction": 0.8,  # row (data) subsampling
    "feature_fraction": 0.8,  # feature subsampling
    "metric": None,
    "objective": "multiclass",
    "num_class": 4,
    "nthread": 10,
    "verbose": -1,
}

# Train the model on the training set
model = lgb.train(params,
                  train_set=train_matrix,    # labeled training set
                  valid_sets=valid_matrix,   # labeled validation set
                  num_boost_round=2000,      # max boosting rounds, i.e. number of weak learners
                  verbose_eval=50,
                  early_stopping_rounds=200,
                  feval=f1_score_vali)

Output:
Training until validation scores don't improve for 200 rounds
[50] valid_0's multi_logloss: 0.0484957 valid_0's f1_score: 0.9583
[100] valid_0's multi_logloss: 0.043147 valid_0's f1_score: 0.964937
[150] valid_0's multi_logloss: 0.0452672 valid_0's f1_score: 0.966565
[200] valid_0's multi_logloss: 0.0472698 valid_0's f1_score: 0.967143
[250] valid_0's multi_logloss: 0.0489361 valid_0's f1_score: 0.967116
Early stopping, best iteration is:
[99] valid_0's multi_logloss: 0.0431298 valid_0's f1_score: 0.964661
Higher f1_score is better.
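
One version note: verbose_eval and early_stopping_rounds are lgb.train keyword arguments only up to LightGBM 3.x; in LightGBM >= 4.0 they were removed in favor of callbacks. A rough equivalent on 4.x:

model = lgb.train(params,
                  train_set=train_matrix,
                  valid_sets=[valid_matrix],
                  num_boost_round=2000,
                  feval=f1_score_vali,
                  callbacks=[lgb.log_evaluation(period=50),
                             lgb.early_stopping(stopping_rounds=200)])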

# Predict on the validation set
val_pre_lgb = model.predict(X_val, num_iteration=model.best_iteration)
preds = np.argmax(val_pre_lgb, axis=1)
score = f1_score(y_true=y_val, y_pred=preds, average='macro')
print('F1 of the untuned LightGBM model on the validation set: {}'.format(score))

Output:
F1 of the untuned LightGBM model on the validation set: 0.9646606357742953
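
Macro F1 weights every class equally, so with imbalanced heartbeat classes it can hide which class drags the average down. A quick per-class breakdown (my own addition, using scikit-learn's standard API):

from sklearn.metrics import classification_report

# average=None returns one F1 score per class instead of the macro average
print(f1_score(y_true=y_val, y_pred=preds, average=None))
print(classification_report(y_val, preds, digits=4))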

# Evaluate model performance with 5-fold cross-validation
"""Build and evaluate LightGBM models with 5-fold cross-validation"""
cv_scores = []
for i, (train_index, valid_index) in enumerate(kf.split(X_train, y_train)):
    print('************************************ {} ************************************'.format(str(i+1)))
    X_train_split, y_train_split = X_train.iloc[train_index], y_train.iloc[train_index]
    X_val, y_val = X_train.iloc[valid_index], y_train.iloc[valid_index]
    
    train_matrix = lgb.Dataset(X_train_split, label=y_train_split)
    valid_matrix = lgb.Dataset(X_val, label=y_val)

    params = {
                "learning_rate": 0.1,
                "boosting": 'gbdt',  
                "lambda_l2": 0.1,
                "max_depth": -1,
                "num_leaves": 128,
                "bagging_fraction": 0.8,
                "feature_fraction": 0.8,
                "metric": None,
                "objective": "multiclass",
                "num_class": 4,
                "nthread": 10,
                "verbose": -1,
            }
    
    model = lgb.train(params, 
                      train_set=train_matrix, 
                      valid_sets=valid_matrix, 
                      num_boost_round=2000, 
                      verbose_eval=100, 
                      early_stopping_rounds=200,
                      feval=f1_score_vali)
    
    val_pred = model.predict(X_val, num_iteration=model.best_iteration)
    
    val_pred = np.argmax(val_pred, axis=1)
    cv_scores.append(f1_score(y_true=y_val, y_pred=val_pred, average='macro'))
    print(cv_scores)

print("lgb_scotrainre_list:{}".format(cv_scores))
print("lgb_score_mean:{}".format(np.mean(cv_scores)))
print("lgb_score_std:{}".format(np.std(cv_scores)))

Output:
************************************ 1 ************************************
Training until validation scores don't improve for 200 rounds
[100] valid_0's multi_logloss: 0.0408155 valid_0's f1_score: 0.966797
[200] valid_0's multi_logloss: 0.0437957 valid_0's f1_score: 0.971239
Early stopping, best iteration is:
[96] valid_0's multi_logloss: 0.0406453 valid_0's f1_score: 0.967452
[0.9674515729721614]
************************************ 2 ************************************
Training until validation scores don't improve for 200 rounds
[100] valid_0's multi_logloss: 0.0472933 valid_0's f1_score: 0.965828
[200] valid_0's multi_logloss: 0.0514952 valid_0's f1_score: 0.968138
Early stopping, best iteration is:
[87] valid_0's multi_logloss: 0.0467472 valid_0's f1_score: 0.96567
[0.9674515729721614, 0.9656700872844327]
************************************ 3 ************************************
Training until validation scores don't improve for 200 rounds
[100] valid_0's multi_logloss: 0.0378154 valid_0's f1_score: 0.971004
[200] valid_0's multi_logloss: 0.0405053 valid_0's f1_score: 0.973736
Early stopping, best iteration is:
[93] valid_0's multi_logloss: 0.037734 valid_0's f1_score: 0.970004
[0.9674515729721614, 0.9656700872844327, 0.9700043639844769]
************************************ 4 ************************************
Training until validation scores don't improve for 200 rounds
[100] valid_0's multi_logloss: 0.0495142 valid_0's f1_score: 0.967106
[200] valid_0's multi_logloss: 0.0542324 valid_0's f1_score: 0.969746
Early stopping, best iteration is:
[84] valid_0's multi_logloss: 0.0490886 valid_0's f1_score: 0.965566
[0.9674515729721614, 0.9656700872844327, 0.9700043639844769, 0.9655663272378014]
************************************ 5 ************************************
Training until validation scores don't improve for 200 rounds
[100] valid_0's multi_logloss: 0.0412544 valid_0's f1_score: 0.964054
[200] valid_0's multi_logloss: 0.0443025 valid_0's f1_score: 0.965507
Early stopping, best iteration is:
[96] valid_0's multi_logloss: 0.0411855 valid_0's f1_score: 0.963114
[0.9674515729721614, 0.9656700872844327, 0.9700043639844769, 0.9655663272378014, 0.9631137190307674]
lgb_score_list:[0.9674515729721614, 0.9656700872844327, 0.9700043639844769, 0.9655663272378014, 0.9631137190307674]
lgb_score_mean:0.9663612141019279
lgb_score_std:0.0022854824074775683
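
The loop above reimplements what lightgbm's built-in lgb.cv helper does. As a cross-check, roughly the same mean score can be obtained in a few lines; this sketch assumes the LightGBM 3.x API, where early_stopping_rounds is still an lgb.cv argument (on 4.x it moves into callbacks, and the result keys gain a 'valid ' prefix):

cv_results = lgb.cv(params,
                    train_set=lgb.Dataset(X_train, label=y_train),
                    folds=kf,             # reuse the same 5-fold splitter
                    num_boost_round=2000,
                    early_stopping_rounds=200,
                    feval=f1_score_vali)

# cv_results maps each metric to per-round mean/std lists;
# the last entry corresponds to the early-stopped best round
print({k: v[-1] for k, v in cv_results.items()})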
