Data Mining HeartbeatClassification: a Simple LightGBM Baseline

Let's first walk through the steps of building a model:

  • Import the required packages
  • Read the data and split the dataset
  • Build or import a model and set its parameters
  • Feed the data to the model and configure the training parameters
  • Evaluate the best model (a compact sketch of these five steps follows this list)
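
For orientation, here is a minimal sketch of those five steps, assuming a generic numeric table with a 'label' column (the real dataset needs the string-expanding preprocessing shown later; the file path is the one used in this article):

import lightgbm as lgb
import pandas as pd
from sklearn.model_selection import train_test_split

df = pd.read_csv('data/train.csv')                              # 1. read the data
X, y = df.drop('label', axis=1), df['label']
X_tr, X_va, y_tr, y_va = train_test_split(X, y, test_size=0.2)  # 2. split the dataset
params = {'objective': 'multiclass', 'num_class': 4}            # 3. set model parameters
model = lgb.train(params,                                       # 4. train the model
                  lgb.Dataset(X_tr, label=y_tr),
                  valid_sets=[lgb.Dataset(X_va, label=y_va)])
print(model.best_score)                                         # 5. evaluate the best model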

1. Overview: the Whole Pipeline

This part is divided by rows of ##### markers; skim through it quickly to get a feel for how the pipeline is assembled.

import pandas as pd
import numpy as np
from sklearn.metrics import f1_score

import os
import seaborn as sns
import matplotlib.pyplot as plt

import warnings
warnings.filterwarnings("ignore")


# Downcast each numeric column to the smallest dtype that can hold its value range
def reduce_mem_usage(df):
    start_mem = df.memory_usage().sum() / 1024 ** 2
    print('Memory usage of dataframe is {:.2f} MB'.format(start_mem))

    for col in df.columns:
        col_type = df[col].dtype

        if col_type != object:
            c_min = df[col].min()
            c_max = df[col].max()
            if str(col_type)[:3] == 'int':
                if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
                    df[col] = df[col].astype(np.int8)
                elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
                    df[col] = df[col].astype(np.int16)
                elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
                    df[col] = df[col].astype(np.int32)
                elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
                    df[col] = df[col].astype(np.int64)
            else:
                if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
                    df[col] = df[col].astype(np.float16)
                elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
                    df[col] = df[col].astype(np.float32)
                else:
                    df[col] = df[col].astype(np.float64)
        else:
            df[col] = df[col].astype('category')

    end_mem = df.memory_usage().sum() / 1024 ** 2
    print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
    print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))

    return df

def f1_score_vali(preds, data_vali):
    """Custom macro-F1 metric for lgb.train's feval argument."""
    labels = data_vali.get_label()
    # LightGBM < 4.0 passes multiclass preds as a flat, class-major array,
    # hence the reshape to (num_class, n_samples); newer versions pass a
    # 2-D (n_samples, num_class) array, where np.argmax(preds, axis=1) applies.
    preds = np.argmax(preds.reshape(4, -1), axis=0)
    score_vali = f1_score(y_true=labels, y_pred=preds, average='macro')
    return 'f1_score', score_vali, True

##########################################
# Read the data
data = pd.read_csv('data/train.csv')
# Simple preprocessing: each row stores the whole signal as one comma-separated
# string, so expand it into individual float columns
data_list = []
for items in data.values:
    data_list.append([items[0]] + [float(i) for i in items[1].split(',')] + [items[2]])

data = pd.DataFrame(np.array(data_list))
data.columns = ['id'] + ['s_' + str(i) for i in range(len(data_list[0]) - 2)] + ['label']

data = reduce_mem_usage(data)

#############################################
from sklearn.model_selection import KFold
# Separate the features from id and label, ready for cross-validation
X_train = data.drop(['id', 'label'], axis=1)
y_train = data['label']

# 5-fold cross-validation
folds = 5
seed = 2021
kf = KFold(n_splits=folds, shuffle=True, random_state=seed)


"""对训练集数据进行划分,分成训练集和验证集,并进行相应的操作"""
from sklearn.model_selection import train_test_split
import lightgbm as lgb
# Train/validation split
X_train_split, X_val, y_train_split, y_val = train_test_split(X_train, y_train, test_size=0.2)
train_matrix = lgb.Dataset(X_train_split, label=y_train_split)
valid_matrix = lgb.Dataset(X_val, label=y_val)

###############################
params = {
    "learning_rate": 0.1,
    "boosting": 'gbdt',
    "lambda_l2": 0.1,
    "max_depth": -1,
    "num_leaves": 128,
    "bagging_fraction": 0.8,
    "feature_fraction": 0.8,
    "metric": None,
    "objective": "multiclass",
    "num_class": 4,
    "nthread": 10,
    "verbose": -1,
}
###########################################
"""使用训练集数据进行模型训练"""
model = lgb.train(params,
                  train_set=train_matrix,
                  valid_sets=valid_matrix,
                  num_boost_round=2000,
                  verbose_eval=50,
                  early_stopping_rounds=200,
                  feval=f1_score_vali)

val_pre_lgb = model.predict(X_val, num_iteration=model.best_iteration)
preds = np.argmax(val_pre_lgb, axis=1)
score = f1_score(y_true=y_val, y_pred=preds, average='macro')
print('macro-F1 of the untuned LightGBM model on the validation set: {}'.format(score))

#################################################################

"""使用lightgbm 5折交叉验证进行建模预测"""
cv_scores = []
for i, (train_index, valid_index) in enumerate(kf.split(X_train, y_train)):
    print('************************************ {} ************************************'.format(str(i + 1)))
    X_train_split, X_val = X_train.iloc[train_index], X_train.iloc[valid_index]
    y_train_split, y_val = y_train.iloc[train_index], y_train.iloc[valid_index]

    train_matrix = lgb.Dataset(X_train_split, label=y_train_split)
    valid_matrix = lgb.Dataset(X_val, label=y_val)

    params = {
        "learning_rate": 0.1,
        "boosting": 'gbdt',
        "lambda_l2": 0.1,
        "max_depth": -1,
        "num_leaves": 128,
        "bagging_fraction": 0.8,
        "feature_fraction": 0.8,
        "metric": None,
        "objective": "multiclass",
        "num_class": 4,
        "nthread": 10,
        "verbose": -1,
    }

    model = lgb.train(params,
                      train_set=train_matrix,
                      valid_sets=valid_matrix,
                      num_boost_round=2000,
                      verbose_eval=100,
                      early_stopping_rounds=200,
                      feval=f1_score_vali)

    val_pred = model.predict(X_val, num_iteration=model.best_iteration)

    val_pred = np.argmax(val_pred, axis=1)
    cv_scores.append(f1_score(y_true=y_val, y_pred=val_pred, average='macro'))
    print(cv_scores)

print("lgb_scotrainre_list:{}".format(cv_scores))
print("lgb_score_mean:{}".format(np.mean(cv_scores)))
print("lgb_score_std:{}".format(np.std(cv_scores)))

2. Step-by-Step Walkthrough

2.1 Reading and Preprocessing the Data

data = pd.read_csv('data/train.csv')
# Simple preprocessing: expand the comma-separated signal string into float columns
data_list = []
for items in data.values:
    data_list.append([items[0]] + [float(i) for i in items[1].split(',')] + [items[2]])

data = pd.DataFrame(np.array(data_list))
data.columns = ['id'] + ['s_'+str(i) for i in range(len(data_list[0])-2)] + ['label']

data = reduce_mem_usage(data)

The reduce_mem_usage() function downcasts every numeric column to the smallest dtype that can represent its value range. This shrinks the DataFrame substantially, which in turn speeds up everything that reads the data afterwards.

The trade-off is precision: a value like 0.8765432101234567 is effectively replaced by 0.87654321 or coarser, depending on the target dtype.
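
A quick way to see this precision cost is to downcast one value by hand (a tiny illustrative check, not part of the original pipeline):

import numpy as np

x = 0.8765432101234567
print(np.float64(x))  # 0.8765432101234567 (full double precision)
print(np.float32(x))  # ~0.8765432 (about 7 significant digits)
print(np.float16(x))  # ~0.8765 (about 3-4 significant digits)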

2.2 Splitting the Data and Cross-Validation

from sklearn.model_selection import KFold
# Separate the features from id and label, ready for cross-validation
X_train = data.drop(['id', 'label'], axis=1)
y_train = data['label']

# 5-fold cross-validation
folds = 5
seed = 2021
kf = KFold(n_splits=folds, shuffle=True, random_state=seed)

"""Split the training data into a training part and a validation part"""
from sklearn.model_selection import train_test_split
import lightgbm as lgb
# Train/validation split
X_train_split, X_val, y_train_split, y_val = train_test_split(X_train, y_train, test_size=0.2)
train_matrix = lgb.Dataset(X_train_split, label=y_train_split)
valid_matrix = lgb.Dataset(X_val, label=y_val)

2.2.1 KFold

For background on what cross-validation is and why it helps, see a dedicated explanation of cross-validation.

The signature of KFold is KFold(n_splits=5, shuffle=False, random_state=None).
Its parameters are:

  • n_splits: the number of folds (at least 2)
  • shuffle: whether to shuffle the data before splitting; defaults to False (no shuffling)
  • random_state: fixes the shuffling seed for reproducibility; only used when shuffle == True
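
A minimal sketch of KFold in action, on a toy array of 10 samples (the printed indices depend on the seed):

import numpy as np
from sklearn.model_selection import KFold

kf_demo = KFold(n_splits=5, shuffle=True, random_state=2021)
for fold, (train_idx, valid_idx) in enumerate(kf_demo.split(np.arange(10))):
    # every sample appears in exactly one validation fold across the 5 rounds
    print('fold {}: train {}, valid {}'.format(fold, train_idx, valid_idx))
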
2.2.2 train_test_split

X_train_split, X_val, y_train_split, y_val = train_test_split(X_train, y_train, test_size=0.2)

The arguments are the feature matrix X, the label vector y, and the fraction of the data assigned to the held-out set. With test_size=0.2, the training split keeps 80% of the data and the validation split gets the remaining 20%.
The return values are, in order: training features, validation features, training labels, validation labels.
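
One hedged variant: the call above fixes neither the seed nor the class balance, so results vary from run to run. Adding random_state and stratify (both additions, not in the original) makes the split reproducible and preserves the class proportions, which matters for imbalanced multi-class labels:

X_train_split, X_val, y_train_split, y_val = train_test_split(
    X_train, y_train,
    test_size=0.2,       # 20% validation, 80% training
    random_state=2021,   # reproducible split
    stratify=y_train)    # keep class proportions in both parts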

2.3 Setting the Model Parameters

params = {
    "learning_rate": 0.1,       # shrinkage applied to each new tree
    "boosting": 'gbdt',         # gradient boosting decision trees
    "lambda_l2": 0.1,           # L2 regularization strength
    "max_depth": -1,            # -1 means no depth limit
    "num_leaves": 128,          # maximum number of leaves per tree
    "bagging_fraction": 0.8,    # row subsampling ratio
    "feature_fraction": 0.8,    # feature subsampling ratio
    "metric": None,             # keep the default metric; macro-F1 is added via feval
    "objective": "multiclass",  # multi-class classification
    "num_class": 4,             # number of target classes
    "nthread": 10,              # number of threads
    "verbose": -1,              # silence LightGBM's own logging
}
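
As a hedged aside, LightGBM's built-in lgb.cv can sanity-check a parameter set before writing any manual loop. This sketch reuses X_train, y_train, and f1_score_vali defined earlier; the exact keys of the returned dict vary across LightGBM versions:

import lightgbm as lgb

cv_results = lgb.cv(params,
                    lgb.Dataset(X_train, label=y_train),
                    num_boost_round=200,  # kept small for a quick check
                    nfold=5,
                    feval=f1_score_vali,
                    seed=2021)
# report the last boosting round's aggregated value for each tracked metric
print({k: v[-1] for k, v in cv_results.items()})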

2.4 训练模型,评估结果

import lightgbm as lgb

LightGBM is a relatively recent member of the boosting family, developed by Microsoft. Like XGBoost, it is an efficient implementation of GBDT: both fit each new decision tree to the negative gradient of the loss function, used as an approximation of the current residuals.
For details, see the LightGBM documentation.
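
The "fit new trees to the negative gradient" idea can be seen in a toy, framework-free form. The sketch below illustrates the principle only (it is not LightGBM's actual implementation): it boosts shallow regression trees on squared loss, where the negative gradient is simply the residual:

import numpy as np
from sklearn.tree import DecisionTreeRegressor

rng = np.random.RandomState(0)
X_toy = rng.uniform(-3, 3, size=(200, 1))
y_toy = np.sin(X_toy[:, 0]) + rng.normal(scale=0.1, size=200)

pred = np.zeros_like(y_toy)
learning_rate = 0.1
for _ in range(100):
    residual = y_toy - pred                      # negative gradient of squared loss
    tree = DecisionTreeRegressor(max_depth=2).fit(X_toy, residual)
    pred += learning_rate * tree.predict(X_toy)  # shrink and add the new tree

print('training MSE after boosting: {:.4f}'.format(np.mean((y_toy - pred) ** 2)))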

"""使用训练集数据进行模型训练"""
model = lgb.train(params,
                  train_set=train_matrix,
                  valid_sets=valid_matrix,
                  num_boost_round=2000,
                  verbose_eval=50,
                  early_stopping_rounds=200,
                  feval=f1_score_vali)

val_pre_lgb = model.predict(X_val, num_iteration=model.best_iteration)
preds = np.argmax(val_pre_lgb, axis=1)
score = f1_score(y_true=y_val, y_pred=preds, average='macro')  # evaluate model performance
print('macro-F1 of the untuned LightGBM model on the validation set: {}'.format(score))
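
One version caveat: verbose_eval and early_stopping_rounds were accepted by lgb.train in older LightGBM releases but were removed in LightGBM 4.0 in favor of callbacks. A sketch of the equivalent call for newer versions follows (same training behavior, assuming the objects defined above; on 4.x, f1_score_vali also needs the 2-D preds variant noted in its comments):

model = lgb.train(params,
                  train_set=train_matrix,
                  valid_sets=valid_matrix,
                  num_boost_round=2000,
                  feval=f1_score_vali,
                  callbacks=[lgb.log_evaluation(period=50),  # print every 50 rounds
                             lgb.early_stopping(stopping_rounds=200)])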

2.5 Cross-Validation for a More Reliable Estimate

"""使用lightgbm 5折交叉验证进行建模预测"""
cv_scores = []
for i, (train_index, valid_index) in enumerate(kf.split(X_train, y_train)):
    print('************************************ {} ************************************'.format(str(i + 1)))
    X_train_split, X_val = X_train.iloc[train_index], X_train.iloc[valid_index]
    y_train_split, y_val = y_train.iloc[train_index], y_train.iloc[valid_index]

    train_matrix = lgb.Dataset(X_train_split, label=y_train_split)
    valid_matrix = lgb.Dataset(X_val, label=y_val)

    params = {
        "learning_rate": 0.1,
        "boosting": 'gbdt',
        "lambda_l2": 0.1,
        "max_depth": -1,
        "num_leaves": 128,
        "bagging_fraction": 0.8,
        "feature_fraction": 0.8,
        "metric": None,
        "objective": "multiclass",
        "num_class": 4,
        "nthread": 10,
        "verbose": -1,
    }

    model = lgb.train(params,
                      train_set=train_matrix,
                      valid_sets=valid_matrix,
                      num_boost_round=2000,
                      verbose_eval=100,
                      early_stopping_rounds=200,
                      feval=f1_score_vali)

    val_pred = model.predict(X_val, num_iteration=model.best_iteration)

    val_pred = np.argmax(val_pred, axis=1)
    cv_scores.append(f1_score(y_true=y_val, y_pred=val_pred, average='macro'))
    print(cv_scores)

print("lgb_scotrainre_list:{}".format(cv_scores))
print("lgb_score_mean:{}".format(np.mean(cv_scores)))
print("lgb_score_std:{}".format(np.std(cv_scores)))

The key point of this part is here:

folds = 5
seed = 2021
kf = KFold(n_splits=folds, shuffle=True, random_state=seed)

for i, (train_index, valid_index) in enumerate(kf.split(X_train, y_train)):

KFold splits the data into 5 equal folds (shuffled once up front). In each round, 4 folds form the training set and the remaining fold is the validation set, so every fold serves as the validation set exactly once (a sketch collecting out-of-fold predictions follows this list).
For example:

  • folds [1, 2, 3, 4] train, fold [5] validates
  • folds [2, 3, 4, 5] train, fold [1] validates
  • folds [1, 3, 4, 5] train, fold [2] validates
  • folds [1, 2, 4, 5] train, fold [3] validates
  • folds [1, 2, 3, 5] train, fold [4] validates
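
Beyond the per-fold score list, a common extension (an addition, not in the original) is to collect out-of-fold predictions so a single overall macro-F1 can be reported. This sketch reuses kf, params, X_train, y_train, and f1_score_vali from above, and omits the version-dependent logging/early-stopping keywords:

oof_pred = np.zeros(len(X_train), dtype=int)
for train_index, valid_index in kf.split(X_train):
    X_tr, X_va = X_train.iloc[train_index], X_train.iloc[valid_index]
    y_tr, y_va = y_train.iloc[train_index], y_train.iloc[valid_index]
    fold_model = lgb.train(params,
                           lgb.Dataset(X_tr, label=y_tr),
                           valid_sets=lgb.Dataset(X_va, label=y_va),
                           num_boost_round=2000,
                           feval=f1_score_vali)
    proba = fold_model.predict(X_va, num_iteration=fold_model.best_iteration)
    oof_pred[valid_index] = np.argmax(proba, axis=1)  # fill this fold's slots

print('overall out-of-fold macro-F1: {}'.format(
    f1_score(y_true=y_train, y_pred=oof_pred, average='macro')))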

3. Test Results

3.1 Output of the Data Preprocessing

Memory usage of dataframe is 157.93 MB
Memory usage after optimization is: 39.67 MB
Decreased by 74.9%

3.2 Training Results Without Tuning

##########################################
Training until validation scores don't improve for 200 rounds
[50]	valid_0's multi_logloss: 0.0518171	valid_0's f1_score: 0.954613
[100]	valid_0's multi_logloss: 0.0456245	valid_0's f1_score: 0.962268
[150]	valid_0's multi_logloss: 0.0470006	valid_0's f1_score: 0.965291
[200]	valid_0's multi_logloss: 0.0484298	valid_0's f1_score: 0.966406
[250]	valid_0's multi_logloss: 0.0498757	valid_0's f1_score: 0.966886
Early stopping, best iteration is:
[92]	valid_0's multi_logloss: 0.045533	valid_0's f1_score: 0.96131
macro-F1 of the untuned LightGBM model on the validation set: 0.961309560452836

3.3 Training Results with 5-Fold Cross-Validation

###########################################################################

************************************ 1 ************************************
Training until validation scores don't improve for 200 rounds
[100]	valid_0's multi_logloss: 0.0408155	valid_0's f1_score: 0.966797
[200]	valid_0's multi_logloss: 0.0437957	valid_0's f1_score: 0.971239
Early stopping, best iteration is:
[96]	valid_0's multi_logloss: 0.0406453	valid_0's f1_score: 0.967452
[0.9674515729721614]
************************************ 2 ************************************
Training until validation scores don't improve for 200 rounds
[100]	valid_0's multi_logloss: 0.0472933	valid_0's f1_score: 0.965828
[200]	valid_0's multi_logloss: 0.0514952	valid_0's f1_score: 0.968138
Early stopping, best iteration is:
[87]	valid_0's multi_logloss: 0.0467472	valid_0's f1_score: 0.96567
[0.9674515729721614, 0.9656700872844327]
************************************ 3 ************************************
Training until validation scores don't improve for 200 rounds
[100]	valid_0's multi_logloss: 0.0378154	valid_0's f1_score: 0.971004
[200]	valid_0's multi_logloss: 0.0405053	valid_0's f1_score: 0.973736
Early stopping, best iteration is:
[93]	valid_0's multi_logloss: 0.037734	valid_0's f1_score: 0.970004
[0.9674515729721614, 0.9656700872844327, 0.9700043639844769]
************************************ 4 ************************************
Training until validation scores don't improve for 200 rounds
[100]	valid_0's multi_logloss: 0.0495142	valid_0's f1_score: 0.967106
[200]	valid_0's multi_logloss: 0.0542324	valid_0's f1_score: 0.969746
Early stopping, best iteration is:
[84]	valid_0's multi_logloss: 0.0490886	valid_0's f1_score: 0.965566
[0.9674515729721614, 0.9656700872844327, 0.9700043639844769, 0.9655663272378014]
************************************ 5 ************************************
Training until validation scores don't improve for 200 rounds
[100]	valid_0's multi_logloss: 0.0412544	valid_0's f1_score: 0.964054
[200]	valid_0's multi_logloss: 0.0443025	valid_0's f1_score: 0.965507
Early stopping, best iteration is:
[96]	valid_0's multi_logloss: 0.0411855	valid_0's f1_score: 0.963114
[0.9674515729721614, 0.9656700872844327, 0.9700043639844769, 0.9655663272378014, 0.9631137190307674]
lgb_score_list:[0.9674515729721614, 0.9656700872844327, 0.9700043639844769, 0.9655663272378014, 0.9631137190307674]
lgb_score_mean:0.9663612141019279
lgb_score_std:0.0022854824074775683