From Spark SQL Reads to LightGBM Model Storage

Overview

This article walks through the steps from reading data with Spark SQL to storing a trained LightGBM model.

Overall Workflow

Import required packages, read data, preprocess data, build the model, evaluate the model, select features, store the model.

Technical Details

1. Import required packages
from pyspark.conf import SparkConf       # SparkConf holds the Spark cluster configuration parameters
from pyspark.sql import SQLContext       # main entry point for DataFrame and SQL functionality
from pyspark.sql import SparkSession     # used to start Spark
import time                              # measure elapsed time
import numpy as np
import pandas as pd
from lightgbm import LGBMClassifier
from sklearn.model_selection import GridSearchCV
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import f1_score, confusion_matrix, recall_score, precision_score  # evaluation metrics

import warnings
warnings.filterwarnings("ignore")
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)

t1 = time.time()

conf = SparkConf()\
    .setExecutorEnv("PYTHONHASHSEED", "123")\
    .setMaster("yarn")

spark = SparkSession \
    .builder \
    .config(conf=conf)\
    .config("hive.exec.dynamic.partition", "true")\
    .config("hive.exec.dynamic.partition.mode", "nonstrict")\
    .config("spark.driver.cores", 2)\
    .config("spark.driver.maxResultSize", "32g")\
    .config("spark.driver.memory", "32g")\
    .config("spark.executor.memory", "45g")\
    .config("spark.executor.instances", 16)\
    .config("spark.executor.cores", 8)\
    .config("spark.kryoserializer.buffer.max", "128m")\
    .config("spark.network.timeout", "10000000")\
    .config("spark.sql.autoBroadcastJoinThreshold", "128")\
    .config("spark.sql.broadcastTimeout", "500000")\
    .config("spark.sql.shuffle.partitions", "800")\
    .config("spark.sql.sources.partitionOverwriteMode", "dynamic")\
    .config("spark.yarn.am.memory", "16g")\
    .config("spark.yarn.am.cores", 2)\
    .config("spark.yarn.executor.memoryOverhead", "128g")\
    .config("yarn.nodemanager.vmem-check-enabled", "false")\
    .config("yarn.nodemanager.pmem-check-enabled", "false")\
    .config("spark.dynamicAllocation.maxExecutors", "500")\
    .appName("project")\
    .enableHiveSupport()\
    .getOrCreate()

sc = spark.sparkContext

print(f"spark started and took {time.time()-t1:.05f}s")
2. Data reading
data = spark.sql("""select * from temp.tables""")   # use Spark SQL to read the data
df = data.toPandas()                                 # convert the result to a pandas DataFrame
df.shape
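Collecting an entire table to the driver with toPandas() is what forces the large spark.driver.maxResultSize above. When the table is wide, it can help to push column selection and row filtering into the SQL itself. A minimal sketch, assuming the column names used later in this article (customer_id, age, flag) and a hypothetical partition filter:

# Select only the columns the model needs and filter rows on the cluster side
# before collecting to pandas (the WHERE clause below is a hypothetical example).
query = """
    select customer_id, age, flag
    from temp.tables
    where dt = '2023-01-01'
"""
df = spark.sql(query).toPandas()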
3. Data preprocessing
# Data preprocessing

df.rename(columns={"flag": 'real_flag'}, inplace=True)  # rename the label column
df.head(3)                                               # print the first three rows

del data                                                 # drop the unused variable to relieve memory pressure
df['customer_id'] = pd.to_numeric(df['customer_id'], errors="coerce")   # convert to numeric (dirty rows containing Chinese characters/letters prevent a direct int cast)
df = df.dropna(subset=['customer_id'])

df.isnull().sum()  # check for missing values

# Fill missing ages with the median;
df["age"].fillna(df["age"].median(), inplace=True)  # mode alternative: df["age"].mode()[0]
df["age"].value_counts()  # inspect the distribution

for col in df.columns:
    df[col] = df[col].astype("int")    # cast every column to int
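The blanket astype("int") loop will raise if any column still contains missing or non-numeric values. A slightly more defensive variant, sketched here as an optional alternative rather than part of the original pipeline:

# Coerce each column to numeric, fill anything unparseable with 0, then cast to int.
# Filling with 0 is an assumption; choose a value that makes sense for your features.
for col in df.columns:
    df[col] = pd.to_numeric(df[col], errors="coerce").fillna(0).astype("int")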
4. Model building
y = df['real_flag'].values                         # true labels
X = df.drop(['customer_id', 'real_flag'], axis=1)  # drop the customer id and the label from the feature set
X.head()

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=2023)  # split into training and test (validation) sets

params = {'n_estimators': 1500,
          'learning_rate': 0.1,
          'max_depth': 15,
          'metric': 'auc',
          'verbose': -1,
          'seed': 2023,
          'n_jobs': -1}

model = LGBMClassifier(**params)
model.fit(X_train, y_train,
          eval_set=[(X_train, y_train), (X_test, y_test)],
          eval_metric='auc',
          verbose=50,
          early_stopping_rounds=100)
y_pred = model.predict(X_test, num_iteration=model.best_iteration_)
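In recent LightGBM releases (4.x), the verbose and early_stopping_rounds keyword arguments to fit are no longer accepted; the equivalent behaviour is configured through callbacks. A minimal sketch for newer versions:

from lightgbm import early_stopping, log_evaluation

model = LGBMClassifier(**params)
model.fit(X_train, y_train,
          eval_set=[(X_train, y_train), (X_test, y_test)],
          eval_metric='auc',
          callbacks=[early_stopping(stopping_rounds=100),   # stop if the eval metric stops improving
                     log_evaluation(period=50)])            # print eval results every 50 rounds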
5. Model evaluation
y_pred = model.predict(X_test)
y_pred_proba = model.predict_proba(X_test)
lgb_acc = model.score(X_test, y_test) * 100
lgb_recall = recall_score(y_test, y_pred) * 100
lgb_precision = precision_score(y_test, y_pred) * 100
lgb_f1 = f1_score(y_test, y_pred, pos_label=1) * 100
print("lgb accuracy: {:.2f}%".format(lgb_acc))
print("lgb recall: {:.2f}%".format(lgb_recall))
print("lgb precision: {:.2f}%".format(lgb_precision))
print("lgb F1 score: {:.2f}%".format(lgb_f1))


#from sklearn.metrics import classification_report
#print(classification_report(y_test, y_pred))

# Confusion matrix
plt.title("Confusion matrix", fontsize=21)
data_confusion_matrix = confusion_matrix(y_test, y_pred)
sns.heatmap(data_confusion_matrix, annot=True, cmap='Blues', fmt='d', cbar=False, annot_kws={'size': 28})
plt.xlabel('Predicted label')
plt.ylabel('True label')


from sklearn.metrics import roc_curve, auc
probs = model.predict_proba(X_test)
preds = probs[:, 1]
fpr, tpr, threshold = roc_curve(y_test, preds)
# Plot the ROC curve
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate (TPR)')
plt.xlabel('False Positive Rate (FPR)')
plt.title('ROC')
plt.legend(loc='lower right')
plt.show()
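As a quick cross-check of the plotted AUC, scikit-learn can compute it directly from the predicted probabilities; a minimal sketch:

from sklearn.metrics import roc_auc_score

# Should match the AUC shown in the ROC plot above.
print("lgb AUC: {:.4f}".format(roc_auc_score(y_test, preds)))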
6. Feature selection (decide how many features you need, then go back to step 3 and rerun)
import matplotlib.pyplot as plt
import seaborn as sns
warnings.simplefilter(action="ignore", category=FutureWarning)

feature_imp = pd.DataFrame(sorted(zip(model.feature_importances_, X)), columns=['Value', 'Feature'])
plt.figure(figsize=(20, 10))
sns.barplot(x='Value', y='Feature', data=feature_imp)
plt.title("Feature importance scores of the modelling features")
plt.tight_layout()
plt.show()

feature_imp_sort = feature_imp.sort_values(by="Value", ascending=False)[:]      # use [:n] to limit the number of rows shown
feature_imp_sort

print(feature_imp_sort['Feature'].values)
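Once you have decided how many features to keep, one way to feed that decision back into steps 3 and 4 is to keep only the top-ranked features before re-splitting and re-fitting; a minimal sketch (the cut-off of 30 is a hypothetical choice):

top_n = 30  # hypothetical cut-off; tune to your use case
keep_cols = feature_imp_sort['Feature'].head(top_n).tolist()

# Restrict the feature matrix, then repeat the split and fit from step 4.
X = X[keep_cols]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=2023)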
7. Model storage
import joblib
joblib.dump(model, 'model_joblib.pkl')
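To use the stored model later (for example in a scoring job), it can be loaded back with joblib and applied to new data with the same feature columns; a minimal sketch:

import joblib

loaded_model = joblib.load('model_joblib.pkl')
# The scoring data must have the same columns, in the same order, as the training features.
scores = loaded_model.predict_proba(X_test)[:, 1]
print(scores[:5])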