#!/usr/bin/env python
# coding: utf-8
# In[1]:
from pyspark.conf import SparkConf
from pyspark.sql import SparkSession
from pyspark.sql import Row
from pyspark.sql.functions import lit
from pyspark.sql import functions as F
from pyspark.ml.evaluation import MulticlassClassificationEvaluator, BinaryClassificationEvaluator
from pyspark import SparkFiles
import lightgbm as lgb
import xgboost as xgb
import pandas as pd
import numpy as np
import gc
import matplotlib.pyplot as plt
import joblib
import datetime
import dateutil.relativedelta
import pickle
import cloudpickle
from tqdm import tqdm
import sys
from sklearn.metrics import precision_recall_curve, average_precision_score, roc_curve, auc
from sklearn.model_selection import train_test_split
from sklearn_pandas import DataFrameMapper
from sklearn2pmml import PMMLPipeline
from sklearn.preprocessing import FunctionTransformer
from sklearn.ensemble import RandomForestClassifier
# from imblearn.over_sampling import SMOTE, SVMSMOTE, ADASYN, BorderlineSMOTE, SMOTENC, SMOTEN, KMeansSMOTE
from sklearn.datasets import make_classification
from sklearn.metrics import log_loss, roc_auc_score
from sklearn.preprocessing import MinMaxScaler, StandardScaler, QuantileTransformer, LabelEncoder, OrdinalEncoder
from deepctr.layers import custom_objects
from deepctr.models import *
from deepctr.feature_column import SparseFeat, DenseFeat, get_feature_names, build_input_features
import tensorflow as tf
from tensorflow.python.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.python.keras.models import save_model, load_model
from tensorflow.keras.optimizers import Adam, RMSprop
import random
# Start the Spark session
spark = SparkSession \
.builder \
.config("spark.executor.memory", "45g") \
.config("spark.dynamicAllocation.maxExecutors", "200") \
.config("spark.executor.cores", "6") \
.appName("qcptj") \
.enableHiveSupport() \
.getOrCreate()
sc = spark.sparkContext
sc.setLogLevel('ERROR')  # setLogLevel returns None, so don't assign its result
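# Optional sanity check (not in the original) that the requested resources
# took effect on the live session:
# print(spark.sparkContext.getConf().get('spark.executor.memory'))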
# Compute the partition key (yesterday's date, formatted as YYYYMMDD)
lm2ld = datetime.date.today() - datetime.timedelta(1)
par_dt = str(lm2ld).replace('-', '')
par_dt = '20231130'  # hard-coded override of the computed partition for this run
print('par_dt:', par_dt)
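# The dateutil.relativedelta import above is otherwise unused; a sketch of the
# month-level partition key it would typically produce (an assumption, not
# something the article builds -- it only uses the day-level key par_dt):
par_month = (datetime.date.today() - dateutil.relativedelta.relativedelta(months=1)).strftime('%Y%m')
print('par_month:', par_month)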
# Load the training and prediction data directly from Hive
dfspark_train = spark.sql(f"select * from table1 where dt='{par_dt}'")
# print(dfspark_train.count())
dfspark_test = spark.sql(f"select * from table2 where dt='{par_dt}'")
# print(dfspark_test.count())
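# An equivalent load through the DataFrame API instead of a SQL string
# (a sketch; same table names as above):
# dfspark_train = spark.table('table1').where(F.col('dt') == par_dt)
# dfspark_test = spark.table('table2').where(F.col('dt') == par_dt)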
# Load the precomputed feature-type lists. The original loaded the test
# pickle into both train and test variables, an apparent copy-paste slip.
id_types_spk_train = pickle.load(open('../data/id_types_spk_train.pkl', 'rb'))
id_types_spk_test = pickle.load(open('../data/id_types_spk_test.pkl', 'rb'))
nums_types_spk_train = pickle.load(open('../data/nums_types_spk_train.pkl', 'rb'))  # path inferred from the naming pattern above
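# ------------------------------------------------------------------
# A minimal sketch (not from the original article) of the step the title
# describes: distributed scoring of the locally trained DeepCTR model with
# mapInPandas (Spark 3.x). The model filename 'deepfm.h5' and the
# feature_names list are illustrative assumptions.
from pyspark.sql.types import DoubleType, StructField, StructType

spark.sparkContext.addFile('../model/deepfm.h5')  # ship the model file to every executor

feature_names = ['feat_a', 'feat_b']  # hypothetical; replace with the real input columns

def predict_batches(iterator):
    # Load the Keras model once per task on the executor; custom_objects
    # (imported from deepctr.layers above) restores the DeepCTR layers.
    model = load_model(SparkFiles.get('deepfm.h5'), custom_objects)
    for pdf in iterator:
        inputs = {name: pdf[name].values for name in feature_names}
        pdf['score'] = model.predict(inputs, batch_size=4096).reshape(-1).astype('float64')
        yield pdf

out_schema = StructType(dfspark_test.schema.fields + [StructField('score', DoubleType())])
scored = dfspark_test.mapInPandas(predict_batches, schema=out_schema)
# scored.select('score').show(5)
# ------------------------------------------------------------------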
# Source article: "PySpark distributed prediction with a locally trained deep model", first published 2024-12-17.