# 导入相关库
import numpy as np
import pandas as pd
from datetime import date
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
# ------------------------------------ Part 1: load the data --------------------------------------#
off_train_data = pd.read_csv('./data/ccf_offline_stage1_train.csv',encoding='utf-8')
off_test_data = pd.read_csv('./data/ccf_offline_stage1_test_revised.csv',encoding='utf-8')
# Fill every NaN with the literal string 'null'; later code tests for this
# marker (note: it also forces affected columns to object dtype).
off_train_data = off_train_data.fillna('null')
off_test_data = off_test_data.fillna('null')
# Label each row: 1 = positive sample, 0 = negative sample, -1 = ordinary user.
def label(row):
    """Classify one offline-training record.

    Expects the 'null' string placeholder produced by the earlier fillna.

    Returns:
        1  -- coupon received and redeemed within 15 days (positive)
        0  -- coupon received but unused / used too late, or a purchase
              with a receive date but no coupon id (negative)
        -1 -- no coupon-receive date: ordinary record, excluded later
    """
    if row['Date'] != 'null' and row['Date_received'] != 'null':
        if row['Coupon_id'] != 'null':
            # Purchase made with a coupon: positive only if redeemed
            # within 15 days of receiving it.
            td = pd.to_datetime(row['Date'], format='%Y%m%d') - \
                 pd.to_datetime(row['Date_received'], format='%Y%m%d')
            return 1 if td <= pd.Timedelta(15, 'D') else 0
        # Purchase without a coupon while a receive date exists.
        return 0
    if row['Date'] == 'null' and row['Date_received'] != 'null':
        # Coupon received but never used.
        return 0
    return -1
# Apply the labelling function row-wise to build the classification target.
off_train_data['label'] = off_train_data.apply(label, axis = 1)
# Class distribution (recorded output kept below).
off_train_data['label'].value_counts()
'''
0 988887
-1 701602
1 64395
Name: label, dtype: int64
'''
# Column overview (recorded output kept below).
off_train_data.columns
'''
Index(['User_id', 'Merchant_id', 'Coupon_id', 'Discount_rate', 'Distance',
'Date_received', 'Date', 'label'],
dtype='object')
'''
# ------------------------------------ Part 2: data sampling --------------------------------------#
# Random sampling to balance positives and negatives roughly 1:1.
# Slice into features and labels.
X = off_train_data.loc[:,off_train_data.columns != 'label'] # features
y = off_train_data.loc[:,off_train_data.columns == 'label'] # labels
# Number of positive (label == 1) samples.
count_one_Class = len(off_train_data[off_train_data['label'] == 1])
# Row indices of the two classes.
one_Class_index = off_train_data[off_train_data['label'] == 1].index
zero_Class_index = off_train_data[off_train_data['label'] == 0].index
# Randomly draw as many negative indices as there are positives.
np.random.seed(25) # fixed seed for reproducibility
# NOTE(review): replace=True samples WITH replacement, so duplicated negative
# rows are possible and the sample is not strictly 1:1 in unique rows --
# confirm whether replace=False was intended.
random_zero_index = np.array(np.random.choice(zero_Class_index,count_one_Class,replace=True))
# Combine positive and sampled negative indices.
sample = np.concatenate([one_Class_index,random_zero_index])
# Select the sampled rows.
off_train_data = off_train_data.loc[sample,:]
# Print the before/after counts.
print('label为1的数目:', len(one_Class_index))
print('label为0的数目:', len(zero_Class_index))
print('总数:', len(one_Class_index) + len(zero_Class_index))
print('抽样label为1的数目:', len(one_Class_index))
print('随机抽取label为0的数目:', len(random_zero_index))
print('抽样总数:', len(one_Class_index) + len(random_zero_index))
print('总样本形状:', off_train_data.shape)
# ------------------------------------ Part 3: data exploration --------------------------------------#
# Exploratory look at row frequencies (display only; result not stored).
off_train_data.value_counts()
# Convert Distance to int; the 'null' placeholder becomes -1.
off_train_data['Distance'] = off_train_data['Distance'].replace('null', -1).astype(int)
off_test_data['Distance'] = off_test_data['Distance'].replace('null', -1).astype(int)
print('查看缺失值结果:\n',off_train_data.isnull().sum())
# Descriptive statistics.
description = off_train_data.describe()
# Derived rows: range, coefficient of variation, interquartile range.
description.loc['range'] = description.loc['max'] - description.loc['min']
# BUGFIX: the coefficient of variation is std/mean; the original computed
# std - mean, which is not a meaningful statistic.
description.loc['var'] = description.loc['std'] / description.loc['mean']
description.loc['dis'] = description.loc['75%'] - description.loc['25%']
print('描述性统计结果:\n',np.round(description, 2)) # round to 2 decimals
# Correlation analysis (Pearson).
# NOTE(review): on pandas >= 2.0, corr() raises on non-numeric columns;
# numeric_only=True may be needed there -- confirm the pandas version in use.
corr = off_train_data.corr(method = 'pearson') # correlation matrix
print('相关系数矩阵为:\n',np.round(corr, 2)) # round to 2 decimals
# Correlation heatmap.
import matplotlib.pyplot as plt
import seaborn as sns
plt.rcParams['font.sans-serif'] = 'SimHei'
plt.rcParams['axes.unicode_minus'] = False
plt.subplots(figsize=(10, 10)) # figure size
sns.heatmap(corr, annot=True, vmax=1, square=True, cmap="Blues")
plt.title('相关性热力图')
plt.show()
# Distance box plots; the isnull() counts below are display-only sanity checks.
off_train_data['Distance'].isnull().sum()
off_test_data['Distance'].isnull().sum()
D1 = np.array(off_train_data['Distance'].values)
D2 = np.array(off_test_data['Distance'].values)
# NOTE(review): boxplot's labels= keyword was renamed tick_labels in
# Matplotlib 3.9 -- confirm the installed version still accepts labels=.
plt.boxplot([D1,D2],labels=('off_train_data','off_test_data'))
plt.title('距离箱型图')
plt.show()
# ------------------------------------ 第四部分 数据预处理 --------------------------------------#
# 属性构造
# Convert the raw Discount_rate field into a numeric discount rate.
def convertRate(row):
    """Return the effective discount rate in (0, 1] for a Discount_rate value.

    'null'     -> 1.0 (no coupon, no discount)
    'man:jian' -> 1 - jian/man (spend `man`, get `jian` off)
    otherwise  -> the value parsed as a float (already a rate)
    """
    if row == 'null':
        return 1.0
    text = str(row)
    if ':' in text:
        # Use str(row) consistently with the membership test above (the
        # original called row.split on a possibly non-str value).
        man, jian = text.split(':')
        return 1.0 - float(jian) / float(man)
    return float(row)
# Encode the coupon type from the raw Discount_rate field.
def getDiscountType(row):
    """Return -1 for missing, 1 for full-reduction 'man:jian', 0 for a plain rate.

    Bugfixes vs. the original: the 'null' placeholder (set by fillna earlier
    in this script) is now treated as missing instead of being mis-labelled
    as a plain rate, and the ':' test uses str(row) so a non-string value
    cannot raise TypeError.
    """
    if row == -1 or row == 'null':
        return -1
    if ':' in str(row):
        # "spend X get Y off" coupon.
        return 1
    # Plain discount-rate coupon.
    return 0
# Extract the two parts of a full-reduction ("man:jian") coupon.
def Man_Rate(row):
    """Return the spend threshold X from an 'X:Y' Discount_rate, else 0.

    -1 (missing) and plain-rate values both yield 0.
    """
    if row == -1:
        return 0
    if ':' in str(row):
        return int(str(row).split(':')[0])
    return 0
def Jian_Rate(row):
    """Return the reduction amount Y from an 'X:Y' Discount_rate, else 0.

    -1 (missing) and plain-rate values both yield 0.
    """
    if row == -1:
        return 0
    if ':' in str(row):
        return int(str(row).split(':')[1])
    return 0
# Build the four discount-related features for both data sets.
off_train_data['Dis_rate'] = off_train_data['Discount_rate'].apply(convertRate)
off_train_data['Discount_type'] = off_train_data['Discount_rate'].apply(getDiscountType)
off_train_data['Discount_man'] = off_train_data['Discount_rate'].apply(Man_Rate)
off_train_data['Discount_jian'] = off_train_data['Discount_rate'].apply(Jian_Rate)
off_test_data['Dis_rate'] = off_test_data['Discount_rate'].apply(convertRate)
off_test_data['Discount_type'] = off_test_data['Discount_rate'].apply(getDiscountType)
off_test_data['Discount_man'] = off_test_data['Discount_rate'].apply(Man_Rate)
off_test_data['Discount_jian'] = off_test_data['Discount_rate'].apply(Jian_Rate)
'''
# 保存文件,方便下次读取操作
off_train_data.to_csv('./final_off_train_data.csv')
off_test_data.to_csv('./final_off_test_data.csv')
# 读取保存的文件
off_train_data = pd.read_csv('./final_off_train_data.csv',index_col=0)
off_test_data = pd.read_csv('./final_off_test_data.csv',index_col=0)
'''
# Keep only the labelled samples (drop label == -1 "ordinary" rows),
# then fill any remaining missing values with -1.
data = off_train_data[off_train_data['label'] != -1]
data = data.fillna(-1)
data['label'].value_counts()
# Weekday of the coupon-receive date, as a feature.
def getWeekday(row):
    """Return the ISO weekday (1=Mon .. 7=Sun) of a 'yyyymmdd' string.

    The literal placeholder 'null' is passed through unchanged.
    """
    if row == 'null':
        return row
    return date(int(row[0:4]), int(row[4:6]), int(row[6:8])).weekday() + 1
# Derive the weekday (1-7) of the coupon-receive date as a feature.
data['Weekday'] = data['Date_received'].astype(str).apply(getWeekday)
# NOTE(review): this relies on missing receive dates being the literal
# string 'null' (set by the earlier fillna); a real NaN would become 'nan'
# here and crash getWeekday -- confirm for off_test_data.
off_test_data['Weekday'] = off_test_data['Date_received'].astype(str).apply(getWeekday)
# Weekend flag: 1 for Saturday/Sunday (6, 7), otherwise 0.
data['Is_weekend'] = data['Weekday'].apply(lambda x: 1 if x in [6,7] else 0)
off_test_data['Is_weekend'] = off_test_data['Weekday'].apply(lambda x: 1 if x in [6,7] else 0)
# One-hot encoding of the weekday feature.
def One_hot(df):
    """Append one-hot indicator columns weekday1..weekday7 to *df*.

    Derived from the 'Weekday' column; the 'null' placeholder becomes NaN
    so it contributes no indicator. Assumes all seven weekdays occur in the
    data -- the column rename raises ValueError otherwise. Modifies *df*
    in place and also returns it.
    """
    weekdaycols = ['weekday' + str(i) for i in range(1, 8)]
    tmpdf = pd.get_dummies(df['Weekday'].replace('null', np.nan))
    tmpdf.columns = weekdaycols
    df[weekdaycols] = tmpdf
    return df
# One-hot encode the weekday for both data sets (adds weekday1..weekday7).
data = One_hot(data)
off_test_data = One_hot(off_test_data)
# Build three popularity-style features and merge them onto the data.
def func(data):
    """Return *data* merged with three derived features.

    rec_coupon    -- number of coupon rows per user
    Coupon_popu   -- each coupon's share of all coupon rows
    Merchant_popu -- each merchant's share of all user-merchant rows

    Uses .copy() on the column slices so assigning the helper columns does
    not raise SettingWithCopyWarning, and aggregates only the counter
    column (the original also summed Coupon_id, concatenating strings).
    """
    # Coupons received per user.
    f = data[['User_id', 'Coupon_id']].copy()
    f['rec_coupon'] = 1
    f = f.groupby('User_id', as_index=False)['rec_coupon'].sum()
    # Coupon popularity: each coupon's share of all coupon rows.
    f1 = data[['Coupon_id']].copy()
    total_coupons = len(f1)
    f1['Number_coupon'] = 1
    f1 = f1.groupby('Coupon_id', as_index=False)['Number_coupon'].sum()
    f1['Coupon_popu'] = f1['Number_coupon'] / total_coupons
    # Merchant popularity: each merchant's share of all user-merchant rows.
    f2 = data[['User_id', 'Merchant_id']].copy()
    total_visits = len(f2)
    f2['Number_merchant'] = 1
    f2 = f2.groupby('Merchant_id', as_index=False)['Number_merchant'].sum()
    f2['Merchant_popu'] = f2['Number_merchant'] / total_visits
    # Merge the three features back onto the input rows.
    d0 = pd.merge(data, f[['User_id', 'rec_coupon']], on='User_id')
    d1 = pd.merge(d0, f1[['Coupon_id', 'Coupon_popu']], on='Coupon_id')
    d2 = pd.merge(d1, f2[['Merchant_id', 'Merchant_popu']], on='Merchant_id')
    return d2
# Build the three popularity features for both data sets.
new_data = func(data)
new_test_data = func(off_test_data)
# Per-merchant distance statistics.
def Get_mer_dis(new_data):
    """Return *new_data* left-merged with per-merchant distance statistics.

    Adds merchant_min/max/mean/median_distance, computed over coupon rows
    only. The -1 / 'null' missing-distance placeholders are converted to
    NaN first so the aggregates skip them (merchants with no known
    distance get NaN in all four columns).

    Fixes vs. the original: the replacement targets only the Distance
    column (the original df-wide replace would also have clobbered a
    Merchant_id of -1), works on a .copy() instead of an inplace replace
    on a slice, and uses one grouped aggregation instead of four groupbys.
    """
    # Coupon-bearing rows; keep only merchant and distance.
    md1 = new_data[new_data.Coupon_id != 'null'][['Merchant_id', 'Distance']].copy()
    # 'null' and -1 both mean "distance unknown" -> NaN.
    md1['Distance'] = md1['Distance'].replace('null', -1).replace(-1, np.nan)
    # min / max / mean / median per merchant in a single pass.
    merchant_feature = (
        md1.groupby('Merchant_id')['Distance']
        .agg(merchant_min_distance='min',
             merchant_max_distance='max',
             merchant_mean_distance='mean',
             merchant_median_distance='median')
        .reset_index()
    )
    return pd.merge(new_data, merchant_feature, on='Merchant_id', how='left')
# Build the distance-related merchant features for both data sets.
new_data = Get_mer_dis(new_data)
new_test_data = Get_mer_dis(new_test_data)
# Grouped bar chart of distance counts, train vs. test.
x = np.arange(-1,11)
# NOTE(review): plt.bar needs dis1/dis2 to have exactly 12 entries; if any
# distance value in -1..10 is absent from a data set this raises -- confirm.
dis1 = np.array(new_data['Distance'].value_counts().sort_index())
dis2 = np.array(new_test_data['Distance'].value_counts().sort_index())
plt.bar(x,dis1,tick_label=x, label='off_train_data', width=0.3)
plt.bar(x+0.3,dis2,label='off_test_data',width=0.3)
plt.xlabel('距离')
plt.ylabel('计数')
plt.title('距离计数分布情况')
plt.legend()
plt.show()
# Bucket the Distance value into a coarse categorical type.
def get_distance_type(row):
    """Map a Distance value (expected range -1..10) to a coarse category.

    -1 -> -1 (unknown), 0-2 -> 0, 3-5 -> 1, 6-9 -> 2, 10 -> 3.
    Values outside -1..10 fall through and yield None, matching the
    original implicit behaviour. (The original comments described other
    kilometre ranges that did not match the code; they are corrected here.)
    """
    if row == -1:
        return -1
    if 0 <= row <= 2:
        return 0
    if 3 <= row <= 5:
        return 1
    if 6 <= row <= 9:
        return 2
    if row == 10:
        return 3
# Bucket Distance into the Distance_type feature for both data sets.
new_data['Distance_type'] = new_data['Distance'].apply(get_distance_type)
new_test_data['Distance_type'] = new_test_data['Distance'].apply(get_distance_type)
# Distribution of the new type (recorded output kept below).
new_data['Distance_type'].value_counts()
new_test_data['Distance_type'].value_counts()
'''
0 84309
-1 13858
3 13321
1 11194
2 6108
Name: Distance_type, dtype: int64
'''
# Grouped bar chart of the distance-type counts, train vs. test.
x1 = np.arange(-1,4)
dis_type1 = np.array(new_data['Distance_type'].value_counts().sort_index())
dis_type2 = np.array(new_test_data['Distance_type'].value_counts().sort_index())
plt.bar(x1,dis_type1,tick_label=x1, label='off_train_data', width=0.3)
plt.bar(x1+0.3,dis_type2,label='off_test_data',width=0.3)
plt.xlabel('距离类型')
plt.ylabel('计数')
plt.title('距离类型计数分布情况')
plt.legend()
plt.show()
# One-hot encoding of the Distance feature.
def Get_dis_hot(df):
    """Append one-hot indicator columns Distance-1..Distance10 to *df*.

    Derived from the 'Distance' column; the 'null' placeholder becomes NaN
    so it contributes no indicator. Assumes all twelve values -1..10 occur
    in the data -- the column rename raises ValueError otherwise.
    Modifies *df* in place and also returns it.
    """
    discols = ['Distance' + str(i) for i in range(-1, 11)]
    tmpdf = pd.get_dummies(df['Distance'].replace('null', np.nan))
    tmpdf.columns = discols
    df[discols] = tmpdf
    return df
# One-hot encode Distance for both data sets (adds Distance-1..Distance10).
new_data = Get_dis_hot(new_data)
new_test_data = Get_dis_hot(new_test_data)
# After assembling the feature set, fill any remaining gaps with -1.
new_data = new_data.fillna(-1)
new_test_data = new_test_data.fillna(-1)
new_data.isnull().sum()
new_test_data.isnull().sum()
# Optionally save/reload the final feature tables (kept commented out).
'''
new_data.to_csv('./final_new_data.csv')
new_test_data.to_csv('./final_new_test_data.csv')
# 读取最终保存的文件
new_data = pd.read_csv('./final_new_data.csv',index_col=0)
new_test_data = pd.read_csv('./final_new_test_data.csv',index_col=0)
'''
# ------------------------------------ Part 5: modelling --------------------------------------#
# Inspect the available columns (display only).
new_data.columns
new_test_data.columns
# Feature columns fed to the classifier.
features = ['Dis_rate','Discount_type','Discount_man', 'Discount_jian',
'Distance','Distance_type','Distance-1', 'Distance0',
'Distance1', 'Distance2', 'Distance3', 'Distance4', 'Distance5',
'Distance6', 'Distance7', 'Distance8', 'Distance9', 'Distance10',
'rec_coupon','Coupon_popu','Merchant_popu','merchant_min_distance',
'merchant_max_distance','merchant_mean_distance','merchant_median_distance',
'Is_weekend','Weekday','weekday1','weekday2','weekday3','weekday4',
'weekday5','weekday6','weekday7']
# Feature matrix / labels, then a stratified 80/20 train/validation split.
X = new_data[features]
y = new_data['label']
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.2,stratify=y)
# Hyper-parameter grid search (kept commented; best result recorded below).
# from sklearn.model_selection import GridSearchCV
# param_second_grid = {
#     'learning_rate':[0.1,0.11,0.12,0.13],
#     'max_depth': [4],
#     'n_estimators': [160]
# }
# grid_second_search = GridSearchCV(estimator = GradientBoostingClassifier(),
#     param_grid = param_second_grid, scoring = 'neg_mean_absolute_error', cv = 3)
# grid_second_search.fit(X_train , y_train)
# grid_second_search.best_params_
'''
{'learning_rate': 0.13, 'max_depth': 4, 'n_estimators': 160}
'''
# Gradient-boosted decision-tree classifier.
# Recorded runs: n 160 d 4 a 0.8890163692846558 s 0.6836
#                n 160 d 4 l 0.12 a 0.8902440244868226 s 0.6825
GBC_model = GradientBoostingClassifier(n_estimators=160,max_depth=4)
# BUGFIX: train on the training split only. The original fitted on the full
# (X, y), so X_test had already been seen during training and the
# "validation" AUC computed below was optimistically biased.
GBC_model.fit(X_train,y_train)
# Class-probability predictions on the held-out split (display only).
GBC_model.predict_proba(X_test)
# Probability of the positive class (label == 1).
y_predict = GBC_model.predict_proba(X_test)[:,1]
# AUC on the held-out split.
y_auc = roc_auc_score(y_test,y_predict)
# ------------------------------------ Part 6: model evaluation --------------------------------------#
# Plot the ROC curve.
from sklearn import metrics
import matplotlib.pyplot as plt
# fpr: false-positive rate, tpr: recall, thresholds: decision thresholds;
# pos_label marks the positive class (1).
fpr, tpr, thresholds = metrics.roc_curve(y_test, y_predict, pos_label=1)
roc_auc = metrics.auc(fpr, tpr) # area under the ROC curve
plt.plot(fpr, tpr, 'r') # ROC curve
axline = np.array([0.,0.2,0.4,0.6,0.8,1.0]) # diagonal reference-line coordinates
plt.plot(axline,axline,'gray',linestyle='--',alpha=0.5)
# BUGFIX: the keyword argument 'b' was removed from Axes.grid in recent
# Matplotlib (deprecated in 3.5); passing the flag positionally works on
# every version.
plt.grid(True,axis='both',alpha=0.3) # show grid
plt.xlim([-0.05, 1.05]) # x-axis range
plt.ylim([-0.05, 1.05]) # y-axis range
plt.xlabel('FPR') # False Positive Rate
plt.ylabel('TPR') # True Positive Rate
plt.title('AUC = %0.2f' % roc_auc)
plt.show()
# ------------------------------------ Part 7: model application --------------------------------------#
# Score the provided test set.
pre_test = new_test_data[features]
result = GBC_model.predict_proba(pre_test)[:,1]
# .copy() so adding the Probability column below does not raise a
# SettingWithCopyWarning on a slice of new_test_data.
test_result = new_test_data[['User_id','Coupon_id','Date_received']].copy()
test_result['Probability'] = result
# Descriptive summary of the predicted probabilities (display only).
test_result['Probability'].describe()
# Submission file: no index column, no header row (competition format);
# explicit False instead of the original None for both flags.
test_result.to_csv('./new_sample_submission.csv',index=False,header=False)
# Source: Alibaba Cloud Tianchi O2O competition -- coupon-usage prediction
# (code walkthrough), first published 2022-02-28; keywords auto-generated by
# CSDN. (Page-footer text kept as a comment so the file remains valid Python.)