【笔记】用户新增预测挑战赛任务2

任务2.1:数据分析与可视化

# 导入库
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

# Load the train / test splits of the competition data.
train_data = pd.read_csv('用户新增预测挑战赛公开数据/train.csv')
test_data = pd.read_csv('用户新增预测挑战赛公开数据/test.csv')

# Correlation heatmap over numeric columns only.
# BUG FIX: restricting to numeric dtypes is required on pandas >= 2.0, where
# DataFrame.corr() no longer silently drops object columns (e.g. `udmap`)
# and instead raises a TypeError.
sns.heatmap(train_data.select_dtypes(include='number').corr().abs(), cmap='YlOrRd')

# Mean of the label within each x7 group.
sns.barplot(x='x7', y='target', data=train_data)

# Classify every training-set column as numeric or categorical by its dtype
# and print the verdict.
for column in train_data.columns:
    dtype = train_data[column].dtypes
    if np.issubdtype(dtype, np.number):
        print(f"{column}是数值类型")
        continue
    if dtype == 'object':
        print(f"{column}是类别类型")

# For each numeric field, draw a box plot of its values grouped by the label,
# as the plan above ("在标签分组下的箱线图") intends.
# BUG FIX: the original plotted value_counts() as a box for *every* column,
# which neither groups by the label nor works for non-numeric columns.
for column in train_data.select_dtypes(include='number').columns:
    if column == 'target':
        continue  # the label is the grouping axis, not a feature to plot
    plt.figure()
    sns.boxplot(x='target', y=column, data=train_data)
    plt.title(f"{column}的箱线图")
    plt.show()

# Extract the hour from common_ts and plot how the label distribution changes
# per hour. Timestamps are epoch milliseconds.
train_data['common_ts'] = pd.to_datetime(train_data['common_ts'], unit='ms')
test_data['common_ts'] = pd.to_datetime(test_data['common_ts'], unit='ms')

# BUG FIX: the original plotted raw event *counts* per hour, which says
# nothing about the label; plot the mean of `target` per hour instead.
hourly_target = train_data.groupby(train_data['common_ts'].dt.hour)['target'].mean().sort_index()
plt.figure()
hourly_target.plot(kind='bar')
plt.title("每小时下标签分布的变化")
plt.show()

 

 

 

任务2.2:模型交叉验证

实操步骤:

  1. 加载数据集,并对数据进行编码

  2. 导入多个模型进行交叉验证

  3. 比较模型的F1精度

 

# 导入库
import pandas as pd
import numpy as np

# Load the train and test set files (re-read from scratch for task 2.2,
# discarding any columns added in the task 2.1 snippet above).
train_data = pd.read_csv('用户新增预测挑战赛公开数据/train.csv')
test_data = pd.read_csv('用户新增预测挑战赛公开数据/test.csv')

# Expand the udmap feature by hand into nine one-hot-style slots.
def udmap_onethot(d):
    """Expand one udmap string into a 9-slot numeric vector.

    Parameters
    ----------
    d : str
        Either the literal 'unknown' or the string form of a dict whose
        keys are 'key1' ... 'key9'.

    Returns
    -------
    numpy.ndarray
        Shape (9,); slot i-1 holds the value stored under 'key{i}',
        and 0 when that key is absent or when ``d`` is 'unknown'.
    """
    import ast  # local import so this block stays self-contained

    v = np.zeros(9)
    if d == 'unknown':
        return v
    # SECURITY FIX: ast.literal_eval replaces eval(); it only accepts Python
    # literals, so a malicious udmap string cannot execute arbitrary code.
    d = ast.literal_eval(d)
    for i in range(1, 10):
        key = 'key' + str(i)
        if key in d:
            v[i - 1] = d[key]
    return v
# Materialise the expanded udmap vectors as DataFrames with named columns.
key_names = ['key' + str(i) for i in range(1, 10)]
train_udmap_df = pd.DataFrame(
    np.vstack(train_data['udmap'].apply(udmap_onethot)), columns=key_names)
test_udmap_df = pd.DataFrame(
    np.vstack(test_data['udmap'].apply(udmap_onethot)), columns=key_names)

# Binary flag: 1 when the udmap field carried no payload at all.
train_data['udmap_isunknown'] = (train_data['udmap'] == 'unknown').astype(int)
test_data['udmap_isunknown'] = (test_data['udmap'] == 'unknown').astype(int)

# Append the expanded key columns to the original frames.
train_data = pd.concat([train_data, train_udmap_df], axis=1)
test_data = pd.concat([test_data, test_udmap_df], axis=1)

# Frequency encoding of eid: how often each eid appears in the training set.
# NOTE(review): eids that appear only in the test set map to NaN here —
# confirm downstream models tolerate NaN, or fillna before fitting.
train_data['eid_freq'] = train_data['eid'].map(train_data['eid'].value_counts())
test_data['eid_freq'] = test_data['eid'].map(train_data['eid'].value_counts())

# Target (mean) encoding of eid, computed on the full training labels.
# NOTE(review): using the whole train target leaks label information into the
# cross-validation below; an out-of-fold encoding would be safer.
train_data['eid_mean'] = train_data['eid'].map(train_data.groupby('eid')['target'].mean())
test_data['eid_mean'] = test_data['eid'].map(train_data.groupby('eid')['target'].mean())

# Parse the epoch-millisecond timestamp and expose the hour of day as a feature.
train_data['common_ts'] = pd.to_datetime(train_data['common_ts'], unit='ms')
test_data['common_ts'] = pd.to_datetime(test_data['common_ts'], unit='ms')
train_data['common_ts_hour'] = train_data['common_ts'].dt.hour
test_data['common_ts_hour'] = test_data['common_ts'].dt.hour

# 导入模型
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier

# 导入交叉验证和评价指标
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import classification_report

# Cross-validate four baseline classifiers and print a full classification
# report for each. The feature matrix drops the raw string/id columns that
# carry no model-usable signal (udmap, common_ts, uuid) plus the label.
X = train_data.drop(['udmap', 'common_ts', 'uuid', 'target'], axis=1)
y = train_data['target']

for clf in (
    SGDClassifier(max_iter=10),
    DecisionTreeClassifier(),
    MultinomialNB(),
    RandomForestClassifier(n_estimators=5),
):
    pred = cross_val_predict(clf, X, y)
    print(classification_report(y, pred, digits=3))

 上面模型中,RandomForestClassifier的macro F1效果最好。随机森林是一种集成学习方法:它在多个自助采样(bootstrap)得到的训练子集上分别训练决策树,并在每次节点划分时只随机考察一部分特征,最后综合各棵树的预测结果。这种"多树平均"的机制降低了单棵决策树的方差,使模型对数据中的噪声更鲁棒,也能利用不同特征之间的关联性,因此泛化能力更强,macro F1 也更高。

from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt

# Fit a single decision tree to rank the features by importance.
feature_df = train_data.drop(['udmap', 'common_ts', 'uuid', 'target'], axis=1)
dt_model = DecisionTreeClassifier()
dt_model.fit(feature_df, train_data['target'])

# Sort features by importance, most important first.
importances = dt_model.feature_importances_
indices = np.argsort(importances)[::-1]

# BUG FIX: the original referenced an undefined name `features` (NameError);
# the tick labels must come from the columns the model was trained on.
features = feature_df.columns

# Bar chart of feature importance.
plt.figure(figsize=(10, 6))
plt.title('Feature Importance')
plt.bar(range(len(indices)), importances[indices], align='center')
plt.xticks(range(len(indices)), [features[i] for i in indices])
plt.xlim([-1, len(indices)])
plt.show()
# Benchmark additional model families against the earlier baselines.
from sklearn.svm import SVC
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import f1_score

# (name, estimator) pairs to evaluate.
models = [
    ('SGDClassifier', SGDClassifier(max_iter=10)),
    ('DecisionTreeClassifier', DecisionTreeClassifier()),
    ('MultinomialNB', MultinomialNB()),
    ('RandomForestClassifier', RandomForestClassifier(n_estimators=5)),
    ('SVC', SVC()),
    ('GradientBoostingClassifier', GradientBoostingClassifier())
]

# Cross-validate each model and report its macro F1.
# BUG FIX 1: the original pre-CV model.fit() call was redundant —
# cross_val_predict clones and fits the estimator per fold itself.
# BUG FIX 2: classification_report returns a *string*, so indexing it with
# [2] printed a single character; use f1_score(average='macro') instead.
X = train_data.drop(['udmap', 'common_ts', 'uuid', 'target'], axis=1)
y = train_data['target']
for name, model in models:
    pred = cross_val_predict(model, X, y)
    print(f'{name} F1 Score: {f1_score(y, pred, average="macro"):.3f}')

 任务2.3:特征工程

 实操步骤:
1. common_ts_day 特征:从时间戳 common_ts 中提取出日期部分的天数,以创建一个新的特征表示访问日期。
2. x1_freq 和 x1_mean 特征:计算特征 x1 在训练集中的频次(出现次数)以及在训练集中对应的目标列 target 的均值。这些特征可以捕捉 x1 特征的重要性和与目标的关系。
3. 类似地,对于其他 x2、x3、x4、x6、x7 和 x8 特征,进行了类似的操作,分别计算它们在训练集中的频次和均值。


# common_ts_day: day-of-month extracted from the parsed timestamp.
train_data['common_ts_day'] = train_data['common_ts'].dt.day
test_data['common_ts_day'] = test_data['common_ts'].dt.day

# Frequency encoding for x1..x4 and x6..x8, plus target-mean encoding for the
# subset {x1, x2, x6, x7, x8}. Both statistics are computed on the training
# set only and mapped onto train and test alike. The single loop preserves
# the original column-creation order (each column's _freq before its _mean).
_MEAN_ENCODED = {'x1', 'x2', 'x6', 'x7', 'x8'}
for col in ('x1', 'x2', 'x3', 'x4', 'x6', 'x7', 'x8'):
    counts = train_data[col].value_counts()
    train_data[col + '_freq'] = train_data[col].map(counts)
    test_data[col + '_freq'] = test_data[col].map(counts)
    if col in _MEAN_ENCODED:
        target_means = train_data.groupby(col)['target'].mean()
        train_data[col + '_mean'] = train_data[col].map(target_means)
        test_data[col + '_mean'] = test_data[col].map(target_means)

 

# Extra calendar features from the parsed timestamp: month and year.
for frame in (train_data, test_data):
    ts = frame['common_ts'].dt
    frame['common_ts_month'] = ts.month
    frame['common_ts_year'] = ts.year

  • 1
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 1
    评论
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值