提示:文章写完后,目录可以自动生成,如何生成可参考右边的帮助文档
文章目录
继续调整特征+模型优化
实现代码
import ast

import numpy as np
import pandas as pd
# Load the competition data. 'common_ts' arrives as a Unix epoch in
# milliseconds, so convert it to a proper datetime for feature extraction.
train_data = pd.read_csv('用户新增预测挑战赛公开数据/train.csv')
test_data = pd.read_csv('用户新增预测挑战赛公开数据/test.csv')
train_data['common_ts'] = pd.to_datetime(train_data['common_ts'], unit='ms')
test_data['common_ts'] = pd.to_datetime(test_data['common_ts'], unit='ms')
def udmap_onethot(d):
    """Expand the raw ``udmap`` field into a fixed-length vector of 9 values.

    Parameters
    ----------
    d : str
        Either the literal string 'unknown' or a dict literal such as
        "{'key1': 3, 'key5': 7}" — TODO confirm the exact on-disk format.

    Returns
    -------
    numpy.ndarray
        Length-9 vector where slot ``i-1`` holds the value of ``'key{i}'``
        (0.0 when the key is absent or the field is 'unknown').
    """
    v = np.zeros(9)
    if d == 'unknown':
        return v
    # BUG FIX: the original used eval(), which executes arbitrary code from a
    # data file. ast.literal_eval parses the same dict literals but only
    # accepts Python literals, so malformed/malicious input cannot run code.
    d = ast.literal_eval(d)
    for i in range(1, 10):
        key = 'key' + str(i)
        if key in d:
            v[i - 1] = d[key]
    return v
# Expand every row's udmap string into nine numeric columns (key1..key9)
# and attach them to the corresponding frame.
key_columns = ['key' + str(i) for i in range(1, 10)]
train_udmap_df = pd.DataFrame(
    np.vstack(train_data['udmap'].apply(udmap_onethot)), columns=key_columns)
test_udmap_df = pd.DataFrame(
    np.vstack(test_data['udmap'].apply(udmap_onethot)), columns=key_columns)
train_data = pd.concat([train_data, train_udmap_df], axis=1)
test_data = pd.concat([test_data, test_udmap_df], axis=1)
# Frequency encoding of 'eid'. Counts come from TRAIN for both splits, so an
# eid unseen during training maps to NaN on the test set.
train_data['eid_freq'] = train_data['eid'].map(train_data['eid'].value_counts())
test_data['eid_freq'] = test_data['eid'].map(train_data['eid'].value_counts())
# Target (mean) encoding of 'eid', computed on the full training set.
# NOTE(review): using the full-train target mean leaks label information into
# cross-validation folds below — confirm this is intended, or fold it per split.
train_data['eid_mean'] = train_data['eid'].map(train_data.groupby('eid')['target'].mean())
test_data['eid_mean'] = test_data['eid'].map(train_data.groupby('eid')['target'].mean())
# Flag rows whose raw udmap field was the literal string 'unknown'.
train_data['udmap_isunknown'] = (train_data['udmap'] == 'unknown').astype(int)
test_data['udmap_isunknown'] = (test_data['udmap'] == 'unknown').astype(int)
# Calendar / time-of-day features derived from the event timestamp.
train_data['common_ts_hour'] = train_data['common_ts'].dt.hour
test_data['common_ts_hour'] = test_data['common_ts'].dt.hour
train_data['common_ts_minute'] = train_data['common_ts'].dt.minute
test_data['common_ts_minute'] = test_data['common_ts'].dt.minute
# Extract the day of month.
train_data['common_ts_day'] = train_data['common_ts'].dt.day
test_data['common_ts_day'] = test_data['common_ts'].dt.day
# Frequency of feature x1 in the training set, and the mean of the training
# target for each x1 value. Both are computed on TRAIN only, so unseen test
# values of x1 map to NaN.
train_data['x1_freq'] = train_data['x1'].map(train_data['x1'].value_counts())
test_data['x1_freq'] = test_data['x1'].map(train_data['x1'].value_counts())
# NOTE(review): full-train target mean leaks label information into CV folds.
train_data['x1_mean'] = train_data['x1'].map(train_data.groupby('x1')['target'].mean())
test_data['x1_mean'] = test_data['x1'].map(train_data.groupby('x1')['target'].mean())
# Aggregate sums over the expanded udmap key columns.
train_data['key7+key8+key9'] = train_data['key7']+train_data['key8']+train_data['key9']
test_data['key7+key8+key9'] = test_data['key7']+test_data['key8']+test_data['key9']
train_data['key1~key6'] = train_data['key1']+train_data['key2']+train_data['key3']+train_data['key4']+train_data['key5']+train_data['key6']
test_data['key1~key6'] = test_data['key1']+test_data['key2']+test_data['key3']+test_data['key4']+test_data['key5']+test_data['key6']
# Binary flag: event hour falls within 8:00–15:59 (between() is inclusive).
train_data['is_in_working_hours'] = train_data['common_ts_hour'].between(8, 15).astype(int)
test_data['is_in_working_hours'] = test_data['common_ts_hour'].between(8, 15).astype(int)
# Binary flag: x7 is at most 5.
train_data['is_x7_less_than_5'] = (train_data['x7'] <= 5).astype(int)
test_data['is_x7_less_than_5'] = (test_data['x7'] <= 5).astype(int)
# Day-of-month indicator features.
# BUG FIX: the test-set columns were named 'is_common_ts_is_10' (and _14, _17)
# while the train-set columns were 'is_common_ts_day_is_10', so the two
# feature sets diverged and prediction-time column alignment would fail.
# Both splits now share identical column names.
train_data['is_common_ts_day_more_10'] = (train_data['common_ts_day'] >= 10).astype(int)
test_data['is_common_ts_day_more_10'] = (test_data['common_ts_day'] >= 10).astype(int)
for special_day in (10, 14, 17):
    day_col = f'is_common_ts_day_is_{special_day}'
    train_data[day_col] = (train_data['common_ts_day'] == special_day).astype(int)
    test_data[day_col] = (test_data['common_ts_day'] == special_day).astype(int)
# Per-eid mean of each expanded udmap key value, computed on TRAIN and mapped
# onto both splits (eids unseen in train map to NaN on test).
for key_index in range(1, 10):
    key_name = f'key{key_index}'
    per_eid_mean = train_data.groupby('eid')[key_name].mean()
    train_data[f'eid_{key_name}_mean'] = train_data['eid'].map(per_eid_mean)
    test_data[f'eid_{key_name}_mean'] = test_data['eid'].map(per_eid_mean)
# One-hot encode every pairwise combination of selected categorical features.
from itertools import combinations

def _add_pairwise_dummies(df):
    """Return *df* with one-hot columns appended for each feature pair.

    For every pair (f1, f2) of the selected columns, get_dummies of the two
    columns is concatenated and the resulting dummy columns are prefixed with
    'f1_f2_' so successive pairs never collide.

    NOTE(review): train and test are encoded independently, so their dummy
    column sets can differ whenever a category value appears in only one
    split — confirm columns are aligned before fitting/predicting.
    """
    selected_columns = ['x1', 'x2', 'x6', 'x7', 'x8']
    for feature1, feature2 in combinations(selected_columns, 2):
        combined_feature_name = f'{feature1}_{feature2}'  # prefix for the new columns
        combined_feature = pd.get_dummies(
            df[[feature1, feature2]],
            columns=[feature1, feature2],
            prefix=[feature1, feature2],
        )
        df = pd.concat([df, combined_feature], axis=1)
        df.rename(
            columns={col: f'{combined_feature_name}_{col}' for col in combined_feature.columns},
            inplace=True,
        )
    return df

# DEDUPLICATION: the original repeated the identical loop once for train_data
# and once for test_data; both splits now go through the same helper.
train_data = _add_pairwise_dummies(train_data)
test_data = _add_pairwise_dummies(test_data)
import lightgbm as lgb
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import classification_report
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import f1_score
# Train and validate a DecisionTreeClassifier.
# NOTE(review): lightgbm, LogisticRegression and f1_score are imported but
# unused here — presumably leftovers from earlier experiments; confirm before
# removing.
# Drop identifier/raw columns and the label; the raw key1..key9 columns are
# dropped in favor of the derived aggregate features built above.
X = train_data.drop(['udmap', 'common_ts', 'uuid', 'target', 'key1', 'key2', 'key3', 'key4', 'key5', 'key6', 'key7', 'key8', 'key9'], axis=1)
y = train_data['target']
# NOTE(review): no random_state is set (results not reproducible) and
# cross_val_predict runs with its default CV splitting — confirm both are
# intended.
clf = DecisionTreeClassifier()
pred = cross_val_predict(clf, X, y)
report = classification_report(y, pred, digits=3)
print(report)
总结
线下测试0.8+,线上只有0.75+。可能模型有些过拟合了。
另外,目前还没有尝试更换其它模型,后续可以进行其它模型尝试。