Section 7 — Logistic Regression: Can Past Behavior Predict the Present?

import pandas as pd
import numpy as np
import pymysql
from sqlalchemy import create_engine
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno
%matplotlib inline
# database engine
engine = create_engine('mysql+pymysql://root:123456@localhost:3306/datascience')
## Read the data
# read the data
data = 'data/section7-dau.csv'
dau = pd.read_csv(data)

# write to MySQL
# dau.to_sql('s7_dau',engine,index=False)

dau.head()
  region_month  region_day app_name   user_id device
0      2013-01  2013-01-01  game-02  10061580     FP
1      2013-01  2013-01-01  game-02  10154440     FP
2      2013-01  2013-01-01  game-02  10164762     SP
3      2013-01  2013-01-01  game-02  10165615     FP
4      2013-01  2013-01-01  game-02  10321356     FP
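On a later run the table could be read back out of MySQL instead of the CSV (a sketch, assuming the commented-out `to_sql` call above has been executed once):

dau = pd.read_sql('s7_dau', engine)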
# inspect the data
dau.info()
print(dau.region_month.value_counts())
print(dau.region_day.unique())
print(dau.device.value_counts())
2013-01    25847
2013-02    23141
Name: region_month, dtype: int64
['2013-01-01' '2013-01-02' '2013-01-03' ... '2013-02-28']  (every day from 2013-01-01 through 2013-02-28)
FP    30331
SP    18657
Name: device, dtype: int64
## Preparing the account-migration label data
#### Extract the needed columns and drop duplicates, giving each user's logins by month and device
mau = dau[['region_month','user_id','device']]
mau.head()
  region_month   user_id device
0      2013-01  10061580     FP
1      2013-01  10154440     FP
2      2013-01  10164762     SP
3      2013-01  10165615     FP
4      2013-01  10321356     FP
# duplicate rows (the same user logging in with the same device more than once in a month)
print(mau.duplicated().sum())
mau.drop_duplicates(inplace=True)
print(mau.duplicated().sum())
46007
0
(SettingWithCopyWarning: a value is being set on a copy of a slice from a DataFrame; see the pandas docs on view-versus-copy indexing)
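The warning appears because `mau` is a slice of `dau`. A minimal sketch of the usual fix, taking an explicit copy of the column subset so the in-place `drop_duplicates` is unambiguous:

mau = dau[['region_month','user_id','device']].copy()   # an independent copy, not a view
mau.drop_duplicates(inplace=True)                       # no SettingWithCopyWarning now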
#### Split feature phones (FP) and smartphones (SP)
fp = dau[dau['device']=='FP'][['region_month','user_id','device']].drop_duplicates()
sp = dau[dau['device']=='SP'][['region_month','user_id','device']].drop_duplicates()
print(fp.info())
print(sp.info())
#### Get the January and February data separately
# get the January and February data separately

fp_m1 = fp[fp['region_month']=='2013-01']
fp_m2 = fp[fp['region_month']=='2013-02']

sp_m1 = sp[sp['region_month']=='2013-01']
sp_m2 = sp[sp['region_month']=='2013-02']
#### February access status of January's feature-phone users
# February access status of January's feature-phone users

mau['is_access'] = 1
fp_m1 = pd.merge(fp_m1,mau[mau['region_month']=='2013-02'][['user_id','is_access']],how='left',on='user_id')
fp_m1['is_access'].fillna(0,inplace=True)

fp_m1.head()
  region_month   user_id device  is_access
0      2013-01  10061580     FP        1.0
1      2013-01  10154440     FP        0.0
2      2013-01  10165615     FP        1.0
3      2013-01  10321356     FP        1.0
4      2013-01  10447112     FP        1.0
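An equivalent way to build the access flag is a left merge with `indicator=True`, which avoids the manual `is_access` column and the `fillna` step. A minimal sketch (`feb` and `flagged` are hypothetical names):

feb = mau.loc[mau['region_month']=='2013-02', ['user_id']].drop_duplicates()
flagged = pd.merge(fp[fp['region_month']=='2013-01'], feb, how='left', on='user_id', indicator=True)
# '_merge' equals 'both' exactly where the user also appears in February
flagged['is_access'] = (flagged['_merge']=='both').astype(int)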
#### Did the feature-phone users who played in January keep accessing via feature phone in February?
# did the feature-phone users who played in January keep accessing via feature phone in February?

fp_m2['is_fp'] = 1
fp_m1 = pd.merge(fp_m1,fp_m2[['user_id','is_fp']],how='left',on='user_id')
fp_m1['is_fp'].fillna(0,inplace=True)

fp_m1.head()
  region_month   user_id device  is_access  is_fp
0      2013-01  10061580     FP        1.0    1.0
1      2013-01  10154440     FP        0.0    0.0
2      2013-01  10165615     FP        1.0    1.0
3      2013-01  10321356     FP        1.0    1.0
4      2013-01  10447112     FP        1.0    1.0
#### Did the feature-phone users who played in January access via smartphone in February?
# did the feature-phone users who played in January access via smartphone in February?

sp_m2['is_sp'] = 1
fp_m1 = pd.merge(fp_m1,sp_m2[['user_id','is_sp']],how='left',on='user_id')
fp_m1['is_sp'].fillna(0,inplace=True)

fp_m1.head()
  region_month   user_id device  is_access  is_fp  is_sp
0      2013-01  10061580     FP        1.0    1.0    0.0
1      2013-01  10154440     FP        0.0    0.0    0.0
2      2013-01  10165615     FP        1.0    1.0    0.0
3      2013-01  10321356     FP        1.0    1.0    0.0
4      2013-01  10447112     FP        1.0    1.0    0.0
#### Users who accessed via feature phone in January but either did not access at all in February, or accessed via smartphone
# January feature-phone users who either did not access in February, or accessed via smartphone
# (users who simply stayed on their feature phone are excluded: the question is churn vs. migration)

fp_m1 = fp_m1[(fp_m1['is_access']==0) | (fp_m1['is_sp']==1)]
fp_m1.head()
   region_month   user_id device  is_access  is_fp  is_sp
1       2013-01  10154440     FP        0.0    0.0    0.0
7       2013-01  10528830     FP        0.0    0.0    0.0
20      2013-01   1163733     FP        1.0    0.0    1.0
21      2013-01  11727630     FP        0.0    0.0    0.0
43      2013-01  13401362     FP        1.0    0.0    1.0
#### The rows above provide the label column for the logistic regression
## Preparing the daily-access data
# flag each daily login record

fp_dau = dau[(dau['device']=='FP') & (dau['region_month']=='2013-01')]
fp_dau['is_access'] = 1
fp_dau.head()
  region_month  region_day app_name   user_id device  is_access
0      2013-01  2013-01-01  game-02  10061580     FP          1
1      2013-01  2013-01-01  game-02  10154440     FP          1
3      2013-01  2013-01-01  game-02  10165615     FP          1
4      2013-01  2013-01-01  game-02  10321356     FP          1
6      2013-01  2013-01-01  game-02  10447112     FP          1
# column names X1day .. X31day
b = ['X' + str(a) + 'day' for a in np.arange(1, 32)]
# b.insert(0,'user_id')

# pivot into a per-user, per-day 0/1 login matrix
fp_dau_pivot = pd.pivot_table(fp_dau, values='is_access', columns='region_day', index='user_id', fill_value=0)
fp_dau_pivot.columns = b
fp_dau_pivot.reset_index(inplace=True)
fp_dau_pivot.head()
   user_id  X1day  X2day  X3day  X4day  X5day  X6day  X7day  X8day  X9day  ...  X22day  X23day  X24day  X25day  X26day  X27day  X28day  X29day  X30day  X31day
0   397286      1      1      1      1      1      1      1      1      1  ...       1       1       1       1       1       1       1       1       1       1
1   471341      1      1      1      1      0      0      0      0      0  ...       0       0       0       0       0       0       0       0       0       0
2   503874      1      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0       0       0       0
3   512250      1      1      1      1      1      1      1      1      1  ...       1       1       1       1       1       1       1       1       1       1
4   513811      0      0      0      0      0      0      0      0      0  ...       1       0       0       0       0       0       1       1       0       1

5 rows × 32 columns
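The same 0/1 login matrix can also be built with `pd.crosstab` (a sketch under the same data; the renaming to X1day..X31day would still follow):

# count logins per user per day, then binarize with > 0
alt = (pd.crosstab(fp_dau['user_id'], fp_dau['region_day']) > 0).astype(int)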

# merge the January daily login matrix with the migration labels (fp_m1); note the inner join here

fp_dau_m = pd.merge(fp_dau_pivot, fp_m1[['user_id','is_sp']], how='inner', on='user_id')

fp_dau_m.head()
   user_id  X1day  X2day  X3day  X4day  X5day  X6day  X7day  X8day  X9day  ...  X23day  X24day  X25day  X26day  X27day  X28day  X29day  X30day  X31day  is_sp
0   471341      1      1      1      1      0      0      0      0      0  ...       0       0       0       0       0       0       0       0       0    1.0
1   503874      1      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0       0       0    0.0
2  1073544      0      0      0      0      0      0      0      0      0  ...       1       1       1       0       0       0       0       0       0    0.0
3  1073864      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0       0       0    0.0
4  1163733      1      1      0      0      0      0      0      0      0  ...       1       1       1       1       1       1       0       0       0    1.0

5 rows × 33 columns

fp_dau_m.isna().sum().sum()
0
fp_dau_m.is_sp.value_counts()
0.0    190
1.0     62
Name: is_sp, dtype: int64
#### The data above shows what is_sp indicates: 1 means the user accessed via smartphone in February, 0 means the user churned. 190 users churned in February, while 62 switched to a smartphone!
## Logistic regression
### 1. sklearn
#### Adjusting `solver` and the penalty strength `C` changes the fit: weakening the regularization (larger C) pushes the training accuracy toward 100% (the C=10 run below scores about 0.905 on the training data)
from sklearn.linear_model import LogisticRegression

lr = LogisticRegression(solver='lbfgs',C=10)

x = fp_dau_m.iloc[:,1:-1]
y = fp_dau_m.iloc[:,-1]

lr.fit(x,y)

print('Coefficients:',lr.coef_)
print('Intercept:',lr.intercept_)
print('Score:',lr.score(x,y))
Coefficients: [[ 1.64264315  0.38232509  0.27375659  1.77818234 -1.2604587  -0.62425027
   1.64964331  0.94366796 -0.30971957 -2.45689215  1.05453162 -0.49567095
   1.37452985 -0.79198757 -1.39648934  0.18038175 -0.34026571  1.01401641
  -0.49919155 -0.25791649  0.98296119  1.03952236 -1.03446927  1.53177282
  -0.12212919  0.30942289  0.31267693 -0.08203749  1.32893163  1.57890364
   1.29380472]]
Intercept: [-3.9031072]
Score: 0.9047619047619048
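The coefficients and intercept map to probabilities through the logistic (sigmoid) function p = 1 / (1 + e^-(x·w + b)); a minimal sketch reconstructing `predict_proba` by hand from the fitted `lr` above:

z = x.values @ lr.coef_[0] + lr.intercept_[0]   # linear score per user
manual_prob = 1 / (1 + np.exp(-z))              # sigmoid maps scores to probabilities
print(np.allclose(manual_prob, lr.predict_proba(x)[:,1]))   # expect True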
yp = lr.predict_proba(x)[:,1]
df = fp_dau_m.copy()
df['prob'] = yp
df['pred'] = df['prob'].apply(lambda x: 1 if x > 0.5 else 0)
df.head(15)
    user_id  X1day  X2day  X3day  X4day  X5day  X6day  X7day  X8day  X9day  ...  X25day  X26day  X27day  X28day  X29day  X30day  X31day  is_sp      prob  pred
0    471341      1      1      1      1      0      0      0      0      0  ...       0       0       0       0       0       0       0    1.0  0.543341     1
1    503874      1      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.094451     0
2   1073544      0      0      0      0      0      0      0      0      0  ...       1       0       0       0       0       0       0    0.0  0.002510     0
3   1073864      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.025567     0
4   1163733      1      1      0      0      0      0      0      0      0  ...       1       1       1       1       0       0       0    1.0  0.849838     1
5   1454629      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.073879     0
6   1557628      0      0      0      0      0      0      0      0      1  ...       0       0       0       0       0       0       1    0.0  0.051221     0
7   2241462      1      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    1.0  0.094451     0
8   2313236      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.085385     0
9   2477685      0      0      0      0      0      0      0      0      0  ...       1       0       0       0       0       0       0    0.0  0.017546     0
10  2541741      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.001726     0
11  2628661      0      0      0      0      0      1      0      0      0  ...       0       1       0       0       0       0       0    0.0  0.014515     0
12  3509436      0      1      0      1      1      1      0      1      1  ...       1       1       1       1       1       1       1    1.0  0.987940     1
13  3509436      0      1      0      1      1      1      0      1      1  ...       1       1       1       1       1       1       1    1.0  0.987940     1
14  3955950      1      1      1      1      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.543341     1

15 rows × 35 columns

df.groupby(['is_sp','pred'])['user_id'].count().reset_index()
   is_sp  pred  user_id
0    0.0     0      181
1    0.0     1        9
2    1.0     0       15
3    1.0     1       47
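The same tabulation is available directly from `sklearn.metrics` (a minimal sketch over the `df` built above):

from sklearn.metrics import confusion_matrix, accuracy_score
# rows: true is_sp (0/1); columns: predicted label (0/1)
print(confusion_matrix(df['is_sp'], df['pred']))
print(accuracy_score(df['is_sp'], df['pred']))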
len(df[df['is_sp']==df['pred']])/len(df)
0.9047619047619048
#### The model below needs no manual parameter tuning to reach a comparable fit: LogisticRegressionCV cross-validates the regularization strength itself. The key knobs above were solver and C.
from sklearn.linear_model import LogisticRegressionCV

lr = LogisticRegressionCV(cv=10)

x = fp_dau_m.iloc[:,1:-1]
y = fp_dau_m.iloc[:,-1]

lr.fit(x,y)

print('Coefficients:',lr.coef_)
print('Intercept:',lr.intercept_)
print('-----------------------------------------------')
print('Score: ',lr.score(x,y))
Coefficients: [[ 0.66247469  0.39566209  0.12089587  0.72621501 -0.14485039 -0.11496137
   0.50433275  0.25667173  0.11561233 -0.48159577  0.23713178 -0.12897139
   0.31542595 -0.16714406 -0.1914315  -0.09390318 -0.05036135  0.0924934
  -0.14949742 -0.05918408  0.52355482  0.58543392  0.0882812   0.39783666
   0.07477356  0.14874974  0.39921228  0.38402639  0.68729765  0.6331324
   0.55885631]]
Intercept: [-2.95546571]
-----------------------------------------------
Score:  0.8928571428571429
### statsmodels
import statsmodels.api as sm
import statsmodels.formula.api as fsm     # formula-based fitting API; found it awkward, not used here

x = fp_dau_m.iloc[:,1:-1]
x['intercept'] = 1.0          # add an intercept column for the logistic regression
y = fp_dau_m.iloc[:,-1]

logit = sm.Logit(y, x)
result = logit.fit(method='bfgs',maxiter=100)   # 100 BFGS iterations; not quite enough to converge (see warning below)
Warning: Maximum number of iterations has been exceeded.
         Current function value: 0.222887
         Iterations: 100
ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
# result1 = logit.fit_regularized(alpha=5)
result.pred_table()
array([[180.,  10.],
       [ 14.,  48.]])
# result1.pred_table()
print(result.summary2())
                          Results: Logit
=================================================================
Model:               Logit             Pseudo R-squared:  0.601
Dependent Variable:  is_sp             AIC:               176.3352
Date:                2018-08-24 12:07  BIC:               289.2770
No. Observations:    252               Log-Likelihood:    -56.168
Df Model:            31                LL-Null:           -140.60
Df Residuals:        220               LLR p-value:       6.6358e-21
Converged:           0.0000            Scale:             1.0000
------------------------------------------------------------------
             Coef.   Std.Err.      z     P>|z|    [0.025    0.975]
------------------------------------------------------------------
X1day        1.9894    0.8047   2.4720  0.0134    0.4121    3.5666
X2day        0.3311    1.0705   0.3093  0.7571   -1.7671    2.4293
X3day        0.3793    0.9406   0.4033  0.6867   -1.4641    2.2227
X4day        2.0422    0.8359   2.4430  0.0146    0.4038    3.6805
X5day       -1.7597    1.1991  -1.4675  0.1422   -4.1100    0.5906
X6day       -0.6679    1.1717  -0.5701  0.5686   -2.9643    1.6285
X7day        2.0157    1.1176   1.8036  0.0713   -0.1747    4.2061
X8day        1.2119    1.3505   0.8974  0.3695   -1.4350    3.8589
X9day       -0.4495    1.1874  -0.3786  0.7050   -2.7768    1.8778
X10day      -3.2374    1.5580  -2.0779  0.0377   -6.2911   -0.1837
X11day       1.4392    1.2234   1.1764  0.2394   -0.9586    3.8370
X12day      -0.6389    1.5297  -0.4176  0.6762   -3.6370    2.3592
X13day       1.7797    1.1424   1.5579  0.1193   -0.4594    4.0188
X14day      -1.1242    1.2455  -0.9026  0.3668   -3.5653    1.3170
X15day      -1.8115    1.3050  -1.3881  0.1651   -4.3694    0.7463
X16day       0.4940    1.1666   0.4234  0.6720   -1.7925    2.7804
X17day      -0.4448    1.2234  -0.3636  0.7162   -2.8427    1.9531
X18day       1.4321    1.1465   1.2491  0.2116   -0.8150    3.6791
X19day      -0.6132    1.1990  -0.5114  0.6091   -2.9632    1.7369
X20day      -0.3130    1.4007  -0.2235  0.8232   -3.0585    2.4324
X21day       0.9587    1.2558   0.7634  0.4452   -1.5027    3.4201
X22day       1.1954    1.1238   1.0637  0.2875   -1.0072    3.3980
X23day      -1.5371    1.2303  -1.2494  0.2115   -3.9486    0.8743
X24day       1.8445    1.1038   1.6710  0.0947   -0.3190    4.0080
X25day       0.1292    1.5317   0.0844  0.9328   -2.8727    3.1312
X26day       0.3131    1.4280   0.2192  0.8265   -2.4858    3.1119
X27day       0.3365    1.2965   0.2596  0.7952   -2.2045    2.8776
X28day      -0.3918    1.8515  -0.2116  0.8324   -4.0207    3.2372
X29day       1.5941    1.0565   1.5088  0.1314   -0.4767    3.6648
X30day       1.9943    1.2117   1.6459  0.0998   -0.3806    4.3692
X31day       1.5214    1.1798   1.2896  0.1972   -0.7908    3.8337
intercept   -4.2502    0.5904  -7.1985  0.0000   -5.4074   -3.0930
=================================================================
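Because the model is linear in the log-odds, exponentiating a coefficient gives an odds ratio: for example, a login on day 1 multiplies the odds of migrating by about e^1.9894 ≈ 7.3. A sketch over the fitted `result` above:

print(np.exp(result.params))   # odds ratio per day-of-login feature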
# print(result1.summary2())
xx = fp_dau_m.iloc[:,1:-1]
xx['intercept'] = 1.0         # the intercept column must also be added for prediction

y_p = result.predict(xx)

ydf = fp_dau_m.copy()
ydf['prob'] = y_p
ydf['pred'] = ydf['prob'].apply(lambda x: 1 if x > 0.5 else 0)
ydf.head(15)
    user_id  X1day  X2day  X3day  X4day  X5day  X6day  X7day  X8day  X9day  ...  X25day  X26day  X27day  X28day  X29day  X30day  X31day  is_sp      prob  pred
0    471341      1      1      1      1      0      0      0      0      0  ...       0       0       0       0       0       0       0    1.0  0.620506     1
1    503874      1      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.094416     0
2   1073544      0      0      0      0      0      0      0      0      0  ...       1       0       0       0       0       0       0    0.0  0.000866     0
3   1073864      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.019167     0
4   1163733      1      1      0      0      0      0      0      0      0  ...       1       1       1       1       0       0       0    1.0  0.870576     1
5   1454629      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.077951     0
6   1557628      0      0      0      0      0      0      0      0      1  ...       0       0       0       0       0       0       1    0.0  0.039991     0
7   2241462      1      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    1.0  0.094416     0
8   2313236      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.082739     0
9   2477685      0      0      0      0      0      0      0      0      0  ...       1       0       0       0       0       0       0    0.0  0.015969     0
10  2541741      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.000560     0
11  2628661      0      0      0      0      0      1      0      0      0  ...       0       1       0       0       0       0       0    0.0  0.009902     0
12  3509436      0      1      0      1      1      1      0      1      1  ...       1       1       1       1       1       1       1    1.0  0.992456     1
13  3509436      0      1      0      1      1      1      0      1      1  ...       1       1       1       1       1       1       1    1.0  0.992456     1
14  3955950      1      1      1      1      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.620506     1

15 rows × 35 columns

ydf.groupby(['is_sp','pred'])['user_id'].count().reset_index()
   is_sp  pred  user_id
0    0.0     0      180
1    0.0     1       10
2    1.0     0       14
3    1.0     1       48
len(ydf[ydf['is_sp']==ydf['pred']])/len(ydf)
0.9047619047619048
### Observations
According to the sklearn predictions, 9 users were predicted as 1, i.e. as having migrated their accounts, when in fact they had not. Judged by their past access patterns these users should have migrated, yet in reality they belong to the churned group.
df.head(10)
   user_id  X1day  X2day  X3day  X4day  X5day  X6day  X7day  X8day  X9day  ...  X25day  X26day  X27day  X28day  X29day  X30day  X31day  is_sp      prob  pred
0   471341      1      1      1      1      0      0      0      0      0  ...       0       0       0       0       0       0       0    1.0  0.543341     1
1   503874      1      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.094451     0
2  1073544      0      0      0      0      0      0      0      0      0  ...       1       0       0       0       0       0       0    0.0  0.002510     0
3  1073864      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.025567     0
4  1163733      1      1      0      0      0      0      0      0      0  ...       1       1       1       1       0       0       0    1.0  0.849838     1
5  1454629      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.073879     0
6  1557628      0      0      0      0      0      0      0      0      1  ...       0       0       0       0       0       0       1    0.0  0.051221     0
7  2241462      1      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    1.0  0.094451     0
8  2313236      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.085385     0
9  2477685      0      0      0      0      0      0      0      0      0  ...       1       0       0       0       0       0       0    0.0  0.017546     0
10 rows × 35 columns

# true positives (migrated and predicted migrated), lowest probabilities first
df1 = df[(df['is_sp']==1) & (df['pred']==1)]
df1.sort_values(by='prob',ascending=True).head(15)
      user_id  X1day  X2day  X3day  X4day  X5day  X6day  X7day  X8day  X9day  ...  X25day  X26day  X27day  X28day  X29day  X30day  X31day  is_sp      prob  pred
228  52776438      1      1      1      1      1      1      1      1      1  ...       0       0       0       0       0       0       0    1.0  0.512293     1
171  32762652      1      1      1      1      1      1      1      1      1  ...       0       0       0       0       0       0       0    1.0  0.512293     1
155  27800629      1      1      1      1      0      0      0      0      0  ...       0       0       0       0       0       0       0    1.0  0.543341     1
0      471341      1      1      1      1      0      0      0      0      0  ...       0       0       0       0       0       0       0    1.0  0.543341     1
36    8645980      0      0      0      1      0      0      0      0      0  ...       0       0       0       0       1       0       0    1.0  0.551574     1
37    8645980      0      0      0      1      0      0      0      0      0  ...       0       0       0       0       1       0       0    1.0  0.551574     1
169  32500332      1      1      1      1      1      1      1      1      1  ...       1       0       0       0       0       0       0    1.0  0.587923     1
55   11600349      0      1      1      1      1      1      1      1      1  ...       0       0       0       1       1       1       1    1.0  0.684198     1
56   11600349      0      1      1      1      1      1      1      1      1  ...       0       0       0       1       1       1       1    1.0  0.684198     1
146  25787360      0      0      0      0      1      0      1      1      1  ...       0       0       1       0       0       0       0    1.0  0.696295     1
145  25787360      0      0      0      0      1      0      1      1      1  ...       0       0       1       0       0       0       0    1.0  0.696295     1
4     1163733      1      1      0      0      0      0      0      0      0  ...       1       1       1       1       0       0       0    1.0  0.849838     1
48   10406653      0      1      1      1      1      1      1      1      0  ...       1       0       1       1       1       1       1    1.0  0.865393     1
49   10406653      0      1      1      1      1      1      1      1      0  ...       1       0       1       1       1       1       1    1.0  0.865393     1
165  31066299      0      1      1      1      0      1      1      1      1  ...       1       1       1       0       1       1       0    1.0  0.951970     1

15 rows × 35 columns

# the same true positives, highest probabilities first
df2 = df[(df['is_sp']==1) & (df['pred']==1)]
df2.sort_values(by='prob',ascending=False).head(15)
      user_id  X1day  X2day  X3day  X4day  X5day  X6day  X7day  X8day  X9day  ...  X25day  X26day  X27day  X28day  X29day  X30day  X31day  is_sp      prob  pred
136  24791702      1      1      0      1      0      1      1      1      1  ...       1       1       1       1       1       1       1    1.0  0.998618     1
137  24791702      1      1      0      1      0      1      1      1      1  ...       1       1       1       1       1       1       1    1.0  0.998618     1
44    9567562      1      1      1      1      1      1      1      1      1  ...       1       1       1       1       1       1       1    1.0  0.996302     1
43    9567562      1      1      1      1      1      1      1      1      1  ...       1       1       1       1       1       1       1    1.0  0.996302     1
139  24900784      1      1      1      1      1      1      1      1      1  ...       1       1       1       1       1       1       1    1.0  0.993923     1
124  23113079      1      1      1      1      1      1      1      1      1  ...       1       1       1       1       1       1       1    1.0  0.993923     1
133  24581383      1      1      1      1      1      1      1      1      1  ...       1       1       1       1       1       1       1    1.0  0.993923     1
134  24581383      1      1      1      1      1      1      1      1      1  ...       1       1       1       1       1       1       1    1.0  0.993923     1
138  24900784      1      1      1      1      1      1      1      1      1  ...       1       1       1       1       1       1       1    1.0  0.993923     1
123  23113079      1      1      1      1      1      1      1      1      1  ...       1       1       1       1       1       1       1    1.0  0.993923     1
114  21551429      1      1      1      1      1      1      1      1      1  ...       1       1       1       1       1       1       1    1.0  0.993923     1
147  27003770      1      1      1      1      1      1      1      1      1  ...       1       1       1       1       1       1       1    1.0  0.993923     1
148  27003770      1      1      1      1      1      1      1      1      1  ...       1       1       1       1       1       1       1    1.0  0.993923     1
150  27602710      1      1      1      1      1      1      1      1      1  ...       1       1       1       1       1       1       1    1.0  0.993923     1
151  27602710      1      1      1      1      1      1      1      1      1  ...       1       1       1       1       1       1       1    1.0  0.993923     1

15 rows × 35 columns

# false positives (predicted migrated but actually churned), highest probabilities first
df3 = df[(df['is_sp']==0) & (df['pred']==1)]
df3.sort_values(by='prob',ascending=False).head(15)
      user_id  X1day  X2day  X3day  X4day  X5day  X6day  X7day  X8day  X9day  ...  X25day  X26day  X27day  X28day  X29day  X30day  X31day  is_sp      prob  pred
194  41590801      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       1       0       1    0.0  0.677458     1
108  19432099      1      1      1      1      0      1      1      1      1  ...       0       0       0       0       0       0       0    0.0  0.643061     1
203  43451947      1      1      1      1      1      0      1      1      1  ...       1       0       0       1       1       0       0    0.0  0.599921     1
197  42276142      1      1      1      1      1      1      0      1      1  ...       1       1       1       1       1       0       0    0.0  0.577420     1
209  46285446      0      0      0      0      1      1      1      1      1  ...       1       1       1       0       1       0       0    0.0  0.576873     1
14    3955950      1      1      1      1      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.543341     1
158  28391896      1      1      1      1      1      1      1      1      1  ...       0       0       0       0       0       0       0    0.0  0.512293     1
240  59561276      1      1      1      1      1      1      1      1      1  ...       0       0       0       0       0       0       0    0.0  0.512293     1
27    6147878      1      0      0      1      1      1      1      1      1  ...       1       1       0       0       0       0       0    0.0  0.502182     1

9 rows × 35 columns

# the same false positives, lowest probabilities first
df4 = df[(df['is_sp']==0) & (df['pred']==1)]
df4.sort_values(by='prob',ascending=True).head(15)
      user_id  X1day  X2day  X3day  X4day  X5day  X6day  X7day  X8day  X9day  ...  X25day  X26day  X27day  X28day  X29day  X30day  X31day  is_sp      prob  pred
27    6147878      1      0      0      1      1      1      1      1      1  ...       1       1       0       0       0       0       0    0.0  0.502182     1
158  28391896      1      1      1      1      1      1      1      1      1  ...       0       0       0       0       0       0       0    0.0  0.512293     1
240  59561276      1      1      1      1      1      1      1      1      1  ...       0       0       0       0       0       0       0    0.0  0.512293     1
14    3955950      1      1      1      1      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.543341     1
209  46285446      0      0      0      0      1      1      1      1      1  ...       1       1       1       0       1       0       0    0.0  0.576873     1
197  42276142      1      1      1      1      1      1      0      1      1  ...       1       1       1       1       1       0       0    0.0  0.577420     1
203  43451947      1      1      1      1      1      0      1      1      1  ...       1       0       0       1       1       0       0    0.0  0.599921     1
108  19432099      1      1      1      1      0      1      1      1      1  ...       0       0       0       0       0       0       0    0.0  0.643061     1
194  41590801      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       1       0       1    0.0  0.677458     1

9 rows × 35 columns

# true negatives (churned and predicted churned), lowest probabilities first
df5 = df[(df['is_sp']==0) & (df['pred']==0)]
df5.sort_values(by='prob',ascending=True).head(15)
      user_id  X1day  X2day  X3day  X4day  X5day  X6day  X7day  X8day  X9day  ...  X25day  X26day  X27day  X28day  X29day  X30day  X31day  is_sp      prob  pred
149  27249550      0      0      0      1      1      1      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.000946     0
10    2541741      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.001726     0
242  60725457      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.001726     0
101  18408297      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.001745     0
172  33766090      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.002257     0
2     1073544      0      0      0      0      0      0      0      0      0  ...       1       0       0       0       0       0       0    0.0  0.002510     0
227  52612953      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.003087     0
63   12582684      0      0      0      1      1      0      1      0      0  ...       0       0       0       0       0       0       0    0.0  0.004780     0
208  46056688      0      0      0      0      1      1      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.004799     0
66   13157777      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.004969     0
190  40654033      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.004969     0
120  22437652      0      0      0      0      1      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.005689     0
87   16601600      0      0      0      0      1      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.005689     0
70   13967453      0      0      0      0      1      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.005689     0
112  20955934      0      0      0      0      1      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.005689     0

15 rows × 35 columns

# false negatives (migrated but predicted churned), highest probabilities first
df6 = df[(df['is_sp']==1) & (df['pred']==0)]
df6.sort_values(by='prob',ascending=False).head(15)
      user_id  X1day  X2day  X3day  X4day  X5day  X6day  X7day  X8day  X9day  ...  X25day  X26day  X27day  X28day  X29day  X30day  X31day  is_sp      prob  pred
198  42438713      1      1      1      1      1      1      1      0      0  ...       0       0       0       0       0       0       0    1.0  0.484688     0
127  23689923      1      1      0      1      1      1      1      1      1  ...       0       0       0       0       0       0       0    1.0  0.359100     0
213  47332069      0      0      0      0      0      0      0      0      0  ...       1       1       0       0       0       0       0    1.0  0.281079     0
140  24914421      1      1      1      0      0      0      0      1      0  ...       0       1       0       0       0       0       0    1.0  0.278119     0
226  52131958      0      0      1      1      1      1      1      1      1  ...       1       1       1       0       0       0       0    1.0  0.259709     0
212  47266966      1      0      0      1      0      1      1      1      1  ...       0       0       0       0       0       0       0    1.0  0.232730     0
236  57869405      0      0      0      0      0      0      1      1      0  ...       0       0       0       0       0       0       0    1.0  0.212521     0
161  29698758      1      1      1      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    1.0  0.167370     0
30    7177251      1      1      1      1      1      1      0      0      0  ...       0       0       0       0       0       0       0    1.0  0.153046     0
7     2241462      1      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    1.0  0.094451     0
67   13401362      1      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    1.0  0.094451     0
80   15569351      0      0      0      0      0      0      1      0      1  ...       0       0       0       0       0       0       0    1.0  0.071546     0
93   17388480      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       1       0       0    1.0  0.070819     0
94   17388480      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       1       0       0    1.0  0.070819     0
163  30103279      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    1.0  0.028795     0

15 rows × 35 columns

## Here the copy problem shows up!!! Plain `=` only binds another name to the same object in memory; take a `.copy()` when you need an independent variable!!
fp_dau_m.head()
   user_id  X1day  X2day  X3day  X4day  X5day  X6day  X7day  X8day  X9day  ...  X23day  X24day  X25day  X26day  X27day  X28day  X29day  X30day  X31day  is_sp
0   471341      1      1      1      1      0      0      0      0      0  ...       0       0       0       0       0       0       0       0       0    1.0
1   503874      1      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0       0       0    0.0
2  1073544      0      0      0      0      0      0      0      0      0  ...       1       1       1       0       0       0       0       0       0    0.0
3  1073864      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0       0       0    0.0
4  1163733      1      1      0      0      0      0      0      0      0  ...       1       1       1       1       1       1       0       0       0    1.0

5 rows × 33 columns

df.equals(fp_dau_m)
False
df.equals(ydf)
False
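A minimal illustration of the point, using `fp_dau_m` from above:

a = fp_dau_m          # plain assignment: a second name for the same object
b = fp_dau_m.copy()   # an independent copy of the data
print(a is fp_dau_m)       # True  - one object, two names
print(b is fp_dau_m)       # False - a distinct object...
print(b.equals(fp_dau_m))  # True  - ...with equal contents

This is also why `df.equals(fp_dau_m)` is False above: `df` started as a copy and then gained the `prob` and `pred` columns, leaving `fp_dau_m` untouched.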