Section 7

import pandas as pd
import numpy as np
import pymysql
from sqlalchemy import create_engine
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno
%matplotlib inline
# database engine
engine = create_engine('mysql+pymysql://root:123456@localhost:3306/datascience')
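
The engine can also read tables back into pandas. A minimal sketch, assuming the s7_dau table has already been written by the to_sql call below:

# hypothetical read-back of the s7_dau table
dau_db = pd.read_sql('SELECT * FROM s7_dau', engine)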

Read the data

# read the data
data = 'data/section7-dau.csv'
dau = pd.read_csv(data)

# write to MySQL
# dau.to_sql('s7_dau',engine,index=False)

dau.head()
  region_month  region_day app_name   user_id device
0      2013-01  2013-01-01  game-02  10061580     FP
1      2013-01  2013-01-01  game-02  10154440     FP
2      2013-01  2013-01-01  game-02  10164762     SP
3      2013-01  2013-01-01  game-02  10165615     FP
4      2013-01  2013-01-01  game-02  10321356     FP
# inspect the data
dau.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 48988 entries, 0 to 48987
Data columns (total 5 columns):
region_month    48988 non-null object
region_day      48988 non-null object
app_name        48988 non-null object
user_id         48988 non-null int64
device          48988 non-null object
dtypes: int64(1), object(4)
memory usage: 1.9+ MB
print(dau.region_month.value_counts())
print(dau.region_day.unique())
print(dau.device.value_counts())
2013-01    25847
2013-02    23141
Name: region_month, dtype: int64
['2013-01-01' '2013-01-02' '2013-01-03' '2013-01-04' '2013-01-05'
 '2013-01-06' '2013-01-07' '2013-01-08' '2013-01-09' '2013-01-10'
 '2013-01-11' '2013-01-12' '2013-01-13' '2013-01-14' '2013-01-15'
 '2013-01-16' '2013-01-17' '2013-01-18' '2013-01-19' '2013-01-20'
 '2013-01-21' '2013-01-22' '2013-01-23' '2013-01-24' '2013-01-25'
 '2013-01-26' '2013-01-27' '2013-01-28' '2013-01-29' '2013-01-30'
 '2013-01-31' '2013-02-01' '2013-02-02' '2013-02-03' '2013-02-04'
 '2013-02-05' '2013-02-06' '2013-02-07' '2013-02-08' '2013-02-09'
 '2013-02-10' '2013-02-11' '2013-02-12' '2013-02-13' '2013-02-14'
 '2013-02-15' '2013-02-16' '2013-02-17' '2013-02-18' '2013-02-19'
 '2013-02-20' '2013-02-21' '2013-02-22' '2013-02-23' '2013-02-24'
 '2013-02-25' '2013-02-26' '2013-02-27' '2013-02-28']
FP    30331
SP    18657
Name: device, dtype: int64

Preparing the data on whether users migrated their accounts

Extract the needed columns and drop duplicates to get each user's monthly logins per device
mau = dau[['region_month','user_id','device']]   # a slice of dau; editing it in place triggers the SettingWithCopyWarning below
mau.head()
  region_month   user_id device
0      2013-01  10061580     FP
1      2013-01  10154440     FP
2      2013-01  10164762     SP
3      2013-01  10165615     FP
4      2013-01  10321356     FP
# duplicated rows (the same user logging in with the same device within one month)
print(mau.duplicated().sum())
mau.drop_duplicates(inplace=True)
print(mau.duplicated().sum())
46007
0


D:\ProgramData\Anaconda3\lib\site-packages\ipykernel_launcher.py:3: SettingWithCopyWarning: 
A value is trying to be set on a copy of a slice from a DataFrame

See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
  This is separate from the ipykernel package so we can avoid doing imports until
Split feature-phone (FP) and smartphone (SP) users
fp = dau[dau['device']=='FP'][['region_month','user_id','device']].drop_duplicates()
sp = dau[dau['device']=='SP'][['region_month','user_id','device']].drop_duplicates()
print(fp.info())
print(sp.info())
<class 'pandas.core.frame.DataFrame'>
Int64Index: 1755 entries, 0 to 48901
Data columns (total 3 columns):
region_month    1755 non-null object
user_id         1755 non-null int64
device          1755 non-null object
dtypes: int64(1), object(2)
memory usage: 54.8+ KB
None
<class 'pandas.core.frame.DataFrame'>
Int64Index: 1226 entries, 2 to 48834
Data columns (total 3 columns):
region_month    1226 non-null object
user_id         1226 non-null int64
device          1226 non-null object
dtypes: int64(1), object(2)
memory usage: 38.3+ KB
None
Get the January and February data separately
# get the January and February data separately

fp_m1 = fp[fp['region_month']=='2013-01']
fp_m2 = fp[fp['region_month']=='2013-02']

sp_m1 = sp[sp['region_month']=='2013-01']
sp_m2 = sp[sp['region_month']=='2013-02']
February access status of January's feature-phone users
# February access status of January's feature-phone users

mau['is_access'] = 1
fp_m1 = pd.merge(fp_m1,mau[mau['region_month']=='2013-02'][['user_id','is_access']],how='left',on='user_id')
fp_m1['is_access'].fillna(0,inplace=True)

fp_m1.head()
D:\ProgramData\Anaconda3\lib\site-packages\ipykernel_launcher.py:3: SettingWithCopyWarning: 
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead

See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
  This is separate from the ipykernel package so we can avoid doing imports until
  region_month   user_id device  is_access
0      2013-01  10061580     FP        1.0
1      2013-01  10154440     FP        0.0
2      2013-01  10165615     FP        1.0
3      2013-01  10321356     FP        1.0
4      2013-01  10447112     FP        1.0
Did January's feature-phone users keep accessing the game via feature phone in February?
# did January's feature-phone users keep accessing via feature phone in February?

fp_m2['is_fp'] = 1
fp_m1 = pd.merge(fp_m1,fp_m2[['user_id','is_fp']],how='left',on='user_id')
fp_m1['is_fp'].fillna(0,inplace=True)

fp_m1.head()
D:\ProgramData\Anaconda3\lib\site-packages\ipykernel_launcher.py:3: SettingWithCopyWarning: 
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead

See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
  This is separate from the ipykernel package so we can avoid doing imports until
  region_month   user_id device  is_access  is_fp
0      2013-01  10061580     FP        1.0    1.0
1      2013-01  10154440     FP        0.0    0.0
2      2013-01  10165615     FP        1.0    1.0
3      2013-01  10321356     FP        1.0    1.0
4      2013-01  10447112     FP        1.0    1.0
Did January's feature-phone users access the game via smartphone in February?
# did January's feature-phone users access via smartphone in February?

sp_m2['is_sp'] = 1
fp_m1 = pd.merge(fp_m1,sp_m2[['user_id','is_sp']],how='left',on='user_id')
fp_m1['is_sp'].fillna(0,inplace=True)

fp_m1.head()
D:\ProgramData\Anaconda3\lib\site-packages\ipykernel_launcher.py:3: SettingWithCopyWarning: 
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead

See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
  This is separate from the ipykernel package so we can avoid doing imports until
  region_month   user_id device  is_access  is_fp  is_sp
0      2013-01  10061580     FP        1.0    1.0    0.0
1      2013-01  10154440     FP        0.0    0.0    0.0
2      2013-01  10165615     FP        1.0    1.0    0.0
3      2013-01  10321356     FP        1.0    1.0    0.0
4      2013-01  10447112     FP        1.0    1.0    0.0
Users who accessed via feature phone in January but either did not access at all in February or accessed via smartphone
# users who accessed via FP in January but did not access in February, or accessed via SP

fp_m1 = fp_m1[(fp_m1['is_access']==0) | (fp_m1['is_sp']==1)]
fp_m1.head()
   region_month   user_id device  is_access  is_fp  is_sp
1       2013-01  10154440     FP        0.0    0.0    0.0
7       2013-01  10528830     FP        0.0    0.0    0.0
20      2013-01   1163733     FP        1.0    0.0    1.0
21      2013-01  11727630     FP        0.0    0.0    0.0
43      2013-01  13401362     FP        1.0    0.0    1.0
The rows above provide the label column (is_sp) for the logistic regression.
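
As a quick sanity check, a small sketch confirming that every remaining row is either a churned user or a migrated one (names as defined above):

# each row should be churned (is_access == 0) or migrated (is_sp == 1)
print(fp_m1.groupby(['is_access','is_sp']).size())
print(fp_m1['is_sp'].value_counts())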

Preparing the per-day access data

# flag every daily login record

fp_dau = dau[(dau['device']=='FP') & (dau['region_month']=='2013-01')]
fp_dau['is_access'] = 1
fp_dau.head()
D:\ProgramData\Anaconda3\lib\site-packages\ipykernel_launcher.py:4: SettingWithCopyWarning: 
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead

See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
  after removing the cwd from sys.path.
  region_month  region_day app_name   user_id device  is_access
0      2013-01  2013-01-01  game-02  10061580     FP          1
1      2013-01  2013-01-01  game-02  10154440     FP          1
3      2013-01  2013-01-01  game-02  10165615     FP          1
4      2013-01  2013-01-01  game-02  10321356     FP          1
6      2013-01  2013-01-01  game-02  10447112     FP          1
# column names
b = []
for a in np.arange(1,32):
    b.append('X'+str(a)+'day')
# b.insert(0,'user_id')

# pivot the login records into a per-user, per-day matrix
fp_dau_pivot = pd.pivot_table(fp_dau, values='is_access', columns='region_day', index='user_id', fill_value=0)
fp_dau_pivot.columns = b
fp_dau_pivot.reset_index(inplace=True)
fp_dau_pivot.head()
      user_id  X1day  X2day  X3day  X4day  X5day  X6day  X7day  X8day  X9day  ...  X22day  X23day  X24day  X25day  X26day  X27day  X28day  X29day  X30day  X31day
0      397286      1      1      1      1      1      1      1      1      1  ...       1       1       1       1       1       1       1       1       1       1
1      471341      1      1      1      1      0      0      0      0      0  ...       0       0       0       0       0       0       0       0       0       0
2      503874      1      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0       0       0       0
3      512250      1      1      1      1      1      1      1      1      1  ...       1       1       1       1       1       1       1       1       1       1
4      513811      0      0      0      0      0      0      0      0      0  ...       1       0       0       0       0       0       1       1       0       1

5 rows × 32 columns
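
The same binary matrix can also be built without the manual column list. A sketch using pd.crosstab, where clip(upper=1) collapses repeated logins on one day into a single flag:

# alternative to pivot_table: count logins per user per day, then binarize
mat = pd.crosstab(fp_dau['user_id'], fp_dau['region_day']).clip(upper=1)
mat.columns = ['X'+str(i)+'day' for i in range(1,32)]
mat = mat.reset_index()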

# merge the daily login matrix with the labels from fp_m1; note the inner join here

fp_dau_m = pd.merge(fp_dau_pivot, fp_m1[['user_id','is_sp']], how='inner', on='user_id')

fp_dau_m.head()
      user_id  X1day  X2day  X3day  X4day  X5day  X6day  X7day  X8day  X9day  ...  X23day  X24day  X25day  X26day  X27day  X28day  X29day  X30day  X31day  is_sp
0      471341      1      1      1      1      0      0      0      0      0  ...       0       0       0       0       0       0       0       0       0    1.0
1      503874      1      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0       0       0    0.0
2     1073544      0      0      0      0      0      0      0      0      0  ...       1       1       1       0       0       0       0       0       0    0.0
3     1073864      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0       0       0    0.0
4     1163733      1      1      0      0      0      0      0      0      0  ...       1       1       1       1       1       1       0       0       0    1.0

5 rows × 33 columns

fp_dau_m.isna().sum().sum()
0
fp_dau_m.is_sp.value_counts()
0.0    190
1.0     62
Name: is_sp, dtype: int64
The output above shows the meaning of the label is_sp: 1 means the user accessed via smartphone in February (an account migration), 0 means the user churned.

190 users churned in February and 62 switched to a smartphone, so about 75% (190/252) of this group was lost outright.

Logistic regression

1. sklearn

The solver and the penalty strength C are the main tuning knobs; with solver='lbfgs' and C=10 the model below scores about 90% on its own training data. Note that 100% training accuracy is out of reach here, because some users share an identical login pattern but different labels (visible below as pairs of rows with the same probability and different is_sp).
from sklearn.linear_model import LogisticRegression

lr = LogisticRegression(solver='lbfgs',C=10)

x = fp_dau_m.iloc[:,1:-1]
y = fp_dau_m.iloc[:,-1]

lr.fit(x,y)

print('coefficients:',lr.coef_)
print('intercept:',lr.intercept_)
print('score:',lr.score(x,y))
coefficients: [[ 1.64264315  0.38232509  0.27375659  1.77818234 -1.2604587  -0.62425027
   1.64964331  0.94366796 -0.30971957 -2.45689215  1.05453162 -0.49567095
   1.37452985 -0.79198757 -1.39648934  0.18038175 -0.34026571  1.01401641
  -0.49919155 -0.25791649  0.98296119  1.03952236 -1.03446927  1.53177282
  -0.12212919  0.30942289  0.31267693 -0.08203749  1.32893163  1.57890364
   1.29380472]]
intercept: [-3.9031072]
score: 0.9047619047619048
yp = lr.predict_proba(x)[:,1]
df = fp_dau_m.copy()
df['prob'] = yp
df['pred'] = df['prob'].apply(lambda x: 1 if x > 0.5 else 0)
df.head(15)
      user_id  X1day  X2day  X3day  X4day  X5day  X6day  X7day  X8day  X9day  ...  X25day  X26day  X27day  X28day  X29day  X30day  X31day  is_sp      prob  pred
0      471341      1      1      1      1      0      0      0      0      0  ...       0       0       0       0       0       0       0    1.0  0.543341     1
1      503874      1      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.094451     0
2     1073544      0      0      0      0      0      0      0      0      0  ...       1       0       0       0       0       0       0    0.0  0.002510     0
3     1073864      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.025567     0
4     1163733      1      1      0      0      0      0      0      0      0  ...       1       1       1       1       0       0       0    1.0  0.849838     1
5     1454629      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.073879     0
6     1557628      0      0      0      0      0      0      0      0      1  ...       0       0       0       0       0       0       1    0.0  0.051221     0
7     2241462      1      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    1.0  0.094451     0
8     2313236      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.085385     0
9     2477685      0      0      0      0      0      0      0      0      0  ...       1       0       0       0       0       0       0    0.0  0.017546     0
10    2541741      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.001726     0
11    2628661      0      0      0      0      0      1      0      0      0  ...       0       1       0       0       0       0       0    0.0  0.014515     0
12    3509436      0      1      0      1      1      1      0      1      1  ...       1       1       1       1       1       1       1    1.0  0.987940     1
13    3509436      0      1      0      1      1      1      0      1      1  ...       1       1       1       1       1       1       1    1.0  0.987940     1
14    3955950      1      1      1      1      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.543341     1

15 rows × 35 columns

df.groupby(['is_sp','pred'])['user_id'].count().reset_index()
   is_sp  pred  user_id
0    0.0     0      181
1    0.0     1        9
2    1.0     0       15
3    1.0     1       47
len(df[df['is_sp']==df['pred']])/len(df)
0.9047619047619048
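Note that this ~90% is measured on the training data itself. A fairer way to choose C is cross-validation; a minimal sketch with an illustrative grid:

from sklearn.model_selection import GridSearchCV

# each candidate C is scored on held-out folds rather than the training data
gs = GridSearchCV(LogisticRegression(solver='lbfgs'),
                  param_grid={'C': [0.01, 0.1, 1, 10, 100]},
                  cv=5, scoring='accuracy')
gs.fit(x, y)
print(gs.best_params_, gs.best_score_)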
LogisticRegressionCV tunes the penalty strength C by cross-validation on its own, so no manual tuning is required; here it scores about 89% on the training data. The parameters that matter are again the solver and C.
from sklearn.linear_model import LogisticRegressionCV

lr = LogisticRegressionCV(cv=10)

x = fp_dau_m.iloc[:,1:-1]
y = fp_dau_m.iloc[:,-1]

lr.fit(x,y)

print('coefficients:',lr.coef_)
print('intercept:',lr.intercept_)
print('-----------------------------------------------')
print('score: ',lr.score(x,y))
coefficients: [[ 0.66247469  0.39566209  0.12089587  0.72621501 -0.14485039 -0.11496137
   0.50433275  0.25667173  0.11561233 -0.48159577  0.23713178 -0.12897139
   0.31542595 -0.16714406 -0.1914315  -0.09390318 -0.05036135  0.0924934
  -0.14949742 -0.05918408  0.52355482  0.58543392  0.0882812   0.39783666
   0.07477356  0.14874974  0.39921228  0.38402639  0.68729765  0.6331324
   0.55885631]]
intercept: [-2.95546571]
-----------------------------------------------
score:  0.8928571428571429

2. statsmodels

import statsmodels.api as sm
import statsmodels.formula.api as fsm     # formula-style fitting; imported but not used here

x = fp_dau_m.iloc[:,1:-1]
x['intercept'] = 1.0          # statsmodels Logit needs an explicit intercept column
y = fp_dau_m.iloc[:,-1]

logit = sm.Logit(y, x)
result = logit.fit(method='bfgs',maxiter=100)
Warning: Maximum number of iterations has been exceeded.
         Current function value: 0.222887
         Iterations: 100
         Function evaluations: 101
         Gradient evaluations: 101


C:\Users\sylva\AppData\Roaming\Python\Python36\site-packages\statsmodels\base\model.py:508: ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
  "Check mle_retvals", ConvergenceWarning)
# result1 = logit.fit_regularized(alpha=5)
result.pred_table()
array([[180.,  10.],
       [ 14.,  48.]])
# result1.pred_table()
print(result.summary2())
                         Results: Logit
=================================================================
Model:              Logit            Pseudo R-squared: 0.601     
Dependent Variable: is_sp            AIC:              176.3352  
Date:               2018-08-24 12:07 BIC:              289.2770  
No. Observations:   252              Log-Likelihood:   -56.168   
Df Model:           31               LL-Null:          -140.60   
Df Residuals:       220              LLR p-value:      6.6358e-21
Converged:          0.0000           Scale:            1.0000    
------------------------------------------------------------------
               Coef.   Std.Err.     z     P>|z|    [0.025   0.975]
------------------------------------------------------------------
X1day          1.9894    0.8047   2.4720  0.0134   0.4121   3.5666
X2day          0.3311    1.0705   0.3093  0.7571  -1.7671   2.4293
X3day          0.3793    0.9406   0.4033  0.6867  -1.4641   2.2227
X4day          2.0422    0.8359   2.4430  0.0146   0.4038   3.6805
X5day         -1.7597    1.1991  -1.4675  0.1422  -4.1100   0.5906
X6day         -0.6679    1.1717  -0.5701  0.5686  -2.9643   1.6285
X7day          2.0157    1.1176   1.8036  0.0713  -0.1747   4.2061
X8day          1.2119    1.3505   0.8974  0.3695  -1.4350   3.8589
X9day         -0.4495    1.1874  -0.3786  0.7050  -2.7768   1.8778
X10day        -3.2374    1.5580  -2.0779  0.0377  -6.2911  -0.1837
X11day         1.4392    1.2234   1.1764  0.2394  -0.9586   3.8370
X12day        -0.6389    1.5297  -0.4176  0.6762  -3.6370   2.3592
X13day         1.7797    1.1424   1.5579  0.1193  -0.4594   4.0188
X14day        -1.1242    1.2455  -0.9026  0.3668  -3.5653   1.3170
X15day        -1.8115    1.3050  -1.3881  0.1651  -4.3694   0.7463
X16day         0.4940    1.1666   0.4234  0.6720  -1.7925   2.7804
X17day        -0.4448    1.2234  -0.3636  0.7162  -2.8427   1.9531
X18day         1.4321    1.1465   1.2491  0.2116  -0.8150   3.6791
X19day        -0.6132    1.1990  -0.5114  0.6091  -2.9632   1.7369
X20day        -0.3130    1.4007  -0.2235  0.8232  -3.0585   2.4324
X21day         0.9587    1.2558   0.7634  0.4452  -1.5027   3.4201
X22day         1.1954    1.1238   1.0637  0.2875  -1.0072   3.3980
X23day        -1.5371    1.2303  -1.2494  0.2115  -3.9486   0.8743
X24day         1.8445    1.1038   1.6710  0.0947  -0.3190   4.0080
X25day         0.1292    1.5317   0.0844  0.9328  -2.8727   3.1312
X26day         0.3131    1.4280   0.2192  0.8265  -2.4858   3.1119
X27day         0.3365    1.2965   0.2596  0.7952  -2.2045   2.8776
X28day        -0.3918    1.8515  -0.2116  0.8324  -4.0207   3.2372
X29day         1.5941    1.0565   1.5088  0.1314  -0.4767   3.6648
X30day         1.9943    1.2117   1.6459  0.0998  -0.3806   4.3692
X31day         1.5214    1.1798   1.2896  0.1972  -0.7908   3.8337
intercept     -4.2502    0.5904  -7.1985  0.0000  -5.4074  -3.0930
=================================================================
# print(result1.summary2())
xx = fp_dau_m.iloc[:,1:-1]
xx['intercept'] = 1.0         # the intercept column must be added for prediction as well

y_p = result.predict(xx)

ydf = fp_dau_m.copy()
ydf['prob'] = y_p
ydf['pred'] = ydf['prob'].apply(lambda x: 1 if x > 0.5 else 0)
ydf.head(15)
      user_id  X1day  X2day  X3day  X4day  X5day  X6day  X7day  X8day  X9day  ...  X25day  X26day  X27day  X28day  X29day  X30day  X31day  is_sp      prob  pred
0      471341      1      1      1      1      0      0      0      0      0  ...       0       0       0       0       0       0       0    1.0  0.620506     1
1      503874      1      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.094416     0
2     1073544      0      0      0      0      0      0      0      0      0  ...       1       0       0       0       0       0       0    0.0  0.000866     0
3     1073864      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.019167     0
4     1163733      1      1      0      0      0      0      0      0      0  ...       1       1       1       1       0       0       0    1.0  0.870576     1
5     1454629      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.077951     0
6     1557628      0      0      0      0      0      0      0      0      1  ...       0       0       0       0       0       0       1    0.0  0.039991     0
7     2241462      1      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    1.0  0.094416     0
8     2313236      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.082739     0
9     2477685      0      0      0      0      0      0      0      0      0  ...       1       0       0       0       0       0       0    0.0  0.015969     0
10    2541741      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.000560     0
11    2628661      0      0      0      0      0      1      0      0      0  ...       0       1       0       0       0       0       0    0.0  0.009902     0
12    3509436      0      1      0      1      1      1      0      1      1  ...       1       1       1       1       1       1       1    1.0  0.992456     1
13    3509436      0      1      0      1      1      1      0      1      1  ...       1       1       1       1       1       1       1    1.0  0.992456     1
14    3955950      1      1      1      1      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.620506     1

15 rows × 35 columns

ydf.groupby(['is_sp','pred'])['user_id'].count().reset_index()
   is_sp  pred  user_id
0    0.0     0      180
1    0.0     1       10
2    1.0     0       14
3    1.0     1       48
len(ydf[ydf['is_sp']==ydf['pred']])/len(ydf)
0.9047619047619048
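Both models land on the same ~90.5% training accuracy. The groupby tables above are simply confusion matrices, which sklearn.metrics can compute directly; a sketch:

from sklearn.metrics import accuracy_score, confusion_matrix

# rows = actual is_sp, columns = predicted label
print('sklearn    :', accuracy_score(df['is_sp'], df['pred']))
print(confusion_matrix(df['is_sp'], df['pred']))
print('statsmodels:', accuracy_score(ydf['is_sp'], ydf['pred']))
print(confusion_matrix(ydf['is_sp'], ydf['pred']))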

Observations

According to the sklearn predictions, 9 users were predicted as 1, i.e. as having migrated their accounts, when in fact they had not. Judging from their past access patterns these users looked like migrators, yet in reality they belong to the churned group.

df.head(10)
      user_id  X1day  X2day  X3day  X4day  X5day  X6day  X7day  X8day  X9day  ...  X25day  X26day  X27day  X28day  X29day  X30day  X31day  is_sp      prob  pred
0      471341      1      1      1      1      0      0      0      0      0  ...       0       0       0       0       0       0       0    1.0  0.543341     1
1      503874      1      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.094451     0
2     1073544      0      0      0      0      0      0      0      0      0  ...       1       0       0       0       0       0       0    0.0  0.002510     0
3     1073864      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.025567     0
4     1163733      1      1      0      0      0      0      0      0      0  ...       1       1       1       1       0       0       0    1.0  0.849838     1
5     1454629      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.073879     0
6     1557628      0      0      0      0      0      0      0      0      1  ...       0       0       0       0       0       0       1    0.0  0.051221     0
7     2241462      1      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    1.0  0.094451     0
8     2313236      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.085385     0
9     2477685      0      0      0      0      0      0      0      0      0  ...       1       0       0       0       0       0       0    0.0  0.017546     0

10 rows × 35 columns

df1 = df[(df['is_sp']==1) & (df['pred']==1)]
df1.sort_values(by='prob',ascending=True).head(15)
      user_id  X1day  X2day  X3day  X4day  X5day  X6day  X7day  X8day  X9day  ...  X25day  X26day  X27day  X28day  X29day  X30day  X31day  is_sp      prob  pred
228  52776438      1      1      1      1      1      1      1      1      1  ...       0       0       0       0       0       0       0    1.0  0.512293     1
171  32762652      1      1      1      1      1      1      1      1      1  ...       0       0       0       0       0       0       0    1.0  0.512293     1
155  27800629      1      1      1      1      0      0      0      0      0  ...       0       0       0       0       0       0       0    1.0  0.543341     1
0      471341      1      1      1      1      0      0      0      0      0  ...       0       0       0       0       0       0       0    1.0  0.543341     1
36    8645980      0      0      0      1      0      0      0      0      0  ...       0       0       0       0       1       0       0    1.0  0.551574     1
37    8645980      0      0      0      1      0      0      0      0      0  ...       0       0       0       0       1       0       0    1.0  0.551574     1
169  32500332      1      1      1      1      1      1      1      1      1  ...       1       0       0       0       0       0       0    1.0  0.587923     1
55   11600349      0      1      1      1      1      1      1      1      1  ...       0       0       0       1       1       1       1    1.0  0.684198     1
56   11600349      0      1      1      1      1      1      1      1      1  ...       0       0       0       1       1       1       1    1.0  0.684198     1
146  25787360      0      0      0      0      1      0      1      1      1  ...       0       0       1       0       0       0       0    1.0  0.696295     1
145  25787360      0      0      0      0      1      0      1      1      1  ...       0       0       1       0       0       0       0    1.0  0.696295     1
4     1163733      1      1      0      0      0      0      0      0      0  ...       1       1       1       1       0       0       0    1.0  0.849838     1
48   10406653      0      1      1      1      1      1      1      1      0  ...       1       0       1       1       1       1       1    1.0  0.865393     1
49   10406653      0      1      1      1      1      1      1      1      0  ...       1       0       1       1       1       1       1    1.0  0.865393     1
165  31066299      0      1      1      1      0      1      1      1      1  ...       1       1       1       0       1       1       0    1.0  0.951970     1

15 rows × 35 columns

df2 = df[(df['is_sp']==1) & (df['pred']==1)]
df2.sort_values(by='prob',ascending=False).head(15)
      user_id  X1day  X2day  X3day  X4day  X5day  X6day  X7day  X8day  X9day  ...  X25day  X26day  X27day  X28day  X29day  X30day  X31day  is_sp      prob  pred
136  24791702      1      1      0      1      0      1      1      1      1  ...       1       1       1       1       1       1       1    1.0  0.998618     1
137  24791702      1      1      0      1      0      1      1      1      1  ...       1       1       1       1       1       1       1    1.0  0.998618     1
44    9567562      1      1      1      1      1      1      1      1      1  ...       1       1       1       1       1       1       1    1.0  0.996302     1
43    9567562      1      1      1      1      1      1      1      1      1  ...       1       1       1       1       1       1       1    1.0  0.996302     1
139  24900784      1      1      1      1      1      1      1      1      1  ...       1       1       1       1       1       1       1    1.0  0.993923     1
124  23113079      1      1      1      1      1      1      1      1      1  ...       1       1       1       1       1       1       1    1.0  0.993923     1
133  24581383      1      1      1      1      1      1      1      1      1  ...       1       1       1       1       1       1       1    1.0  0.993923     1
134  24581383      1      1      1      1      1      1      1      1      1  ...       1       1       1       1       1       1       1    1.0  0.993923     1
138  24900784      1      1      1      1      1      1      1      1      1  ...       1       1       1       1       1       1       1    1.0  0.993923     1
123  23113079      1      1      1      1      1      1      1      1      1  ...       1       1       1       1       1       1       1    1.0  0.993923     1
114  21551429      1      1      1      1      1      1      1      1      1  ...       1       1       1       1       1       1       1    1.0  0.993923     1
147  27003770      1      1      1      1      1      1      1      1      1  ...       1       1       1       1       1       1       1    1.0  0.993923     1
148  27003770      1      1      1      1      1      1      1      1      1  ...       1       1       1       1       1       1       1    1.0  0.993923     1
150  27602710      1      1      1      1      1      1      1      1      1  ...       1       1       1       1       1       1       1    1.0  0.993923     1
151  27602710      1      1      1      1      1      1      1      1      1  ...       1       1       1       1       1       1       1    1.0  0.993923     1

15 rows × 35 columns

df3 = df[(df['is_sp']==0) & (df['pred']==1)]
df3.sort_values(by='prob',ascending=False).head(15)
      user_id  X1day  X2day  X3day  X4day  X5day  X6day  X7day  X8day  X9day  ...  X25day  X26day  X27day  X28day  X29day  X30day  X31day  is_sp      prob  pred
194  41590801      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       1       0       1    0.0  0.677458     1
108  19432099      1      1      1      1      0      1      1      1      1  ...       0       0       0       0       0       0       0    0.0  0.643061     1
203  43451947      1      1      1      1      1      0      1      1      1  ...       1       0       0       1       1       0       0    0.0  0.599921     1
197  42276142      1      1      1      1      1      1      0      1      1  ...       1       1       1       1       1       0       0    0.0  0.577420     1
209  46285446      0      0      0      0      1      1      1      1      1  ...       1       1       1       0       1       0       0    0.0  0.576873     1
14    3955950      1      1      1      1      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.543341     1
158  28391896      1      1      1      1      1      1      1      1      1  ...       0       0       0       0       0       0       0    0.0  0.512293     1
240  59561276      1      1      1      1      1      1      1      1      1  ...       0       0       0       0       0       0       0    0.0  0.512293     1
27    6147878      1      0      0      1      1      1      1      1      1  ...       1       1       0       0       0       0       0    0.0  0.502182     1

9 rows × 35 columns

df4 = df[(df['is_sp']==0) & (df['pred']==1)]
df4.sort_values(by='prob',ascending=True).head(15)
      user_id  X1day  X2day  X3day  X4day  X5day  X6day  X7day  X8day  X9day  ...  X25day  X26day  X27day  X28day  X29day  X30day  X31day  is_sp      prob  pred
27    6147878      1      0      0      1      1      1      1      1      1  ...       1       1       0       0       0       0       0    0.0  0.502182     1
158  28391896      1      1      1      1      1      1      1      1      1  ...       0       0       0       0       0       0       0    0.0  0.512293     1
240  59561276      1      1      1      1      1      1      1      1      1  ...       0       0       0       0       0       0       0    0.0  0.512293     1
14    3955950      1      1      1      1      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.543341     1
209  46285446      0      0      0      0      1      1      1      1      1  ...       1       1       1       0       1       0       0    0.0  0.576873     1
197  42276142      1      1      1      1      1      1      0      1      1  ...       1       1       1       1       1       0       0    0.0  0.577420     1
203  43451947      1      1      1      1      1      0      1      1      1  ...       1       0       0       1       1       0       0    0.0  0.599921     1
108  19432099      1      1      1      1      0      1      1      1      1  ...       0       0       0       0       0       0       0    0.0  0.643061     1
194  41590801      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       1       0       1    0.0  0.677458     1

9 rows × 35 columns

df5 = df[(df['is_sp']==0) & (df['pred']==0)]
df5.sort_values(by='prob',ascending=True).head(15)
      user_id  X1day  X2day  X3day  X4day  X5day  X6day  X7day  X8day  X9day  ...  X25day  X26day  X27day  X28day  X29day  X30day  X31day  is_sp      prob  pred
149  27249550      0      0      0      1      1      1      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.000946     0
10    2541741      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.001726     0
242  60725457      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.001726     0
101  18408297      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.001745     0
172  33766090      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.002257     0
2     1073544      0      0      0      0      0      0      0      0      0  ...       1       0       0       0       0       0       0    0.0  0.002510     0
227  52612953      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.003087     0
63   12582684      0      0      0      1      1      0      1      0      0  ...       0       0       0       0       0       0       0    0.0  0.004780     0
208  46056688      0      0      0      0      0      1      1      0      0  ...       0       0       0       0       0       0       0    0.0  0.004799     0
66   13157777      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.004969     0
190  40654033      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.004969     0
120  22437652      0      0      0      0      1      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.005689     0
87   16601600      0      0      0      0      1      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.005689     0
70   13967453      0      0      0      0      1      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.005689     0
112  20955934      0      0      0      0      1      0      0      0      0  ...       0       0       0       0       0       0       0    0.0  0.005689     0

15 rows × 35 columns

df6 = df[(df['is_sp']==1) & (df['pred']==0)]
df6.sort_values(by='prob',ascending=False).head(15)
      user_id  X1day  X2day  X3day  X4day  X5day  X6day  X7day  X8day  X9day  ...  X25day  X26day  X27day  X28day  X29day  X30day  X31day  is_sp      prob  pred
198  42438713      1      1      1      1      1      1      1      0      0  ...       0       0       0       0       0       0       0    1.0  0.484688     0
127  23689923      1      1      0      1      1      1      1      1      1  ...       0       0       0       0       0       0       0    1.0  0.359100     0
213  47332069      0      0      0      0      0      0      0      0      0  ...       1       1       0       0       0       0       0    1.0  0.281079     0
140  24914421      1      1      1      0      0      0      0      1      0  ...       0       1       0       0       0       0       0    1.0  0.278119     0
226  52131958      0      0      1      1      1      1      1      1      1  ...       1       1       1       0       0       0       0    1.0  0.259709     0
212  47266966      1      0      0      1      0      1      1      1      1  ...       0       0       0       0       0       0       0    1.0  0.232730     0
236  57869405      0      0      0      0      0      0      1      1      0  ...       0       0       0       0       0       0       0    1.0  0.212521     0
161  29698758      1      1      1      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    1.0  0.167370     0
30    7177251      1      1      1      1      1      1      0      0      0  ...       0       0       0       0       0       0       0    1.0  0.153046     0
7     2241462      1      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    1.0  0.094451     0
67   13401362      1      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    1.0  0.094451     0
80   15569351      0      0      0      0      0      0      1      0      1  ...       0       0       0       0       0       0       0    1.0  0.071546     0
93   17388480      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       1       0       0    1.0  0.070819     0
94   17388480      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       1       0       0    1.0  0.070819     0
163  30103279      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0    1.0  0.028795     0

15 rows × 35 columns

Here the copy problem from the warnings above shows up: the = operator merely binds another name to the same object in memory, so use the .copy() method whenever an independent DataFrame is needed.

fp_dau_m.head()
      user_id  X1day  X2day  X3day  X4day  X5day  X6day  X7day  X8day  X9day  ...  X23day  X24day  X25day  X26day  X27day  X28day  X29day  X30day  X31day  is_sp
0      471341      1      1      1      1      0      0      0      0      0  ...       0       0       0       0       0       0       0       0       0    1.0
1      503874      1      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0       0       0    0.0
2     1073544      0      0      0      0      0      0      0      0      0  ...       1       1       1       0       0       0       0       0       0    0.0
3     1073864      0      0      0      0      0      0      0      0      0  ...       0       0       0       0       0       0       0       0       0    0.0
4     1163733      1      1      0      0      0      0      0      0      0  ...       1       1       1       1       1       1       0       0       0    1.0

5 rows × 33 columns

df.equals(fp_dau_m)
False
df.equals(ydf)
False
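
A standalone sketch of the difference:

# '=' binds a second name to the same object; .copy() makes an independent one
a = pd.DataFrame({'v': [1, 2, 3]})
b = a          # alias: the same object in memory
c = a.copy()   # an independent copy
b.loc[0, 'v'] = 99
print(a.loc[0, 'v'])   # 99 -> a changed through b
print(c.loc[0, 'v'])   # 1  -> the copy is unaffected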

Reposted from: https://www.cnblogs.com/cvlas/p/9529061.html
