# cart  (stray artifact from the page scrape; commented out so the module parses)

import time

 

import cx_Oracle

import numpy

import pandas

import json

pandas.set_option('expand_frame_repr', False)

from openpyxl import Workbook

 

 

'''-----------------------连接数据库-------------------------------'''

# Remote data-warehouse connection (source of all report tables read below).
# SECURITY NOTE(review): credentials and host are hardcoded in source; move
# them to environment variables or a config file before sharing this script.
conn=cx_Oracle.connect('dw_sh','fzzQbzKV','192.168.200.85:50001/sjzq')

cursor = conn.cursor()

# Local Oracle XE instance — destination for the merged feature table
# (USER_CONSUMPTION_06, written by data_in_oracle below).
conn_local=cx_Oracle.connect('hzp/hzp@localhost/xe')

cursor_local=conn_local.cursor()

 

 

'''--------------------EMERGENCY_data--------------------------'''

def EMERGENCY_data(start, end):
    """Sum last-6-month emergency-contact call stats per HEADINFO_ID.

    Left-joins ZHPH_HEAD_INFO to ZHPH_EMERGENCY_ANALYSIS for reports with
    REPORT_TIME in [start, end] and USER_TYPE = '1'.

    Args:
        start, end: REPORT_TIME bounds as 'YYYY-MM-DD HH24:MI:SS' strings.

    Returns:
        pandas.DataFrame with columns HEADINFO_ID, LAST_6_TALK_SECONDS,
        LAST_6_CALL_CNT, LAST_6_CALLED_CNT.
    """
    # NOTE(review): start/end are spliced into the SQL text with %-formatting;
    # acceptable only because callers pass trusted literals — bind variables
    # would be the safer choice.
    sql_emergency = '''select t.HEADINFO_ID,
        sum(a.LAST_6_TALK_SECONDS) LAST_6_TALK_SECONDS,
        sum(a.LAST_6_CALL_CNT) LAST_6_CALL_CNT,
        sum(a.LAST_6_CALLED_CNT) LAST_6_CALLED_CNT
        from DW_SH_CREDIT.ZHPH_HEAD_INFO t
        left join DW_SH_CREDIT.ZHPH_EMERGENCY_ANALYSIS a
        on t.HEADINFO_ID = a.HEADINFO_ID
        where t.REPORT_TIME >= '%s'
        and t.REPORT_TIME <= '%s'
        and t.USER_TYPE = '1'
        group by t.HEADINFO_ID
        ''' % (start, end)
    cursor.execute(sql_emergency)
    # `desc` replaces the original loop variable `tuple`, which shadowed the
    # builtin of the same name.
    col_names = [desc[0] for desc in cursor.description]
    # Pass column names to the constructor instead of running a list
    # comprehension of rename(inplace=True) calls purely for side effects.
    return pandas.DataFrame(cursor.fetchall(), columns=col_names)

 

'''------------------------------TRIP_ANALYSIS_data---------------------------------'''

def TRIP_ANALYSIS_data(start, end):
    """Sum trip-related call stats per HEADINFO_ID.

    Left-joins ZHPH_HEAD_INFO to ZHPH_TRIP_ANALYSIS for reports with
    REPORT_TIME in [start, end] and USER_TYPE = '1'.

    Args:
        start, end: REPORT_TIME bounds as 'YYYY-MM-DD HH24:MI:SS' strings.

    Returns:
        pandas.DataFrame with columns HEADINFO_ID, TRIP_TALK_SECONDS,
        TRIP_TALK_CNT.
    """
    sql_trip = '''select t.HEADINFO_ID,
        sum(a.TALK_SECONDS) TRIP_TALK_SECONDS,
        sum(a.TALK_CNT) TRIP_TALK_CNT
        from DW_SH_CREDIT.ZHPH_HEAD_INFO t
        left join DW_SH_CREDIT.ZHPH_TRIP_ANALYSIS a
        on t.HEADINFO_ID = a.HEADINFO_ID
        where t.REPORT_TIME >= '%s'
        and t.REPORT_TIME <= '%s'
        and t.USER_TYPE = '1'
        group by t.HEADINFO_ID
        ''' % (start, end)
    cursor.execute(sql_trip)
    # Avoid shadowing the builtin `tuple` (original loop variable name) and
    # build the frame with column names directly rather than renaming each
    # positional column in a side-effect comprehension.
    col_names = [desc[0] for desc in cursor.description]
    return pandas.DataFrame(cursor.fetchall(), columns=col_names)

'''--------------------HEADINFO_ID--------------------------'''

def HEADINFO_data(start, end):
    """Fetch the per-report spine: identifiers plus basic/risk attributes.

    Inner-joins ZHPH_HEAD_INFO with client, basic and risk tables for
    reports with REPORT_TIME in [start, end] and USER_TYPE = '1', ordered by
    REPORT_TIME, then deduplicates on (SEARCH_ID, LOAN_CONTRACT_NO) keeping
    the earliest report.

    Args:
        start, end: REPORT_TIME bounds as 'YYYY-MM-DD HH24:MI:SS' strings.

    Returns:
        pandas.DataFrame keyed by HEADINFO_ID (one row per unique
        SEARCH_ID / LOAN_CONTRACT_NO pair).
    """
    sql_headinfo = '''select t.HEADINFO_ID,
        t.SEARCH_ID,
        t.REPORT_TIME,
        a.LOAN_CONTRACT_NO,
        b.PHONE_LOCATION,
        b.AVE_MONTHLY_CONSUMPTION,
        b.CURRENT_BALANCE,
        b.IF_CALL_EMERGENCY1,
        b.IF_CALL_EMERGENCY2,
        c.BLACKLIST_CNT,
        c.SEARCHED_CNT,
        c.LOAN_RECORD_CNT
        from DW_SH_CREDIT.ZHPH_HEAD_INFO t
        join DW_SH_CREDIT.ZHPH_client_INFO a
        on t.CLIENT_PRI_NUMBER = a.PRI_NUMBER
        join DW_SH_CREDIT.ZHPH_BASIC_INFO b
        on t.HEADINFO_ID = b.HEADINFO_ID
        join DW_SH_CREDIT.ZHPH_RISK_ANALYSIS c
        on c.HEADINFO_ID = t.HEADINFO_ID
        where t.REPORT_TIME >= '%s'
        and t.REPORT_TIME <= '%s'
        and t.USER_TYPE = '1'
        order by t.REPORT_TIME asc
        ''' % (start, end)
    cursor.execute(sql_headinfo)
    # `desc` avoids shadowing the builtin `tuple`; building the frame with
    # `columns=` replaces the original per-column rename comprehension.
    col_names = [desc[0] for desc in cursor.description]
    frame = pandas.DataFrame(cursor.fetchall(), columns=col_names)
    # Rows are ordered by REPORT_TIME asc, so keep='first' retains the
    # earliest report per (SEARCH_ID, LOAN_CONTRACT_NO).
    frame.drop_duplicates(subset=['SEARCH_ID', 'LOAN_CONTRACT_NO'],
                          keep='first', inplace=True)
    return frame

'''----------------------userportrail_data------------------------------'''

def userportrail_data(start, end):
    """Fetch user-portrait features, expanding the special-call JSON LOB.

    Reads ZHPH_USER_PORTRAIT rows for reports with REPORT_TIME in
    [start, end] and USER_TYPE = '1'. NIGHT_ACTIVITY_RATIO and
    SPECIAL_CALL_INFO arrive as cx_Oracle LOBs; the latter is a JSON array
    whose per-category 'talk_cnt' values are flattened into 19 extra
    columns (110/120/p2p/催收/... categories, in the fixed order below).

    Args:
        start, end: REPORT_TIME bounds as 'YYYY-MM-DD HH24:MI:SS' strings.

    Returns:
        pandas.DataFrame with 23 columns (4 scalars + night ratio + 18
        special-call categories).
    """
    sql_user_portrait = '''select a.HEADINFO_ID,
        a.CONTACT_DISTRIBUTION_RATIO,
        a.BOTH_CALL_CNT,
        a.ACTIVE_DAYS_TOTAL_DAYS,
        a.NIGHT_ACTIVITY_RATIO,
        a.SPECIAL_CALL_INFO
        from DW_SH_CREDIT.ZHPH_HEAD_INFO t
        left join DW_SH_CREDIT.ZHPH_USER_PORTRAIT a
        on t.HEADINFO_ID = a.HEADINFO_ID
        where t.REPORT_TIME >= '%s'
        and t.REPORT_TIME <= '%s'
        and t.USER_TYPE = '1'
        ''' % (start, end)
    cursor.execute(sql_user_portrait)
    # Column labels are consumed downstream (incl. the Chinese category
    # names) — keep them exactly as-is.
    column_labels = ['HEADINFO_ID', 'CONTACT_DISTRIBUTION_RATIO', 'BOTH_CALL_CNT', 'ACTIVE_DAYS_TOTAL_DAYS', 'NIGHT_ACTIVITY_RATIO',
                     '110号码通话情况', '120号码通话情况', 'p2p号码通话情况', '催收类号码通话情况',
                     '贷款类号码通话情况', '赌博类号码通话情况', '套现类号码通话情况', '法律类号码通话情况',
                     '假证类号码通话情况', '抵押类号码通话情况', '投资类号码通话情况', '房产类号码通话情况',
                     '银行类号码通话情况', '酒店类号码通话情况', '出行类号码通话情况', '娱乐类号码通话情况',
                     '快递类号码通话情况', '医疗类号码通话情况']
    rows = []
    # enumerate replaces the hand-rolled `count` counter; slicing replaces
    # the range(4) index loop.
    for count, row in enumerate(cursor.fetchall(), start=1):
        record = list(row[:4])
        # LOB columns must be .read() before the next fetch invalidates them.
        record.append(row[4].read())
        special_calls = json.loads(row[5].read())  # JSON array of category dicts
        record.extend(item['talk_cnt'] for item in special_calls)
        rows.append(record)
        print(count)  # progress indicator for long result sets
    return pandas.DataFrame(rows, columns=column_labels)

 

'''---------------------------MONTHLY_CONSUMPTION_data----------------------------'''

def MONTHLY_CONSUMPTION_data(start, end):
    """Compute trimmed-mean monthly consumption features per HEADINFO_ID.

    For each report, drops the earliest and latest CONSUMPTION_MONTH rows
    (partial months) and averages the remaining months' call seconds/counts
    and spend, producing MID4AVG_* columns.

    Args:
        start, end: REPORT_TIME bounds as 'YYYY-MM-DD HH24:MI:SS' strings.

    Returns:
        pandas.DataFrame with HEADINFO_ID and MID4AVG_CALL_SECONDS,
        MID4AVG_CALL_CNT1, MID4AVG_CALLED_SECONDS, MID4AVG_CALLED_CNT,
        MID4AVG_CALL_CONSUMPTION.
    """
    sql_consumption = '''select t.HEADINFO_ID,
        a.CONSUMPTION_MONTH,
        a.CALL_SECONDS,
        a.CALL_CNT1,
        a.CALLED_SECONDS,
        a.CALLED_CNT,
        a.CALL_CONSUMPTION
        from DW_SH_CREDIT.ZHPH_HEAD_INFO t
        left join DW_SH_CREDIT.ZHPH_MONTHLY_CONSUMPTION a
        on t.HEADINFO_ID = a.HEADINFO_ID
        where t.REPORT_TIME >= '%s'
        and t.REPORT_TIME <= '%s'
        and t.USER_TYPE = '1'
        ''' % (start, end)
    cursor.execute(sql_consumption)
    col_names = [desc[0] for desc in cursor.description]
    re = pandas.DataFrame(cursor.fetchall(), columns=col_names)
    value_cols = ['CALL_SECONDS', 'CALL_CNT1', 'CALLED_SECONDS',
                  'CALLED_CNT', 'CALL_CONSUMPTION']
    for col in value_cols:
        # numpy.float was deprecated in NumPy 1.20 and removed in 1.24; the
        # builtin float is the documented, equivalent replacement.
        re[col] = re[col].astype(float)
    # Per report, exclude the max and min CONSUMPTION_MONTH rows and average
    # the rest. (Lambda kept identical to the original — the stacked
    # 'level_1' layout below depends on this pandas version's behavior.)
    ls = re[['CONSUMPTION_MONTH', 'CALL_SECONDS', 'CALL_CNT1', 'CALLED_SECONDS', 'CALLED_CNT', 'CALL_CONSUMPTION']].groupby(re['HEADINFO_ID'])\
        .apply(lambda x: (x[(x.CONSUMPTION_MONTH != x.CONSUMPTION_MONTH.max()) & (x.CONSUMPTION_MONTH != x.CONSUMPTION_MONTH.min())]).mean()).reset_index()
    # One pivot-and-merge loop replaces five copy-pasted stanzas; merge
    # order matches the original (CALL_SECONDS first), so the output column
    # order is unchanged.
    res = None
    for col in value_cols:
        part = pandas.DataFrame(ls[ls['level_1'] == col])
        del part['level_1']
        part.rename(columns={0: 'MID4AVG_' + col}, inplace=True)
        if res is None:
            res = part
        else:
            res = pandas.merge(res, part, on='HEADINFO_ID', how='left')
    return res

 

'''---------------------------AREA_ANALYSIS_data------------------------------'''

def AREA_ANALYSIS_data(start, end):
    """Build home-area vs other-area calling features per HEADINFO_ID.

    Splits ZHPH_AREA_ANALYSIS rows into the subscriber's own location
    (kept as raw OWN_* columns) and all other areas (aggregated with
    sum/median/count/var/max style statistics), then merges everything into
    a single wide frame with a fixed column order.

    Args:
        start, end: REPORT_TIME bounds as 'YYYY-MM-DD HH24:MI:SS' strings.

    Returns:
        pandas.DataFrame with HEADINFO_ID plus 30 feature columns
        (see `wanted_cols` below).
    """
    sql_area_analysis = '''select t.HEADINFO_ID,
        b.PHONE_LOCATION as own_location,
        a.ANALYSIS_AREA,
        a.CONTACT_PHONE_CNT,
        a.CALL_SECONDS,
        a.CALL_CNT,
        a.CALLED_SECONDS,
        a.CALLED_CNT
        from DW_SH_CREDIT.ZHPH_HEAD_INFO t
        left join DW_SH_CREDIT.ZHPH_AREA_ANALYSIS a
        on t.HEADINFO_ID = a.HEADINFO_ID
        left join DW_SH_CREDIT.ZHPH_BASIC_INFO b
        on t.HEADINFO_ID = b.HEADINFO_ID
        where t.REPORT_TIME >= '%s'
        and t.REPORT_TIME <= '%s'
        and t.USER_TYPE = '1'
        ''' % (start, end)
    cursor.execute(sql_area_analysis)
    col_names = [desc[0] for desc in cursor.description]
    re = pandas.DataFrame(cursor.fetchall(), columns=col_names)

    # Rows whose analysed area equals the subscriber's own location: keep
    # the raw figures under OWN_* names.
    own_call = re[re['OWN_LOCATION'] == re['ANALYSIS_AREA']].copy()
    del own_call['OWN_LOCATION']
    del own_call['ANALYSIS_AREA']
    own_call.rename(columns={'CONTACT_PHONE_CNT': 'OWN_CONTACT_PHONE_CNT',
                             'CALL_SECONDS': 'OWN_CALL_SECONDS',
                             'CALL_CNT': 'OWN_CALL_CNT',
                             'CALLED_SECONDS': 'OWN_CALLED_SECONDS',
                             'CALLED_CNT': 'OWN_CALLED_CNT'}, inplace=True)

    # All other areas: cast to numeric and aggregate.
    re1 = re[re['OWN_LOCATION'] != re['ANALYSIS_AREA']].copy()
    for col in ('CONTACT_PHONE_CNT', 'CALL_SECONDS', 'CALL_CNT',
                'CALLED_SECONDS', 'CALLED_CNT'):
        # numpy.float was deprecated in NumPy 1.20 and removed in 1.24;
        # builtin float is the documented replacement.
        re1[col] = re1[col].astype(float)

    other_conph_cnt = re1.groupby('HEADINFO_ID').agg({'CONTACT_PHONE_CNT': {'sum', 'median', 'count', 'var', 'max'}})
    other_conph_cnt.columns = other_conph_cnt.columns.droplevel()
    other_conph_cnt.rename(columns={'var': 'other_cpc_var', 'max': 'other_cpc_max', 'sum': 'other_cpc_sum', 'count': 'other_cpc_count', 'median': 'other_cpc_median'}, inplace=True)

    # Outbound stats: only areas that were actually called (CALL_CNT > 0).
    re2 = re1[re1['CALL_CNT'] > 0].copy()
    other1_call_sec = re2.groupby('HEADINFO_ID').agg({'CALL_SECONDS': {'sum', 'max', 'mean', 'median'}})
    other1_call_sec.columns = other1_call_sec.columns.droplevel()
    other1_call_sec.rename(columns={'max': 'other1_callsec_max', 'sum': 'other1_callsec_sum', 'mean': 'other1_callsec_mean', 'median': 'other1_callsec_median'}, inplace=True)
    other1_call_cnt = re2.groupby('HEADINFO_ID').agg({'CALL_CNT': {'sum', 'max', 'mean', 'count', 'median'}})
    other1_call_cnt.columns = other1_call_cnt.columns.droplevel()
    other1_call_cnt.rename(columns={'sum': 'other1_callcnt_sum', 'max': 'other1_callcnt_max', 'mean': 'other1_callcnt_mean', 'count': 'other1_callcnt_count', 'median': 'other1_callcnt_median'}, inplace=True)
    other_call = pandas.merge(other1_call_sec, other1_call_cnt, how='left', on='HEADINFO_ID')
    # Average seconds per outbound call across non-home areas.
    other_call['other1_sec/cnt'] = other_call['other1_callsec_sum'] / other_call['other1_callcnt_sum']

    # Inbound stats: areas with more than one received call.
    # NOTE(review): threshold is > 1 here but > 0 for outbound — looks
    # deliberate, but worth confirming with the feature owner.
    re3 = re1[re1['CALLED_CNT'] > 1].copy()
    other_called_sec = re3.groupby('HEADINFO_ID').agg({'CALLED_SECONDS': {'sum', 'mean', 'median', 'max'}})
    other_called_sec.columns = other_called_sec.columns.droplevel()
    other_called_sec.rename(columns={'mean': 'other2_calledsec_mean', 'sum': 'other2_calledsec_sum', 'median': 'other2_calledsec_median', 'max': 'other2_calledsec_max'}, inplace=True)
    other_called_cnt = re3.groupby('HEADINFO_ID').agg({'CALLED_CNT': {'sum', 'mean', 'max', 'count', 'median'}})
    other_called_cnt.columns = other_called_cnt.columns.droplevel()
    other_called_cnt.rename(columns={'mean': 'other2_calledCNT_mean', 'sum': 'other2_calledCNT_sum', 'max': 'other2_calledCNT_max', 'count': 'other2_calledCNT_count', 'median': 'other2_calledCNT_median'}, inplace=True)
    other_called = pandas.merge(other_called_sec, other_called_cnt, how='left', on='HEADINFO_ID')
    other_called['other2_sec/cnt'] = other_called['other2_calledsec_sum'] / other_called['other2_calledCNT_sum']

    together = pandas.merge(own_call, other_conph_cnt, how='left', on='HEADINFO_ID')
    together = pandas.merge(together, other_call, how='left', on='HEADINFO_ID')
    together = pandas.merge(together, other_called, how='left', on='HEADINFO_ID')
    # Fixed output column order — downstream INSERT relies on positions.
    wanted_cols = ['HEADINFO_ID', 'OWN_CONTACT_PHONE_CNT', 'OWN_CALL_SECONDS', 'OWN_CALL_CNT', 'OWN_CALLED_SECONDS', 'OWN_CALLED_CNT', 'other_cpc_median',
                   'other_cpc_var', 'other_cpc_sum', 'other_cpc_count', 'other_cpc_max', 'other1_callsec_mean', 'other1_callsec_median', 'other1_callsec_sum',
                   'other1_callsec_max', 'other1_callcnt_median', 'other1_callcnt_mean', 'other1_callcnt_sum', 'other1_callcnt_count', 'other1_callcnt_max',
                   'other1_sec/cnt', 'other2_calledsec_mean', 'other2_calledsec_median', 'other2_calledsec_sum', 'other2_calledsec_max', 'other2_calledCNT_median',
                   'other2_calledCNT_mean', 'other2_calledCNT_sum', 'other2_calledCNT_count', 'other2_calledCNT_max', 'other2_sec/cnt']
    return together.loc[:, wanted_cols]

 

def data_in_oracle(st, ed):
    """Merge all feature frames and bulk-insert them into USER_CONSUMPTION_06.

    Pulls every feature extractor for the [st, ed] REPORT_TIME window,
    merges on HEADINFO_ID (inner for the portrait spine, left for the
    rest), converts NaN to None for Oracle NULLs, and writes all 74
    positional columns to the local table.

    Args:
        st, ed: REPORT_TIME bounds as 'YYYY-MM-DD HH24:MI:SS' strings.

    Side effects:
        Inserts into USER_CONSUMPTION_06 via cursor_local and commits.
    """
    # Inner merge with the portrait frame defines the row population; all
    # remaining frames attach as optional (left) features.
    together = pandas.merge(HEADINFO_data(st, ed), userportrail_data(st, ed),
                            on='HEADINFO_ID')
    for frame in (EMERGENCY_data(st, ed),
                  TRIP_ANALYSIS_data(st, ed),
                  MONTHLY_CONSUMPTION_data(st, ed),
                  AREA_ANALYSIS_data(st, ed)):
        together = pandas.merge(together, frame, on='HEADINFO_ID', how='left')
    # Positional insert: the merged frame's column order must match the 74
    # columns of USER_CONSUMPTION_06.
    sqlinsert = '''insert into USER_CONSUMPTION_06 values(:1,:2,:3,:4,:5,:6,:7,:8,:9,:10,
:11,:12,:13,:14,:15,:16,:17,:18,:19,:20,
:21,:22,:23,:24,:25,:26,:27,:28,:29,:30,
:31,:32,:33,:34,:35,:36,:37,:38,:39,:40,
:41,:42,:43,:44,:45,:46,:47,:48,:49,:50,
:51,:52,:53,:54,:55,:56,:57,:58,:59,:60,
:61,:62,:63,:64,:65,:66,:67,:68,:69,:70,
:71,:72,:73,:74)'''
    # Replace NaN with None so cx_Oracle binds SQL NULLs.
    data = together.where(together.notnull(), None)
    # List comprehension returns the rows directly — the original ran a
    # comprehension of .append() calls purely for its side effects.
    rows = [tuple(row) for row in data.values.tolist()]
    cursor_local.executemany(sqlinsert, rows)
    conn_local.commit()
    print('success')

 

 

 

'''-----------------------将各张表查询出结果并转为excel--------------------------'''

if __name__ == '__main__':
    # Load one month of reports (June 2018, window end padded into July 1
    # early morning) into the local Oracle table.
    window_start = '2018-06-01 00:00:00'
    window_end = '2018-07-01 06:00:00'
    data_in_oracle(window_start, window_end)

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值