工作.py (work notes)

Contents
Summing multiple columns in pandas
Common pandas methods in practice: the 大众 workflow
Excel operations in Python
numpy operations
Image recognition (OCR)
Common Python code snippets
Python functions
Connecting to Hive SQL from Python
DataFrame-related notes
Looped left joins on cross-sectional data
######################################Summing multiple columns in pandas
1. Sum specific columns:
Method 1:

    data['col3']=data[['col1','col2']].sum(axis=1)

Method 2:

    df['col3'] = df.apply(lambda x: x['col1'] + x['col2'], axis=1)

2. Sum a contiguous range of columns:

    data['sum'] = data.iloc[:,0:3].sum(axis=1)

    e.g., this sums the 1st through 3rd columns (iloc positions 0 to 2)

3. Sum all columns (this assumes every column is numeric)

    data['sum'] = data.sum(axis=1)

    data['sum'] = data.apply(lambda x: x.sum(), axis=1)
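
A minimal, self-contained sketch tying the three approaches together (the frame and column names here are placeholders, not from the original notes):

    import pandas as pd

    data = pd.DataFrame({'col1': [1, 2, 3], 'col2': [10, 20, 30]})
    # 1) sum two named columns
    data['col3'] = data[['col1', 'col2']].sum(axis=1)
    # 2) sum a positional range of columns (here the first two)
    data['range_sum'] = data.iloc[:, 0:2].sum(axis=1)
    # 3) sum every numeric column row-wise
    data['row_sum'] = data[['col1', 'col2', 'col3']].sum(axis=1)
    print(data)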



#########################################Common pandas methods in practice: the 大众 workflow
#########################################
#########################################

 
import numpy as np
import pandas as pd
#####################################1. Load the data
pt = 'D:\\xxx.xlsx'
df1 = pd.read_excel(pt,sheet_name='数据源1')
 
df1['分数区间'].value_counts()
df1['分数区间'] = df1['分数区间'].replace("(0.817.1]","(0.817,1]")
df1.rename(columns={'分数区间':'old分数区间'},inplace=True)
df1.columns
df1.info()
 
# note: this mapping needs the numeric '分数区间' column created in step 2 below, so run it after that line
fenshu_map = (df1[['old分数区间','分数区间']]
              .drop_duplicates(subset=['old分数区间','分数区间'], keep='last')
              .set_index('分数区间')
              .to_dict(orient='dict')
              .get('old分数区间'))
 
# fenshu_map is already a plain dict, so .values() can be called on it directly
sss = fenshu_map.values()
####################################2. Process the data: 大众-only applications
#grouping dimension: 风险等级 (risk level) or 分数区间 (score interval)
var='风险等级'
var='分数区间'   # this later assignment is the one actually used below
 
 
#1) Pre-approval (预授信)
df1["只有大众预授信"] = df1.apply( lambda x:"是" if (x.大众资质==0 and x.瓜子资质==0) else "否"  , axis = 1)
# parse the lower bound of the old interval string, e.g. "(0.817,1]" -> 0.817
df1['分数区间'] = df1['old分数区间'].str.split(",", n=1).map(lambda x: float(x[0][1:]))
#df1['分数区间排序']=df1['分数区间'].rank()
 
a1 = df1.groupby(var).apply( lambda x:(x['只有大众预授信']=='否').sum() )
a2 = df1.groupby(var).apply( lambda x:(x['只有大众预授信']=='是').sum() )
a3=pd.concat([a1,a2],axis=1)
a3['总计']=a3[0] + a3[1]
a3.loc['行累加']=a3.apply(lambda x: x.sum(),axis=0) 
a3.sort_index(ascending=False,inplace=True)
 
a3.columns=['否','是','总计']
a3['是列累加']=a3['是'].cumsum()
 
 
a3['预授信累计占比']=a3['是列累加']/a3.loc['行累加','总计']
 
a3.drop(['是列累加'],axis=1,inplace=True)
dazong_risk_ysx = a3
 
#2) Routing pass/reject (路由通过拒绝)
a1 = df1.groupby(var).apply( lambda x:(x['大众审核结果']==2).sum() )
a2 = df1.groupby(var).apply( lambda x:(x['大众审核结果']==3).sum() )
a3=pd.concat([a1,a2],axis=1)
a3['总计']=a3[0] + a3[1]
 
a3.sort_index(ascending=False,inplace=True)
 
a3.columns=['否','是','总计']
a3.loc['行累加']=a3.apply(lambda x: x.sum(),axis=0) 
a3['是列累加']=a3['否'].cumsum()
a3['总计累加']=a3['总计'].cumsum()
 
 
a3['预授信累计占比']=a3['是列累加']/a3['总计累加']
 
a3.drop(['是列累加'],axis=1,inplace=True)
a3.drop(['总计累加'],axis=1,inplace=True)
 
dazong_risk_shenhe = a3
 
#3) True pass/reject (真实通过拒绝)
a1 = df1.groupby(var).apply( lambda x:(x['大众审核结果']==2).sum() )
a2 = df1.groupby(var).apply( lambda x:(x['大众真实拒绝']==1).sum() )
a3=pd.concat([a1,a2],axis=1)
a3['总计']=a3[0] + a3[1]
 
a3.sort_index(ascending=False,inplace=True)
 
a3.columns=['否','是','总计']
a3.loc['行累加']=a3.apply(lambda x: x.sum(),axis=0) 
a3['是列累加']=a3['否'].cumsum()
a3['总计累加']=a3['总计'].cumsum()
 
 
a3['预授信累计占比']=a3['是列累加']/a3['总计累加']
 
 
a3.drop(['是列累加'],axis=1,inplace=True)
a3.drop(['总计累加'],axis=1,inplace=True)
dazong_risk_true = a3
 
risk_weidu   = pd.concat([dazong_risk_ysx,dazong_risk_shenhe,dazong_risk_true],axis=1)
 
 
#Score-interval dimension (分数维度)
fenshu_weidu = pd.concat([dazong_risk_ysx,dazong_risk_shenhe,dazong_risk_true],axis=1).reset_index()
fenshu_weidu['yinshe'] = fenshu_weidu[var].map(fenshu_map.get)   # after reset_index the key column is named after var ('分数区间'), not 'index'
 
 
 
#####################################Output the results
 
with pd.ExcelWriter('D:\\xxx.xlsx') as writer:
    risk_weidu.to_excel(writer,sheet_name='xx',index=True)
    fenshu_weidu.to_excel(writer,sheet_name='xx')
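
# The three blocks above repeat one pattern: count 是/否 per group, add a grand total, then take a cumulative share.
# A minimal, generic sketch of that pattern (the toy frame and the grp/flag names are placeholders, not from the original data):
import pandas as pd

toy = pd.DataFrame({'grp': ['A', 'A', 'B', 'B', 'B'],
                    'flag': ['是', '否', '是', '是', '否']})

counts = pd.DataFrame({
    '是': toy.groupby('grp')['flag'].apply(lambda s: (s == '是').sum()),
    '否': toy.groupby('grp')['flag'].apply(lambda s: (s == '否').sum()),
})
counts['总计'] = counts['是'] + counts['否']
counts = counts.sort_index(ascending=False)
counts['累计占比'] = counts['是'].cumsum() / counts['总计'].sum()   # cumulative share of 是
print(counts)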
 
#########################################Excel operations in Python
#########################################
#########################################
 
#Excel operations in Python
import numpy as np
import pandas as pd
###################################1. Create the data
df = pd.DataFrame({"id":[1001,1002,1003,1004,1005,1006],
                   "date":pd.date_range('20130102', periods=6),
                   "city":['Beijing ', 'SH', ' guangzhou ', 'Shenzhen', 'shanghai', 'BEIJING '],
                    "age":[23,44,54,32,34,32],
                    "category":['100-A','100-B','110-A','110-C','210-A','130-F'],
                    "price":[1200,np.nan,2133,5433,np.nan,4432]},
columns =['id','date','city','category','age','price'])
 
###################################2. Inspect the table
#dimensions (rows, columns)
df.shape
#table summary
df.info()
#dtypes
df.dtypes
df['age'].dtype
#check for null values
df.isnull()
df['price'].isnull()
#unique values
df['city'].unique()
#underlying values of the table
df.values
#column names
df.columns
#first 10 rows, last rows
df.head(10)
df.tail()
 
#################################3. Clean the table
#drop rows that contain any null value
df.dropna(how='any')
#fill nulls with 0
df.fillna(value=0)
#fill NA in price with the column mean
df['price']=df['price'].fillna(df['price'].mean())
#strip whitespace
#remove spaces around the city values
df['city']=df['city'].map(str.strip)
#lower-case the city column
df['city']=df['city'].str.lower()
#change the dtype
df['price'].astype('int')
#rename a column
df.rename(columns={'category': 'category-size'})
#drop duplicates, keeping the last occurrence
df['city'].drop_duplicates(keep='last')
#replace values
df['city'].replace('sh', 'shanghai')
 
###################################4. Preprocess
#build the df1 table
df1=pd.DataFrame({"id":[1001,1002,1003,1004,1005,1006,1007,1008],
                      "gender":['male','female','male','female','male','female','male','female'],
                      "pay":['Y','N','Y','Y','N','Y','N','Y',],
                      "m-point":[10,12,20,40,40,40,30,20]})
#merge the two tables
df_inner=pd.merge(df,df1,how='inner')
#set the index column
df_inner.set_index('id')
#sort by the values of a column
df_inner.sort_values(by=['age'])
#sort by the index
df_inner.sort_index()
#if price > 3000 the group column is 'high', otherwise 'low'
df_inner['group'] = np.where(df_inner['price'] > 3000,'high','low')
#mark rows that satisfy several conditions at once
df_inner.loc[(df_inner['city'] == 'beijing') & (df_inner['price']>= 4000), 'sign']=1
 
#split the category column on '-' into a new table, keeping df_inner's index; the new columns are category and size
[x.split('-') for x in df_inner['category']]
split=pd.DataFrame((x.split('-') for x in df_inner['category']),index=df_inner.index,columns=['category','size'])
 
#merge the split columns back into df_inner
df_inner=pd.merge(df_inner,split,right_index=True, left_index=True)
 
 
###################################5. Extract data
#select a single row by index label
df_inner.loc[3]
#select a range of rows by index label
df_inner.loc[0:3]
#reset the index
df_inner.reset_index()
#use the date column as the index
df_inner=df_inner.set_index('date')
#all rows up to 2013-01-04
df_inner[:'2013-01-04']
 
#select a block of data by position with iloc
df_inner.iloc[:3,:2]
 
#select individual rows/columns by position with iloc
df_inner.iloc[[0,2,5],[4,5]]
 
#mixed label/position selection (ix was removed from pandas; use loc on the rows and positional column selection instead)
df_inner.loc[:'2013-01-03', df_inner.columns[:4]]
 
#check whether city equals beijing
df_inner['city'].isin(['beijing'])
#test whether city is beijing or shanghai, then extract the matching rows
df_inner.loc[df_inner['city'].isin(['beijing','shanghai'])]
 
###################################6. Filter data
#filter with an AND condition
df_inner.loc[ (df_inner['age'] > 25) & (df_inner['city'] == 'beijing'), ['id','city','age','gender']]
 
#filter with a NOT condition
df_inner.loc[(df_inner['city'] != 'beijing'), ['id','city','age','gender']].sort_values(['id'])
 
#count the city column of the filtered rows
df_inner.loc[(df_inner['city'] != 'beijing'), ['id','city','age','gender']].sort_values(['id']).city.count()
 
#filter with the query method
df_inner.query('city == ["beijing", "shanghai"]')
 
#sum price over the filtered rows
df_inner.query('city == ["beijing", "shanghai"]').price.sum()
 
 
 
 
 
###################################7. Aggregate
#count all columns per city
df_inner.groupby('city').count()
#count only the id column per city
df_inner.groupby('city')['id'].count()
#count by two keys
df_inner.groupby(['city','size'])['id'].count()
#group by city and compute count, sum and mean of price
df_inner.groupby('city')['price'].agg([len,np.sum, np.mean])
 
#pivot table
#city as the row field, size as the column field, price as the value field;
#compute the count and the sum of price, with row and column totals (margins)
pd.pivot_table(df_inner,index=["city"],values=["price"],columns=["size"],aggfunc=[len,np.sum],fill_value=0,margins=True)
 
###################################8. Statistics
#simple random sample
df_inner.sample(n=3)
#sample with manual weights
weights = [0, 0, 0, 0, 0.5, 0.5]
df_inner.sample(n=2, weights=weights)
#sample without replacement
df_inner.sample(n=6, replace=False)
#sample with replacement
df_inner.sample(n=6, replace=True)
#descriptive statistics of the table
df_inner.describe().round(2).T
df_inner.describe().round(2)
 
#correlation between two columns
df_inner['price'].corr(df_inner['m-point'])
 
#correlation matrix of the whole table
df_inner.corr()
###################################9. Output
#write to Excel
df_inner.to_excel('Excel_to_Python.xlsx', sheet_name='bluewhale_cc')
 
#write to CSV
df_inner.to_csv('Excel_to_Python.csv')

#########################################numpy operations
#########################################
#########################################
Logarithm functions default to base e (the natural log). There are two common ways to compute logarithms:

 
1. np.log
 
import numpy as np
print( 'np.e:',np.e)
print( 'np.log([100,10000,10000]:',np.log([100,10000,10000]))   # note: natural log, so the result may not be what you expect
print( 'np.log10([100,10000,10000]:',np.log10([100,10000,10000])) # base-10 log
print( 'np.log([1,np.e,np.e**2]):',np.log([1,np.e,np.e**2]))   # np.log(x) uses base e by default
print( 'np.log2([2,2**2,2**3]):',np.log2([2,2**2,2**3]))   # base-2 log
 
 
2. math.log
 
import math 
#the general form is math.log(a, base); with a single argument the base is e
math.log(10)  #2.302585092994046
math.log(math.e)  #1.0
math.log(100,10)  #2.0
math.log(10,100)  #0.5
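 
numpy itself has no log function that takes an arbitrary base; a small sketch (not from the original notes) using the change-of-base identity log_b(x) = ln(x) / ln(b):
 
import numpy as np
x = np.array([100, 10000, 10000])
base = 10
print(np.log(x) / np.log(base))   # same as np.log10(x): [2. 4. 4.]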
 
#########################################Image recognition (OCR)
#########################################
#########################################
pip install baidu-aip
pip install pytesseract
 

# -*- coding: utf-8 -*-
"""
Created on Sun May  3 12:44:16 2020
@author: 62669
"""
####################################################1. Simple recognition (pytesseract)
####################################################2. Using the Baidu OCR API
from PIL import Image
import pytesseract
from aip import AipOcr
import pandas as pd
import os
 
APP_ID = 'xxx'        # your Baidu OCR app credentials go here
API_KEY = 'xxx'
SECRET_KEY = 'xxx'
client = AipOcr(APP_ID, API_KEY, SECRET_KEY)
 
#Step 1: read the image file as bytes
def get_file_content(filePath):
    with open(filePath, 'rb') as fp:
        return fp.read()
#Step 2: send the image to OCR and concatenate the recognized text
def image2text(fileName):
    image = get_file_content(fileName)
    dic_result = client.basicGeneral(image)
    res = dic_result['words_result']
    result = ''
    for m in res:
        result = result + str(m['words'])
    return result
 
#result = image2text(r'F:\a_houdashi_work\bbb_python学习\其他\图片转文字\圈外总结\结构化61.jpg')
 
#print(result)
#Step 3: loop over a folder of images, merge the text and export it to a txt file
 
 
folder_path='D:/hzl/图片转文字/规则集输入'
savefile_path='D:/hzl/图片转文字/规则集输入'
savefile_name='a.csv'
 
os.chdir(folder_path)
file_list=os.listdir()
file_list.sort(reverse = True)
file_list
 
 
filestart=0
fileend=len(file_list)
end = ''
for i in range(filestart,fileend):
    name = folder_path +'/'+file_list[i]
    print(name)
    result = image2text(name)
    end = '\n'+ name + '\n' + result + end  +'\n' 
print(end)
 
with open(r"D:\hzl\图片转文字\a.txt", "w", encoding='utf-8') as f:
    f.write(str(end))

#########################################Common Python code snippets
#########################################
#########################################
1. Reading and writing Excel
pandas.read_excel(io,sheet_name = 0,header = 0,names = None,index_col = None,usecols = None,squeeze = False,dtype = None, ...)
 
df.to_excel(r'D:\xxx\a.xlsx')
with pd.ExcelWriter(r'D:\xxx.xlsx') as writer:
    offline_score1.to_excel(writer,sheet_name='offline_score1',index=False)
    offline_score2.to_excel(writer,sheet_name='offline_score2')
    offline_score3.to_excel(writer,sheet_name='offline_score3')
    offline_score4.to_excel(writer,sheet_name='offline_score4')

2. Counting element occurrences in a list
Method 1: plain dict
a = [1, 2, 3, 1, 1, 2]
counts = {}
for key in a:
    counts[key] = counts.get(key, 0) + 1
print(counts)
 
Method 2: collections.Counter
 
from collections import Counter
a = [1, 2, 3, 1, 1, 2]
result = Counter(a)
print(result)

3. Finding the common and differing elements of two lists
Method 1: sets
list1 = [1,2,3]
list2=[2,3,4]
set1 = set(list1)
set2 = set(list2)
 
print(set1&set2)   # {2, 3}
print(set1^set2)   # {1, 4}
 
Method 2: explicit loop
y1 = [1,2,3]
y2 = [2,3,4]
 
def find_diff_intwo_list(list1,list2):
    same,diff=[],[]
    seq=list(set(list2))
    for i in list(set(list1)):
        if i not in list2:
            diff.append(i)
        else:
            same.append(i)
    for j in same:
        seq.remove(j)
    print("same is {},a_diff is {},b_diff is {}".format(same,diff,seq))
    return same,diff,seq
 
z1,z2,z3 = find_diff_intwo_list(y1,y2)
Method 3: collections.Counter
from collections import Counter
 
list1 = [1,2,3,5,7,7,7,98]
list2 = [93,0,2,6,7,98]
 
def find_diff_by_twolist(list1,list2):
    newli=list(set(list1)) + list(set(list2))
    count=Counter(newli)
    same, diff = [], []
    for i in count.keys():
        print(i)
        if(count.get(i)>=2):
            same.append(i)
        else:
            diff.append(i)
    print("same is {},diff is {}".format(same, diff))


#########################################Python functions
#########################################
#########################################

map


filter


reduce
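 
The three headings above were left empty in the original notes; a brief standard-library sketch of typical usage:
 
from functools import reduce
 
nums = [1, 2, 3, 4, 5]
squares = list(map(lambda x: x ** 2, nums))          # [1, 4, 9, 16, 25]
evens   = list(filter(lambda x: x % 2 == 0, nums))   # [2, 4]
total   = reduce(lambda a, b: a + b, nums)           # 15
print(squares, evens, total)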


cut
pandas.cut(x, bins, right=True, labels=None, retbins=False, precision=3, include_lowest=False, duplicates='raise') #0.23.4
 
 
Purpose: split a set of values into discrete intervals (bins).
 
Parameters: x: the array-like data to be binned; it must be 1-dimensional (a DataFrame is not accepted);
 
bins: the intervals ("buckets"/"bins") to cut into; three forms are accepted: an int scalar, a sequence of scalars, or a pandas.IntervalIndex.
 
An int scalar
 
 When bins is an int scalar, x is split into that many equal-width bins. The range of x is extended by 0.1% on each side so that the minimum and maximum of x are included.
 
A sequence of scalars
 
 The sequence defines the exact edges of each bin; x is not extended.
 
pandas.IntervalIndex
 
Defines the exact intervals to use.
 
right: bool, default True; whether the bins include the right edge. For example, with bins=[1,2,3] and right=True the intervals are (1,2] and (2,3]; with right=False they are [1,2) and [2,3).
 
labels: labels for the resulting bins; for example, after cutting ages into age bands you can label the bands "青年", "中年" and so on. The number of labels must equal the number of resulting intervals: with bins=[1,2,3] there are 2 intervals, (1,2] and (2,3], so labels must have length 2. If you pass
 
labels=False, only the integer index of the bin each value of x falls into (counting from 0) is returned.
 
retbins: bool, default False; whether to also return the bin edges. Mostly useful when bins is an int scalar, so you can see the edges that were chosen.
 
precision: the number of decimal places kept for the interval edges, default 3.
 
include_lowest: bool, default False; whether the first interval should be closed on the left, i.e. whether it includes the lowest value.
 
duplicates: how to handle duplicate bin edges; 'raise' does not allow them, 'drop' drops them.
 
Return values
 
out: a pandas.Categorical, Series or ndarray indicating which bin each value of x falls into; if labels was given, the corresponding label is returned instead.
 
bins: the bin edges, returned only when retbins=True.


1) Split ages into 5 equal-width bins
ages = np.array([1,5,10,40,36,12,58,62,77,89,100,18,20,25,30,32]) 
pd.cut(ages, 5)
 
2) Split ages into 5 equal-width bins and supply labels
ages = np.array([1,5,10,40,36,12,58,62,77,89,100,18,20,25,30,32]) #age data
pd.cut(ages, 5, labels=[u"婴儿",u"青年",u"中年",u"壮年",u"老年"])
 
3) Cut ages at explicitly given edges
ages = np.array([1,5,10,40,36,12,58,62,77,89,100,18,20,25,30,32]) #age data
pd.cut(ages, [0,5,20,30,50,100], labels=[u"婴儿",u"青年",u"中年",u"壮年",u"老年"])
 
4) Also return the bin edges (retbins=True)
ages = np.array([1,5,10,40,36,12,58,62,77,89,100,18,20,25,30,32]) #age data
pd.cut(ages, [0,5,20,30,50,100], labels=[u"婴儿",u"青年",u"中年",u"壮年",u"老年"],retbins=True)
 
5) Return only the index of the bin each value falls into (labels=False)
ages = np.array([1,5,10,40,36,12,58,62,77,89,100,18,20,25,30,32]) #age data
pd.cut(ages, [0,5,20,30,50,100], labels=False)
 
6) In practice
 
#bin the score column with explicit edges using cut; intervals are left-open, right-closed
score_groups=pd.cut(test['score'],bins=[-999999,0,100,200,300,400,500,600,700,800,900,1000,2000,99999])
 
b =test[['score','y_pred']]
 
b.rename(columns={'score':'n'},inplace=True)
 
a = pd.concat([b,score_groups],axis=1)
 
a.columns
c = a.groupby(a.score).count().reset_index()
 
 
#########################################Connecting to Hive SQL from Python
#########################################
#########################################
 
Function definition
def getfeature(datasource,s_dt,end_dt):
    datasource1 = '$.'+datasource
    datasource2 = datasource1+'.result_code'
    sql = '''           
        SELECT
           name,
           id_card_encrypt,
           request_date,
           result_code,
           {0}
        FROM
          (
           SELECT DISTINCT
              name,
              id_card_encrypt,
              date_format(from_unixtime((request_time / 1000)), '%Y-%m-%d') request_date,
              CAST(json_extract(data_dump, '{2}') AS INT) result_code,
              json_extract(data_dump, '{1}') {0}
           FROM
             gzlc_real.fact_rule_engine_result_log
           WHERE dt >= '{3}' and dt < '{4}' AND json_extract(data_dump, '{1}') IS NOT NULL 
           ) 
    '''.format(datasource,datasource1,datasource2,s_dt,end_dt)
    
    cursor = presto.connect_presto(sql, username = 'sunmanman1')   # sql was already formatted above; no need to format it again
    rawdata = cursor.querySQL()
    return rawdata
Invocation
rawdata = {}
featurelist = ['a','b','c']
 
for i in featurelist:
    print(i)
    rawdata[i] = getfeature(i,'2021-09-01','2021-10-14')
    print(min(rawdata[i].request_date))



Parsing the JSON payload
 
#   feature parsing
def get_x(datasource):
    j = 1
    df = rawdata[datasource].copy()
    df.sort_values(['request_date'],ascending = True,inplace = True)
    varlist = list(json.loads(df.iat[0,4]).keys())
    varlist.remove('result_code')
    for i in varlist:
        print(i,j)
        j = j + 1
        df[i] = df[datasource].map(lambda x : json.loads(x)[i])
    df = df[['name','id_card_encrypt','request_date','result_code']+varlist]
    return df
 
 
datasource_list = [ 'df_a','df_b','df_c']
df_datasource = {}
for i in datasource_list:
    j = i[3:]
    print(i)
    df_datasource[i] =  get_x(j)

Working with DataFrames stored in a dict
1. Take out the frame to be processed
xx= df_datasource['xx'].copy()
 
2. Transform it
def func2(x):
    if x =='-1':
        y = -1
    else:
        y = -99
    return y
 
def func3(x):
    if x == '-1':
        y = 1
    else:
        y = -99
    return y
 
xx['col2'] = xx['col2'].map(func2)
xx['col3'] = xx['col3'].map(func3)
 
3. Put the processed frame back
df_datasource['xx'] = xx.copy()
 
Merging the data
#   merge the feature data onto the sample
 
def merge_x(modeldata_final,datasource):
#    datasource = 'df_I_OneConnectRuleData'
    df = df_datasource[datasource].copy()
    df = df[df.result_code == 0]
    df.sort_values(by = ['name','id_card_encrypt','request_date'],ascending = [True,True,False],inplace = True)
    df.drop_duplicates( ['name','id_card_encrypt'],keep = 'first',inplace = True)
    modeldata_finalx = modeldata_final.merge(df,how = 'left',on = ['name','id_card_encrypt'])
    modeldata_finalx['result_code'+datasource[3:]] = modeldata_finalx['result_code'].fillna(-1)
    modeldata_finalx = modeldata_finalx.fillna(-99999)
    del modeldata_finalx['request_date'],modeldata_finalx['result_code']
    return modeldata_finalx
 
modeldata_merge = sampledata.copy()
#print(modeldata_merge.dtypes,set(list(sampledata.rout_date.isnull())))
 
datasource_list = [ 'df_xx1','df_xx2','df_xx3']
for i in datasource_list:
    print(i,modeldata_merge.shape[0])
    modeldata_merge = merge_x(modeldata_merge,i)
 
 
#   scoring flow
 
datascore = modeldata_merge.copy()


#########################################DataFrame-related notes
#########################################
#########################################

Modify one DataFrame column based on the values of another column
Method 1: loc
data_frame.loc[ (data_frame["列1"] != 0),"列2" ] = -99999
 
Method 2: chained indexing (works, but may raise SettingWithCopyWarning; prefer .loc)
data_frame['列1'][data_frame['列2']!=0] = -99999
 
Method 3: np.where
tt['gender'] =np.where(tt['isMarried']=='yes',-9999,tt['gender'])

Modify one column based on the values of several other columns
For multiple conditions, for example:
if column a > 100, set d to -9999
if column b < column c, set d to -8888
(see the sketch below)
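 
The original notes stop at the description; a minimal sketch with np.select, using placeholder column names a/b/c/d:
 
import numpy as np
import pandas as pd
 
df = pd.DataFrame({'a': [50, 150, 80], 'b': [1, 9, 2], 'c': [5, 3, 4], 'd': [0, 0, 0]})
conditions = [df['a'] > 100, df['b'] < df['c']]
choices = [-9999, -8888]
df['d'] = np.select(conditions, choices, default=df['d'])   # the first matching condition wins
print(df)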

Sorting (pass ascending=False for descending order)
# sort on both columns, descending
df.sort_values(by=["aqiLevel","bWendu"],ascending = False)

Three ways to get a DataFrame's column names
#three ways to get the column names
[column for column in df]
 
 
df.columns.tolist()
 
list(df.columns)

Deleting DataFrame columns and renaming columns
import pandas as pd
a = pd.DataFrame([[1,2,3],
                  [3,1,3],
                  [1,2,1]])
 
labels = ['a', 'b', 'c']
#a = pd.DataFrame(a, index=labels,columns=labels)
 
Add column names
a.columns = labels
 
Rename a column
df.rename(columns={'原列名':'新列名'},inplace=True)
 
Drop one column
a.drop('a',axis = 1,inplace = True) #axis defaults to 0
Drop multiple columns
a.drop(columns=['a','b'],inplace = True)  #raises KeyError if 'a' was already dropped above; adjust the list accordingly
a.head()


Deduplicating a DataFrame
df.drop_duplicates(subset=None, keep='first', inplace=False)
 
subset: column label or sequence of labels, optional; restrict the duplicate check to these columns, default is all columns
keep: {'first', 'last', False}, default 'first'; which of the duplicates to keep (the rest are dropped)
inplace: boolean, default False; whether to modify the original DataFrame in place

Saving a DataFrame (see the to_excel / to_csv examples above)
Checking the dtypes of DataFrame columns
offline['score'].dtypes
offline.dtypes

Concatenating two DataFrames whose columns are in different orders
import pandas as pd
import numpy as np
 
data = {'name': ['Joe', 'Mike', 'Jack', 'Rose', 'David', 'Marry', 'Wansi', 'Sidy', 'Jason', 'Even'],
 
        'age': [25, 32, 18, np.nan, 15, 20, 41, np.nan, 37, 32],
 
        'gender': [1, 0, 1, 1, 0, 1, 0, 0, 1, 0],
 
        'isMarried': ['yes', 'yes', 'no', 'yes', 'no', 'no', 'no', 'yes', 'no', 'no']}
 
labels = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']
df = pd.DataFrame(data, index=labels)
 
fea1=['age','name']
fea2=['age','name','gender']
fea3=['gender','age','name']
 
df1 = df[fea1]
df2 = df[fea2]
df3 = df[fea3]
 
df33 = pd.concat([df1, df2], axis=0)
df333 = pd.concat([df1, df3], axis=0)
 
#reorder the columns after concatenation
fea=['age','name']
df3333 = df333[fea]

Counting element occurrences in a DataFrame
import pandas as pd
a = pd.DataFrame([[1,2,3],
                  [3,1,3],
                  [1,2,1]])
result = a.apply(pd.value_counts)
print(result)

And to count items of a plain list this way, how do you turn the list into a DataFrame?

import pandas as pd
from pandas.core.frame import DataFrame
a = [1, 2, 3, 1, 1, 2,2,2]
a2 = DataFrame(a)
a2.value_counts()



How do you convert a dict to a DataFrame?
Two approaches:
 
Approach 1: construct directly
df = pd.DataFrame(d1)
 
from pandas.core.frame import DataFrame
a=[1,2,3,4]    #list a
b=[5,6,7,8]    #list b
c={"a" : a,
 "b" : b}      #combine lists a and b into a dict
data=DataFrame(c)   #convert the dict into a DataFrame
print(data)
 
Approach 2: pd.DataFrame.from_dict with orient='index' builds the frame row by row
pd.DataFrame.from_dict(d1, columns=['A', 'B'], orient='index')
 
15. Converting the dtype of a DataFrame column
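This heading had no code in the original notes; a small sketch with the standard astype / pd.to_numeric approach:
 
import pandas as pd
 
df = pd.DataFrame({'score': ['1', '2', '3'], 'flag': ['1', 'x', '0']})
df['score'] = df['score'].astype(float)                    # explicit cast
df['flag'] = pd.to_numeric(df['flag'], errors='coerce')    # invalid values become NaN
print(df.dtypes)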

16. Collapsing multiple rows into one (SQL); a pandas version of items 16 and 17 is sketched after item 17

SELECT
id,
group_concat(distinct(class),',') AS refuse
FROM sample
GROUP BY id

17. Exploding one row into multiple rows (Hive SQL, LATERAL VIEW explode)
SELECT
IDNUMBER,
id,
ss.class
FROM sample
LATERAL VIEW explode(split(class,",")) ss AS class
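 
Since this section is about DataFrames, here is a hedged pandas sketch of the same two transformations (the sample frame and column names are placeholders):
 
import pandas as pd
 
sample = pd.DataFrame({'id': [1, 1, 2], 'class': ['a', 'b', 'a']})
 
# 16) many rows -> one row: join the distinct classes per id
collapsed = (sample.groupby('id')['class']
                   .apply(lambda s: ','.join(sorted(set(s))))
                   .reset_index(name='refuse'))
 
# 17) one row -> many rows: split the string and explode
exploded = collapsed.copy()
exploded['class'] = exploded['refuse'].str.split(',')
exploded = exploded.explode('class')
print(collapsed)
print(exploded)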

18. df.shape
df.shape[0] gives the number of rows
 
df.shape[1] gives the number of columns

19. Inspecting the column dtypes of a DataFrame
ccc = online_score3.dtypes
for i in ccc.index: # iterate over the keys of the dtypes Series
    print(i,'   ',ccc[i]) # Series[key] returns the dtype of that column


20. Concatenating lists of variable names
fea = ['apply_id','name','dt','created_at','score'] + model2.feature_name_




#########################################Running Hive SQL from Python
#########################################
#########################################
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 27 19:00:42 2021
@author: Administrator
"""
 
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 24 18:11:10 2021
@author: Administrator
"""
 
import pandas as pd
import numpy as np
import datetime
import time
import json
import collections
import joblib
from pandas.io.json import json_normalize
from presto_cli import presto_client
from presto_cli_v2 import presto_client_v2 as presto
 
 
vsql ="""  
SELECT DISTINCT apply_id
                ,name
                ,dt
                ,created_at
                ,json_extract_scalar(final_score_data,'$.score') as score
                ,var_data
           FROM
            
             gzlc_real.fact_risk_algo_api_log
           WHERE dt >= '2021-09-27' and algo_name in ('bank_reject_cfm_c2c_v1')  and created_at>='2021-09-27 18:00:54'
"""
 
class connectHiv(object):
    def __init__(self,sql,path ="xxx",port = 443, username="xxx", source="pf=hubble;client=pyhive"):
        self.path = path
        self.prot = port 
        self.username = username
        self.source = source
        self.sql = sql
        self.getCursor()
         
    def getCursor(self):
        self.CURSOR =presto_client.connect(self.path, port=self.prot, username=self.username,
                           group="xxx", password='xxx', catalog="hive", schema="xxx",
                           ).cursor()
    
    def querySQL(self):
        self.CURSOR.execute(self.sql)
        result = self.CURSOR.fetchall()
        return result
    def getSql(self):
        print(self.sql)
        
#get a database cursor
cursor = connectHiv(sql=vsql)
result = cursor.querySQL()
 
#store the query result as a DataFrame
df11111 = pd.DataFrame(result)
df11111.columns=['apply_id','name','dt','created_at','score','var_data']
 
 
df_to_use = df11111[df11111['apply_id'] != 'apply_id']
df_to_use.reset_index(inplace=True)
 
global false, null, true
false = null = true = ''
 
all_df =None
 
 
 
order_id_list = df_to_use['apply_id'].values.tolist() # loop over order_id / apply_id
 
for ai in range(len(order_id_list)):
    try:
        data_dump = [json.loads(df_to_use['var_data'][ai])]
        jsonformat = json.dumps(data_dump, sort_keys=True, indent=4, separators=(',', ': '))
        df1 = pd.DataFrame.from_dict(json_normalize(data_dump), orient='columns')
        dforder=pd.DataFrame()
        dforder['apply_id']=[df_to_use['apply_id'][ai]]
        #dforder['business_code'] = [df_to_use['business_code'][ai]]
        df = pd.merge(dforder,df1,left_index=True,right_index=True)
 
        if all_df is None:
            all_df = df
            print(ai)
        else:
            all_df = pd.concat([all_df, df])
            print(ai)
    except:
        print(df_to_use['apply_id'][ai])
        f = "/失败订单记录.txt"   # log file for failed orders
        a = 1
        false_order = str(df_to_use['apply_id'][ai])
        with open(f,"a") as file:
            for i in range(a):
                file.write(false_order+"\n")
            a +=1
 
all_df=all_df.drop_duplicates()
final_df =  pd.merge(df_to_use,all_df,how='left',on='apply_id')
 


#########################################Set operations in Python
#########################################
#########################################
# intersection, union, difference, symmetric difference: &, |, -, ^
 
list1 = ['a','b','c','d','e','e','a']
list2 = ['a','b','b','b']
list3 = ['d','e','e']
 
set1 = set(list1)
set2 = set(list2)
set3 = set(list3)
 
chaji = set1-set2-set3   # difference
a = list(chaji)
 
jiaoji = set1&set2       # intersection
jiaoji
 
bingji = set2|set3       # union
bingji
 
duicheng=set1^set2       # symmetric difference
duicheng

#########################################Looped left joins on cross-sectional data
#########################################
#########################################
# df holds the detail records; tmp is the base frame keyed by 进件日期 and flag
list_1 = [['放款日期','放款量'],['订单日期','订单量']]
for i,j in list_1:
    print(i,j)
 
    tmp = pd.merge(tmp,df.groupby([i,'flag'])[j].sum().reset_index(),
                   how='left',left_on=["进件日期",'flag'],right_on=[i,'flag'])
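 
A minimal, self-contained sketch of the same pattern with toy data (every name beyond the original snippet is a placeholder):
 
import pandas as pd
 
df = pd.DataFrame({'进件日期': ['2024-01-01', '2024-01-01', '2024-01-02'],
                   '放款日期': ['2024-01-01', '2024-01-02', '2024-01-02'],
                   '订单日期': ['2024-01-01', '2024-01-01', '2024-01-02'],
                   'flag': ['A', 'A', 'B'],
                   '放款量': [1, 2, 3],
                   '订单量': [10, 20, 30]})
 
# base frame: one row per (进件日期, flag) cross-section
tmp = df[['进件日期', 'flag']].drop_duplicates()
 
for i, j in [['放款日期', '放款量'], ['订单日期', '订单量']]:
    tmp = pd.merge(tmp,
                   df.groupby([i, 'flag'])[j].sum().reset_index(),
                   how='left', left_on=['进件日期', 'flag'], right_on=[i, 'flag'])
print(tmp)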









