python中利用publicsuffix获得域名

工作内容记录下:


#encoding:utf-8
'''
Created on 2015年11月23日

@author: wilson.zhou
'''
import psycopg2
import pandas as pd
from publicsuffix import PublicSuffixList
import urllib
import re
from datetime import datetime
# Record wall-clock start time so total runtime can be reported at the end.
start=datetime.now()
# Public-suffix resolver used later to reduce URLs to registrable domains.
psl = PublicSuffixList()
# print(psl.get_public_suffix("http://ad.3kjj.com"))
# SECURITY NOTE(review): database credentials are hard-coded in source —
# move them to a config file / environment variables before sharing this script.
conn = psycopg2.connect(host="10.1.1.230",user="etladmin", password="!SdraFtN@t",database="xmo_dw" , port="5432")
# NOTE: this cursor is never used; a second cursor is created further below.
cur = conn.cursor()
# cur.execute("select id::char(39),bidded_at,viewed_at,viewed_at_i,opxpid,opxsid,opxbid,client_id,pretargeting_id,searchengine_id,adgroup_id,adtext_id,placement_url,\
# winning_prize,cpm,ip,useragent,first_viewed_percent,max_viewed_percent,countryshort,countrylong,ipcity,record_machine,aud_hash, crm_hash,\
# dw_loaded_at,adslot_id,canonical,domain,rtb_cookie_id,ht_score,age,gender,audience,reason,interest from xmo_dw.rtb_datas \
# where searchengine_id in (select id from xmo_dw.searchengines \
# where campaign_id=14039) and viewed_at_i=20151122 limit 10;")


# Pull a sample (limit 10) of RTB rows for campaign 14039 viewed on 2015-11-22.
df=pd.read_sql("select * from xmo_dw.rtb_datas \
where searchengine_id in (select id from xmo_dw.searchengines \
where campaign_id=14039) and viewed_at_i=20151122 limit 10;",conn);
# Stringify the id column (presumably a wide numeric/uuid type — see the
# commented id::char(39) cast above).
df['id']=df['id'].map(lambda x:str(x))
def urldecode(x):
    """Percent-decode a URL string (e.g. '%3A%2F%2F' -> '://').

    The original call, urllib.unquote, exists only on Python 2; fall back
    to urllib.parse.unquote so the script also runs under Python 3.
    Behavior on Python 2 is unchanged.
    """
    try:
        unquote = urllib.unquote          # Python 2
    except AttributeError:
        from urllib.parse import unquote  # Python 3
    return unquote(x)
# Pre-compiled pattern capturing the "http://host" prefix of a URL; used by
# urlclear() below. Raw string replaces the original's "\:" and "\/" escapes,
# which are invalid string escape sequences on modern Python (the regex
# engine treated them as plain ':' and '/', so matching is unchanged).
patt = re.compile(r'(^http://.*?)/.*?')
def urlclear(x):
    """Reduce a decoded URL to its "http://host" prefix using the
    module-level regex `patt`.

    Returns the literal string 'None' (not the None object) when the input
    is 'None' or does not match — downstream code appears to use this as a
    sentinel; TODO confirm.
    """
    if x == 'None':
        return 'None'
    try:
        # [0] raises IndexError when the pattern does not match;
        # TypeError covers non-string inputs (e.g. a real None/NaN cell).
        # Narrowed from the original bare `except:`, which would also have
        # hidden genuine bugs such as NameError.
        return patt.findall(x)[0]
    except (IndexError, TypeError):
        return 'None'
# Derive the registrable domain: percent-decode, strip the path down to the
# "http://host" prefix, then reduce via the public-suffix list.
# NOTE: order matters — 'canonical' must be computed from the raw 'domain'
# column BEFORE 'domain' itself is overwritten with its decoded form below.
# NOTE(review): psl.get_public_suffix receives a value still carrying the
# "http://" prefix; verify that publicsuffix tolerates this as intended.
df['canonical']=df['domain'].map(urldecode).map(urlclear).map(psl.get_public_suffix)
df['domain']=df['domain'].map(urldecode)

# Stringify the mixed-type columns in one loop instead of eleven copy-pasted
# .map(lambda x:str(x)) lines; Series.map(str) is behaviorally identical.
for _col in ('bidded_at', 'opxpid', 'viewed_at_i', 'client_id',
             'pretargeting_id', 'searchengine_id', 'adgroup_id',
             'adtext_id', 'cpm', 'ht_score', 'adslot_id'):
    df[_col] = df[_col].map(str)

# Fresh cursor for the maintenance statements below (shadows the unused one
# created right after connect()).
cur = conn.cursor()
print df.dtypes

# pd.read_sql("delete from  xmo_dw.temp_rtb_datas_14039",conn)
# cur.execute("delete from  xmo_dw.temp_rtb_datas_14039")
colnames=df.columns
print colnames
print len(colnames)
# WARNING(review): this deletes EVERY row of xmo_dw.searchengines — an
# apparently shared dimension table the query above reads from. The
# commented-out lines above target xmo_dw.temp_rtb_datas_14039 instead,
# which strongly suggests the wrong table name was left here. Confirm the
# intended target before running.
cur.execute("delete from xmo_dw.searchengines;")
conn.commit()
# 
# result=zip(df[colnames[0]],df[colnames[1]],df[colnames[2]],df[colnames[3]],df[colnames[4]],df[colnames[5]],df[colnames[6]],df[colnames[7]],df[colnames[8]],df[colnames[9]],df[colnames[10]],df[colnames[11]],\
#     df[colnames[12]],df[colnames[13]],df[colnames[14]],df[colnames[15]],df[colnames[16]],df[colnames[17]],df[colnames[18]],df[colnames[19]],df[colnames[20]],df[colnames[21]],df[colnames[22]],df[colnames[23]], \
#     df[colnames[24]],df[colnames[25]],df[colnames[26]],df[colnames[27]],df[colnames[28]],df[colnames[29]],df[colnames[30]],df[colnames[31]],df[colnames[32]],df[colnames[33]],df[colnames[34]],df[colnames[35]])
# # for i1,i2,i3,i4,i5,i6,i7,i8,i9,i10,i11,i12,i13,i14,i15,i16,i17,i18,i19,i20,i21,i22,i23,i24,i25,i26,i27,i28,i29,i30,i31,i32,i33,i34,i35,i36 in result:
# #     cur.execute("INSERT INTO temp_rtb_datas_14039( colnames[0],colnames[1],colnames[2],colnames[3],colnames[4],colnames[5],colnames[6],colnames[7],colnames[8],colnames[9],colnames[10],colnames[11],colnames[12],colnames[13],colnames[14],colnames[15],colnames[16] ,\
# # colnames[17], colnames[18],colnames[19],colnames[20],colnames[21],colnames[22],colnames[23],colnames[24],colnames[25],colnames[26],colnames[27],colnames[28],colnames[29],colnames[30],colnames[31],colnames[32],colnames[33],colnames[34],colnames[35])VALUES(%s, %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s, \
# # %s, %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s, %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)",(i1,i2,i3,i4,i5,i6,i7,i8,i9,i10,i11,i12,i13,i14,i15,i16,i17,i18,i19,i20,i21,i22,i23,i24,i25,i26,i27,i28,i29,i30,i31,i32,i33,i34,i35,i36))
# #     
# print result
# for i1,i2,i3,i4,i5,i6,i7,i8,i9,i10,i11,i12,i13,i14,i15,i16,i17,i18,i19,i20,i21,i22,i23,i24,i25,i26,i27,i28,i29,i30,i31,i32,i33,i34,i35,i36 in result:
#     cur.execute("INSERT INTO temp_rtb_datas_14039(id,bidded_at,viewed_at,viewed_at_i,opxpid,opxsid,opxbid,client_id,pretargeting_id,searchengine_id,adgroup_id,adtext_id,placement_url,\
# winning_prize,cpm,ip,useragent,first_viewed_percent,max_viewed_percent,countryshort,countrylong,ipcity,record_machine,aud_hash, crm_hash,\
# dw_loaded_at,adslot_id,canonical,domain,rtb_cookie_id,ht_score,age,gender,audience,reason,interest)VALUES(%s, %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s, \
# %s, %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s, %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)",(i1,i2,i3,i4,i5,i6,i7,i8,i9,i10,i11,i12,i13,i14,i15,i16,i17,i18,i19,i20,i21,i22,i23,i24,i25,i26,i27,i28,i29,i30,i31,i32,i33,i34,i35,i36))
# conn.commit()   
# Export the result. BUG FIX: pandas' to_csv has no "delim" keyword — the
# separator argument is named "sep"; the original call raised TypeError
# before writing anything.
df.to_csv("d:\\wilson.zhou\\Desktop\\result.csv", sep="#", index=False)
conn.close()
# Report total wall-clock runtime in seconds.
total = (datetime.now() - start).total_seconds()
print ("总共花费了{0}秒".format(total))
print("success")  # fixed misspelled status message ("sucess")


评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值