Day 2 of learning web scraping: continuing to crawl the company's internal address book

import requests
from bs4 import BeautifulSoup
import re
import datetime

# First, copy the request headers from Chrome DevTools, including the User-Agent and Cookie
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36',
    'Cookie': 'tips=1; V7Qq_2132_smile=1D1; nodeId=_all; nodeType=1; V7Qq_2132_saltkey=jE3E1veZ; V7Qq_2132_lastvisit=1540778092; warning=1; TimeOut=-1; LoginTime=1541143580000; ShowAlert=0; ShowTimeOut=0; V7Qq_2132_editormode_e=1; V7Qq_2132_ulastactivity=be5f8Jd10mhLpV65dxvLP0K1%2BKlfS7mfpC74GPDlXhH0gJSJoiAR; V7Qq_2132_forum_lastvisit=D_36_1541556325; V7Qq_2132_visitedfid=48D36D46D131D53D42D45D39D37D40; PD_STATEFUL_e2a6d2b4-9490-11e6-952f-d8d385a4d314=%2FMultiSeUn; logintype=4; vfCode=uDYGxC; tivoli_loginname=zhanglei1; auth_flag=true; PD-H-SESSION-ID=4_mVnmIh2aciCM3p83ix2dAy5ASdFUA+eErAe8HfXym7XLVZJX; loged=4_mVnmIh2aciCM3p83ix2dAy5ASdFUA+eErAe8HfXym7XLVZJX; messageStr=%u4E0A%u6B21%u767B%u5F55%u65F6%u95F4%uFF1A%3Cfont%20color%u7B49%u4E8E%27yellow%27%3E2018-11-07%2019%3A18%3A22%3C/font%3E%uFF0CIP%uFF1A%3Cfont%20color%u7B49%u4E8E%27yellow%27%3E10.7.31.170%3C/font%3E; goto_url=/; AMWEBJCT!%2FportalserverU!JSESSIONID=0000Yj0K3_CQF9zfidzS0LuSFyw:1a4c3h4fl; ltpacreatetime=1541637238384; acc_valid=1; AMWEBJCT!%2Fportalwas1!JSESSIONID=0000GGz5NFSVoynZVO4PiccMllM:1a3qoeddj; mssPortalPreLogin=0; PD_STATEFUL_a2d06bb2-9f14-11e1-9654-0050561100ae=%2Fappserver; AMWEBJCT!%2FIDAP!JSESSIONID=E802784C0A2987E82501D76C1008410E; Coremail.sid=BAKDKuBBUHYtxYGcJWBBXFTiAMtOESTx; updatesessiontime=1541642831; AMWEBJCT!%2Fportalserver!JSESSIONID=0000Dqyy57wnLWYjUcskCGp_-PW:14q3sh66a; LtpaToken=x5qkJ7PIdJ36dd3xp+WPwGG8KyvONhpP6LUAK5mJKm6q+7vewMmlzZsUNch+tED1xN8hjrf6JeJ/mP+G7jlYr4VpPYwLf6FW2ZnHCndRB0MaZIpEGUmWZRWwaoI5cs/42A+/QIWYCFJpn7L2RJ34eYoQoHNVwr5oWXkbFGArfUWlPjf1p+rEXhk8lAjWHxpHMR500Colf3GTIKKQoIqIwW1AwjsbFuK0SfGzuEh8WI3Iy3VCcxBo8vTEMOHOh4DHJhrJ6esQzRVszXNesWgOP5f1hl/AfBrPbbgNEnuupUj0cxT+PKIUKj0x7uIYM6PQC9h19EnprymCc6dAF0vZxmMnaYeAVfWz; AMWEBJCT!%2Fportalserver!JSESSIONID2=0000HvsQzC2kC1VsMmrl9OZqLjI:14q3sgu18; MssSsoToken=QW6uLAypiih/mW33jw2kbkF2L1vA6RZjaBVUrTGH/gA=; AMWEBJCT!%2Fpwas_virtual!JSESSIONID=0000xtpzPuILdJxOu3r2w2rAxoT:1amslq7b0; AMWEBJCT!%2Fappserver!JSESSIONID=0000TRD1aVMFw3IVSfIj1aKqRDw:16saotmp4'
}

# Get the total number of pages in this department's address book
def Part_List(part_Name,part_ID):
    url = r"http://www.sh.ctc.com/CompanyAddressListNew/newDeptShow.do?method=doSearch&ZDWID="+part_ID+"&currentPage=1&orderIndex=&orderSign=1&str=all&isVirtal=no"
    r = requests.get(url=url, headers=headers)
    r.encoding = 'utf-8'
    soup = BeautifulSoup(r.text, 'lxml')
    totalPage = int(soup.find(name='input',attrs={'name':'totalPage'})['value'])
    # Extract the department's total headcount with a regular expression
    temp_renshu = soup.find(name='b', class_='f-fam1').string
    if temp_renshu == '没有找到记录.':
        print("Department {}: no records found".format(part_Name))
        # return the "no records" marker so the caller can skip this department
        return temp_renshu
    else:
        renshu = int(re.search(r'\D\D(\d+)\D', temp_renshu).group(1))
        print("Crawling department {}: {} staff members".format(part_Name, renshu))
        return totalPage

# Get each employee's id and name pinyin from every page of the listing
def get_Agent(totalPage,part_ID):
    for j in range(1, totalPage + 1):  # page numbers start at 1
        url = r"http://www.sh.ctc.com/CompanyAddressListNew/newDeptShow.do?method=doSearch&ZDWID="+part_ID+"&currentPage="+str(j)+"&orderIndex=&orderSign=1&str=all&isVirtal=no"
        r = requests.get(url=url, headers=headers)
        r.encoding = 'utf-8'
        soup = BeautifulSoup(r.text, 'lxml')
        agent_clickView = soup.find_all(name='div',attrs={'style':'cursor: pointer;'})
        for tag in agent_clickView:
            # the quoted values embedded in the tag are the employee id, name pinyin and virtual type
            clickView = str(tag).split("'")
            agent_id = clickView[1]
            agent_py = clickView[3]
            virtualType = clickView[5]
            single_agent(agent_id, agent_py, virtualType, part_ID)

# Get a single employee's details and export them
def single_agent(agent_id,agent_py,virtualType,part_ID):
    url = r"http://www.sh.ctc.com/CompanyAddressListNew/newDeptShow.do?method=doViewLayer&id="+agent_id+"&isVirtal=no&zygzh="+agent_py+"&ZDWID="+part_ID+"&virtualType="+virtualType
    r = requests.get(url=url, headers=headers)
    r.encoding = 'utf-8'
    soup = BeautifulSoup(r.text, 'lxml')
    staff_detail = []
    for td in soup.select('td'):
        staff_detail.append(td.text.strip())
    # Remove the fields we do not need from the list
    del staff_detail[2]
    del staff_detail[7]
    del staff_detail[10:14]
    # Append the employee's details to the output file
    with open('上海电信通讯录.txt', 'a', encoding='utf-8') as file:
        file.write(','.join(staff_detail))
        file.write('\n')

# Get the name and id of every third-level department (e.g. the Pudong Telecom Bureau)
starttime = datetime.datetime.now()
url = 'http://www.sh.ctc.com/CompanyAddressListNew/deptCustom.do?method=loadMenuData'
response = requests.get(url, headers=headers)
dept_list = response.json()
for i in dept_list:
    if i.get('jt_code'):
        part_ID = i.get('jt_code')
        part_Name = i.get('name')
        totalPage = Part_List(part_Name, part_ID)
        if totalPage == '没有找到记录.':
            continue
        else:
            get_Agent(totalPage, part_ID)
    else:
        continue
endtime = datetime.datetime.now()
total_time = (endtime - starttime).seconds
print("员工数据爬取完毕,总共耗时{}秒".format(total_time))

Takeaways: further refined yesterday's code. Employee details are now pulled from the detail pop-up (a separate window opened from the main list, which carries richer data) rather than only from the main list, while keys such as the employee id are still read from the main list.
Also reinforced the basics: deleting from and appending to lists, writing them out to a file, and looking up tags and extracting data with BeautifulSoup.
Plan for tomorrow: keep crawling the internal address book and download every employee's photo (a rough sketch of the idea follows below).
Update, 2018-11-08: the code has been tidied up and now crawls every department's data correctly.
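
A minimal sketch of that photo step, assuming the detail page exposes an image URL and that the same headers/cookie can be reused. The endpoint below (photo.do) and its parameter are placeholders I made up, not the real address-book API; the actual address would have to be read from the employee detail page.

# Hypothetical sketch: download one employee's photo and save it as a binary file
def save_photo(agent_id, agent_py):
    # placeholder endpoint - replace with the real image URL found on the detail page
    photo_url = "http://www.sh.ctc.com/CompanyAddressListNew/photo.do?id=" + agent_id
    r = requests.get(photo_url, headers=headers, stream=True)
    if r.status_code == 200:
        # the response body is raw image bytes, so write in binary mode
        with open(agent_py + '.jpg', 'wb') as f:
            for chunk in r.iter_content(chunk_size=8192):
                f.write(chunk)

If the real URL does sit on the detail page, save_photo(agent_id, agent_py) could simply be called from single_agent.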
