Python爬取拉勾网招聘信息并写入Excel（Scraping Lagou job listings with Python and writing them to Excel）

1 #-*- coding:utf-8 -*-

2

3 importurllib4 importurllib25 from bs4 importBeautifulSoup6 importre7 importxlwt8

9 #initUrl = 'http://www.lagou.com/zhaopin/Python/?labelWords=label'

10 defInit(skillName):11 totalPage = 30

12 initUrl = 'http://www.lagou.com/zhaopin/'

13 #skillName = 'Java'

14 userAgent = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36'

15 headers = {'User-Agent':userAgent}16

17 #create excel sheet

18 workBook = xlwt.Workbook(encoding='utf-8')19 sheetName = skillName + 'Sheet'

20 bookSheet =workBook.add_sheet(sheetName)21 rowStart =022 for page inrange(totalPage):23 page += 1

24 print '##################################################### Page',page,'#####################################################'

25 currPage = initUrl + skillName + '/' + str(page) + '/?filterOption=3'

26 #print currUrl

27 try:28 request = urllib2.Request(currPage,headers=headers)29 response =urllib2.urlopen(request)30 jobData =readPage(response)31 #rowLength = len(jobData)

32 for i,row inenumerate(jobData):33 for j,col inenumerate(row):34 bookSheet.write(rowStart +i,j,col)35 rowStart = rowStart + i +1

36 excepturllib2.URLError,e:37 if hasattr(e,"code"):38 printe.code39 if hasattr(e,"reason"):40 printe.reason41 xlsName = skillName + '.xls'

42 workBook.save(xlsName)43

44 defreadPage(response):45 btfsp =BeautifulSoup(response.read())46 webLinks = btfsp.body.find_all('div',{'class':'p_top'})47 #webLinks = btfsp.body.find_all('a',{'class':'position_link'})

48 #print weblinks.text

49 count = 1

50 jobData =[]51 for link inwebLinks:52 print 'No.',count,'==========================================================================================='

53 pageUrl = link.a['href']54 jobList =loadPage(pageUrl)55 #print jobList

56 jobData.append(jobList)57 count += 1

58 returnjobData59

60 defloadPage(pageUrl):61 currUrl = 'http:' +pageUrl62 userAgent = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36'

63 headers = {'User-Agent':userAgent}64 try:65 request = urllib2.Request(currUrl,headers=headers)66 response =urllib2.urlopen(request)67 content =loadContent(response.read())68 returncontent69 excepturllib2.URLError,e:70 if hasattr(e,"code"):71 printe.code72 if hasattr(e,"reason"):73 printe.reason74

def loadContent(pageContent):
    r"""Parse one Lagou job-detail HTML page into a flat list of 16 fields.

    Returns [companyName, jobTitle, jobName, salary, workplace, experience,
    education, worktime, jobAttract, publishTime, workField, companyScale,
    homePage, workAddress, currStage, financeAgent].

    NOTE(review): every re.split(r'(?:\s*)', ...) index below assumes the
    exact whitespace layout of Lagou's 2016-era markup — confirm against a
    live page before reuse.
    """
    # print pageContent
    btfsp = BeautifulSoup(pageContent)
    # Job information block.
    job_detail = btfsp.find('dl', {'id': 'job_detail'})
    jobInfo = job_detail.h1.text
    # Split the header on whitespace runs (re.split copes with the mixed
    # whitespace here better than plain str.split did for the author).
    tempInfo = re.split(r'(?:\s*)', jobInfo)
    jobTitle = tempInfo[1]
    jobName = tempInfo[2]
    job_request = job_detail.find('dd', {'class': 'job_request'})
    reqList = job_request.find_all('p')
    jobAttract = reqList[1].text
    publishTime = reqList[2].text
    # The five <span>s hold, in order: salary, city, experience, education,
    # working hours (per the variable names; verify against live markup).
    itemLists = job_request.find_all('span')
    salary = itemLists[0].text
    workplace = itemLists[1].text
    experience = itemLists[2].text
    education = itemLists[3].text
    worktime = itemLists[4].text

    # Company information block.
    jobCompany = btfsp.find('dl', {'class': 'job_company'})
    # companyName = jobCompany.h2
    companyName = re.split(r'(?:\s*)', jobCompany.h2.text)[1]
    companyInfo = jobCompany.find_all('li')
    # workField = companyInfo[0].text.split(' ',1)
    workField = re.split(r'(?:\s*)|(?:\n*)', companyInfo[0].text)[2]
    # companyScale = companyInfo[1].text
    companyScale = re.split(r'(?:\s*)|(?:\n*)', companyInfo[1].text)[2]
    # homePage = companyInfo[2].text
    homePage = re.split(r'(?:\s*)|(?:\n*)', companyInfo[2].text)[2]
    # currStage = companyInfo[3].text
    currStage = re.split(r'(?:\s*)|(?:\n*)', companyInfo[3].text)[1]
    financeAgent = ''
    # A fifth <li> is only present for some companies (presumably funded
    # ones, given the name — TODO confirm).
    if len(companyInfo) == 5:
        # financeAgent = companyInfo[4].text
        financeAgent = re.split(r'(?:\s*)|(?:\n*)', companyInfo[4].text)[1]
    workAddress = ''
    if jobCompany.find('div', {'class': 'work_addr'}):
        workAddress = jobCompany.find('div', {'class': 'work_addr'})
        # Collapse all internal whitespace in the address text.
        workAddress = ''.join(workAddress.text.split())

    infoList = [companyName, jobTitle, jobName, salary, workplace, experience, education, worktime, jobAttract, publishTime,
                workField, companyScale, homePage, workAddress, currStage, financeAgent]

    return infoList

def SaveToExcel(pageContent):
    # Placeholder — never called; Init() writes the workbook itself via xlwt.
    pass

127

if __name__ == '__main__':
    # Entry point: scrape postings for the default skill; the result is
    # written to Python.xls by Init().
    target_skill = 'Python'
    Init(target_skill)

  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值