爬取拉勾网所有 Python 职位并保存到 Excel 表格(面向对象方式)

#1.把之间案例,使用bs4,正则,xpath,进行数据提取。#2.爬取拉钩网上的所有python职位。

from urllib import request, parse
import json
import random

# xlsxwriter is used to create the Excel workbook/worksheet objects
import xlsxwriter

class python_position:
    """Scrape every Python job listing for Beijing from Lagou's AJAX
    endpoint and write the results into an Excel file (test1.xlsx).

    Instantiating the class runs the whole crawl: it creates the
    workbook, iterates pages 1..30, fetches each page with a randomly
    chosen User-Agent, and writes one row per job posting.
    """

    def __init__(self, page, number):
        """
        :param page:   starting page number (the crawl loop overwrites it)
        :param number: running row counter; 0 means "header row not yet
                       written"
        """
        self.page = page
        self.number = number
        # Build the workbook/worksheet first so pages can write into it.
        self.create_book()
        # Drive the page loop (this performs all the network requests).
        self.page_chuli()
        print(self.page, self.number)  # e.g. 1 0 before the crawl mutates them

    def create_book(self):
        """Create the Excel workbook and the worksheet rows are written to."""
        self.workbook_attr = xlsxwriter.Workbook('test1.xlsx')
        self.worksheet = self.workbook_attr.add_worksheet('test1')

    def page_chuli(self):
        """Loop over result pages 1..30, fetching each one, then close the
        workbook (closing flushes the file to disk)."""
        for page in range(1, 31):
            self.page = page
            # Picks a random UA and triggers the actual request chain.
            self.user_agent()
            # NOTE(review): number is also incremented per-result in
            # workbook(); this extra per-page bump leaves a blank row
            # between pages — preserved from the original logic.
            self.number += 1
        self.workbook_attr.close()

    def user_agent(self):
        """Choose a random browser User-Agent, then fetch the current page."""
        # Rotating the UA makes successive requests look like different browsers.
        user_agent_list = ['Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.152 Safari/537.36','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36','Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:58.0) Gecko/20100101 Firefox/58.0','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0','Mozilla/5.0 (Windows NT 6.1; WOW64; rv:31.0) Gecko/20100101 Firefox/31.0','Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36','Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.152 Safari/537.36',"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.89 Safari/537.36","Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0"]
        self.user_agent_str = random.choice(user_agent_list)
        self.lagou()

    def lagou(self):
        """POST to Lagou's position-search AJAX endpoint for the current
        page, parse the JSON response, and hand the result list to
        workbook() for writing."""
        base_url = "https://www.lagou.com/jobs/positionAjax.json?city=%E5%8C%97%E4%BA%AC&needAddtionalResult=false&isSchoolJob=0"
        # The site expects first=true only on the initial page request.
        if self.page == 1:
            first = 'true'
        else:
            first = 'false'
        data = {'first': first, 'pn': self.page, 'kd': 'python'}
        print('page:', self.page)
        # URL-encode the form body; its length feeds the Content-Length header.
        data = parse.urlencode(data)
        # Content-Length and User-Agent are the headers the site actually
        # checks; header values are passed as str for consistency.
        headers = {'Accept': 'application/json, text/javascript, */*; q=0.01',
                   'Accept-Language': 'zh-CN,zh;q=0.8',
                   'Connection': 'keep-alive',
                   'Content-Length': str(len(data)),
                   'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
                   'Cookie': 'user_trace_token=20180310205250-ccfd21f6-5b57-4e04-b90c-5e547e18d391; LGUID=20180310205255-f3afa6e4-2461-11e8-a8b5-525400f775ce; hideSliderBanner20180305WithTopBannerC=1; X_HTTP_TOKEN=673c8ae0b29d830c65e9812a6aeeb211; ab_test_random_num=0; JSESSIONID=ABAAABAAADEAAFI0BD8484557BF60A48BF2BDD6AA4C5D33; _putrc=318C0D90043747B6123F89F2B170EADC; login=true; unick=%E5%BC%A0%E6%B3%A2; showExpriedIndex=1; showExpriedCompanyHome=1; showExpriedMyPublish=1; hasDeliver=0; gate_login_token=d46c3e3008cb0364e7b47d9d261956a39273c72d679a1b0eb644e03620c100fa; TG-TRACK-CODE=index_navigation; _gid=GA1.2.1883607132.1520686376; _ga=GA1.2.2068283511.1520686375; LGSID=20180310215122-1e408aca-246a-11e8-a8ed-525400f775ce; LGRID=20180310233852-22b0d3ee-2479-11e8-a921-525400f775ce; Hm_lvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1520686378,1520689884; Hm_lpvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1520696337; SEARCH_ID=458b8d44186948ceb472c3d662f08528; index_location_city=%E5%8C%97%E4%BA%AC',
                   'Host': 'www.lagou.com',
                   'Origin': 'https://www.lagou.com',
                   'Referer': 'https://www.lagou.com/jobs/list_python?city=%E5%8C%97%E4%BA%AC&cl=false&fromSearch=true&labelWords=&suginput=',
                   "User-Agent": self.user_agent_str,
                   'X-Anit-Forge-Code': '0',
                   'X-Anit-Forge-Token': 'None',
                   'X-Requested-With': 'XMLHttpRequest'}
        req = request.Request(url=base_url, data=bytes(data, encoding='utf-8'), headers=headers)
        response = request.urlopen(req)
        html = response.read()
        html = html.decode('utf-8')
        # Parse the JSON payload; the jobs live under content.positionResult.result.
        json_data = json.loads(html)
        positionResult = json_data['content']['positionResult']
        self.result_list = positionResult['result']
        print(self.result_list)
        self.workbook()

    def workbook(self):
        """Write every job dict from the current page into the worksheet,
        advancing the row counter once per job."""
        for result in self.result_list:
            self.worksheet1(result)
            self.number += 1
            print(self.number)
        print('~~~~~~~~~~~~~~~~~~~~~haha~~~~~~~~~~~~~~~~~~~~~~~')  # page separator

    def worksheet1(self, result):
        """Write one job posting (a dict) as a worksheet row.

        On the very first row (number == 0) the dict's keys are also
        written as the header row and cached in self.keys_list so later
        rows emit values in the same column order.
        """
        keys = list(result.keys())
        if self.number == 0:
            # First record: remember the column order and write the header.
            self.keys_list = keys
            for i in range(len(keys)):
                # worksheet.write(row, col, value)
                print(keys[i], type(keys[i]), i)
                self.worksheet.write(0, i, str(keys[i]))
            print('~~~~~~~~~~~~~~~~~~~~0~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
        # Emit values in the cached header order, not this dict's own order.
        values = []
        for k in self.keys_list:
            values.append(result[k])
        for i in range(len(values)):
            print(str(values[i]), i, len(values))
            # +1 skips the header row.
            self.worksheet.write(self.number + 1, i, str(values[i]))
        print('------------------------1-------------------------------')

if __name__ == '__main__':
    # Kick off the crawl: start at page 1 with the row counter at 0.
    # Instantiation alone runs the whole scrape (see python_position.__init__).
    lagou_python = python_position(1, 0)

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值