Scraping all job listings from Lagou (拉勾网) with the Scrapy framework

## A note before we start

Lagou's anti-crawling mechanism uses cookies to identify Scrapy clients. You therefore need to uncomment the `COOKIES_ENABLED = False` line in settings.py (which disables Scrapy's own cookie handling) and add your Lagou cookie to the project-wide default request headers. With that in place, the spider runs normally:

```python
DEFAULT_REQUEST_HEADERS = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',
    'Cookie': '_ga=GA1.2.2099113890.1534936734; user_trace_token=20180822191855-287db68b-a5fd-11e8-9d3f-525400f775ce; LGUID=20180822191855-287dbc2a-a5fd-11e8-9d3f-525400f775ce; index_location_city=%E5%8C%97%E4%BA%AC; fromsite="localhost:63342"; JSESSIONID=ABAAABAAAFCAAEGFD51330D304C813384D17099E6E0ABD4; _gid=GA1.2.233681856.1535974711; _gat=1; Hm_lvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1535588512,1535588697,1535589004,1535974711; TG-TRACK-CODE=index_navigation; SEARCH_ID=eaec941faa014acbbe5c2c65ebe8187c; LGSID=20180903193836-e4f5d61f-af6d-11e8-8570-525400f775ce; PRE_UTM=; PRE_HOST=; PRE_SITE=https%3A%2F%2Fwww.lagou.com%2F; PRE_LAND=https%3A%2F%2Fwww.lagou.com%2Fzhaopin%2Fqukuailian%2F; Hm_lpvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1535974717; LGRID=20180903193838-e64d8ccc-af6d-11e8-8570-525400f775ce'
}
```
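
For reference, this is the other settings.py change the note above mentions; the line ships commented out in a freshly generated Scrapy project:

```python
# settings.py -- disable Scrapy's cookie middleware so the hard-coded
# Cookie header above is sent verbatim on every request
COOKIES_ENABLED = False
```

The spider itself: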


```python
import re

import scrapy

from my_project.items import LagouItem


class LagouSpider(scrapy.Spider):
    name = 'lagou'
    allowed_domains = ['lagou.com']
    start_urls = ['https://www.lagou.com/']

    def parse(self, response):
        # Collect every category listing-page URL from the home page.
        res_hrefs = re.findall(r'a href="(.*?)" data-lg-tj-id="4O00"', response.text)
        for res_href in res_hrefs:
            yield scrapy.Request(url=res_href, callback=self.parse_details)

    def parse_details(self, response):
        # Collect the detail-page URLs on this listing page; the last
        # match is not a job detail link, so drop it.
        res_detail = re.findall(r'link" href="(.*?)" target="_blank" ', response.text)
        del res_detail[-1]

        for res_info in res_detail:
            yield scrapy.Request(url=res_info, callback=self.parse_info)

        # Follow the "next page" link, if any.
        next_page = re.findall(r'a href="(.*?)" class="page_no"', response.text)
        if next_page:
            yield scrapy.Request(url=next_page[-1], callback=self.parse_details)

    def parse_info(self, response):
        # Uncomment to dump the raw page when debugging the regexes below:
        # with open('222.html', 'wb') as f:
        #     f.write(response.body)

        # Extract the fields we want. The positional indexes ([0], [1],
        # [2]) depend on the fixed order of the "xxx /" spans on the
        # detail page: city, experience, education.
        try:
            lg_title = re.findall(r'span class="name">(.*?)</', response.text)[0]
            lg_salary = re.findall(r'salary">(.*?)</span', response.text)[0]
            lg_address = re.findall(r'span>/(.*?) /</span', response.text)[0]
            lg_experience = re.findall(r'span>(.*?) /</span', response.text)[1]
            lg_study = re.findall(r'pan>(.*?) /</sp', response.text)[2]
            lg_create_time = re.findall(
                r'class="publish_time">(.*?)&nbsp; 发布于拉勾网</p>', response.text)[0]
            lg_job_description = ''.join(
                response.xpath('//dd[@class="job_bt"]/div/p/text()').extract())
            lagou = '拉勾网'
        except IndexError:
            # A regex failed to match -- skip this page rather than
            # build an item from undefined variables.
            return

        item = LagouItem()
        item['lg_title'] = lg_title
        item['lg_salary'] = lg_salary
        item['lg_address'] = lg_address
        item['lg_experience'] = lg_experience
        item['lg_study'] = lg_study
        item['lg_create_time'] = lg_create_time
        item['lg_job_description'] = lg_job_description
        item['lagou'] = lagou

        yield item
```
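
The spider imports `LagouItem` from `my_project.items`, but the post never shows that file. Here is a minimal sketch of a matching items.py, with one field per key the spider assigns (the field set is inferred from the code above, not copied from the author's project):

```python
# items.py -- one Field per key the spider fills in
import scrapy


class LagouItem(scrapy.Item):
    lg_title = scrapy.Field()            # job title
    lg_salary = scrapy.Field()           # salary range
    lg_address = scrapy.Field()          # city
    lg_experience = scrapy.Field()       # required experience
    lg_study = scrapy.Field()            # required education
    lg_create_time = scrapy.Field()      # publish time
    lg_job_description = scrapy.Field()  # full description text
    lagou = scrapy.Field()               # source-site tag ('拉勾网')
```

With the item defined, start the crawl the usual way, e.g. `scrapy crawl lagou -o jobs.json` to write the scraped items to a JSON file.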
