创建爬虫
scrapy startproject boos
cd boos
scrapy genspider -t crawl zhiping "zhipin.com"
爬虫代码
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from pa_chong.Scrapy.boos.boos.items import BoosItem
class ZhipingSpider(CrawlSpider):
    """Crawl spider for python job postings on zhipin.com (BOSS Zhipin).

    Follows paginated job-list pages and parses each job-detail page
    into a ``BoosItem``.
    """
    name = 'zhiping'
    allowed_domains = ['zhipin.com']
    start_urls = ['https://www.zhipin.com/c100010000/?query=python&page=1']

    rules = (
        # Rule matching job-list (pagination) page URLs: follow, don't parse.
        Rule(LinkExtractor(allow=r'.+\?query=python&page=\d'), follow=True),
        # Rule matching job-detail page URLs: parse with parse_job, don't follow further.
        Rule(LinkExtractor(allow=r'.+job_detail/.+~\.html'), callback='parse_job', follow=False),
    )

    def parse_job(self, response):
        """Parse a job-detail page.

        :param response: scrapy Response for a job_detail page
        :return: a populated ``BoosItem``
        """
        # ``default=''`` keeps extraction from raising AttributeError
        # when a node is missing (original chained .get().strip() on None).
        title = response.xpath('//div[@class="name"]/h1/text()').get(default='').strip()
        salary = response.xpath('//span[@class="badge"]/text()').get(default='').strip()
        # FIX: original XPath had ``div{@class=...]`` — invalid syntax
        # (``{`` instead of ``[``) that would raise ValueError at runtime.
        job_info = response.xpath(
            '//div[@class="job-primary detail-box"]/div[@class="info-primary"]/p//text()'
        ).getall()
        city = job_info[0]
        work_years = job_info[1]
        education = job_info[2]
        company = response.xpath(
            '//div[@class="info-company"]/h3[@class="name"]/a/text()'
        ).get(default='').strip()
        item = BoosItem(title=title, salary=salary, city=city,
                        work_years=work_years, education=education,
                        company=company)
        return item