1 Define the items to scrape
Add the following code to items.py:
import scrapy


class Job51Item(scrapy.Item):
    title = scrapy.Field()
    salary = scrapy.Field()
    job_city = scrapy.Field()
    requirement = scrapy.Field()  # experience, education, headcount
    company_name = scrapy.Field()
    company_size = scrapy.Field()
    publish_date = scrapy.Field()
    job_advantage_tags = scrapy.Field()  # benefits/perks
    url = scrapy.Field()
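For a quick sanity check, an item behaves much like a dict, except that it rejects any field not declared above. A minimal sketch (the sample values are made up):

# The values here are invented for illustration only.
item = Job51Item(title="Python developer", salary="15k-25k")
item["job_city"] = "Hangzhou"
print(dict(item))   # {'title': 'Python developer', 'salary': '15k-25k', 'job_city': 'Hangzhou'}
# item["foo"] = 1   # would raise KeyError: Job51Item does not support field: foo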
2 Write the spider
Because of 51job's anti-scraping measures, the job listings are not rendered directly in the page HTML the way the browser displays them; instead, the data is embedded as a JSON object (window.__SEARCH_RESULT__) inside a script tag.
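Before writing the full spider, here is a minimal sketch of the extraction idea: locate the JSON assigned to window.__SEARCH_RESULT__ with a regular expression, then parse it with json.loads. The sample_html string is a made-up stand-in for the real page:

import json
import re

# sample_html is an invented stand-in for the real 51job response body.
sample_html = '<script>window.__SEARCH_RESULT__ = {"total_count":"2"}</script>'
match = re.search(r'window\.__SEARCH_RESULT__ = (.+?)</script>', sample_html)
data = json.loads(match.group(1))
print(data["total_count"])  # -> 2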
Create a new file job_51.py in the spiders folder and add the following code:
# -*- coding: utf-8 -*-
import json
import re

import scrapy
from scrapy.http import Request

from job.items import Job51Item


class Job51Spider(scrapy.Spider):
    name = 'job51'
    allowed_domains = ['jobs.51job.com', 'search.51job.com']
    # Page 1 of the search results; parse() queues pages 2-9 below.
    start_urls = [
        'https://search.51job.com/list/090200,000000,0000,00,9,99,python,2,'
        '1.html?lang=c&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&ord_field=0'
        '&dibiaoid=0&line=&welfare=']

    def parse(self, response):
        # The page is GBK-encoded; decode it explicitly.
        body = response.body.decode("gbk")
        # Dump the raw page to a local file for debugging.
        with open("b.html", "w", encoding="utf-8") as f:
            f.write(body)
        # The listings sit in the JSON assigned to window.__SEARCH_RESULT__;
        # re-append the closing brace that the regex consumes.
        data = re.findall(r'window\.__SEARCH_RESULT__ =(.+)}</script>', body)[0] + "}"
        data = json.loads(data)
        for result in data["engine_search_result"]:
            item = Job51Item()
            item["requirement"] = result["attribute_text"]
            item["url"] = result["job_href"]
            item["title"] = result["job_name"]
            item["salary"] = result["providesalary_text"]
            item["job_city"] = result["workarea_text"]
            item["publish_date"] = result["issuedate"]
            item["job_advantage_tags"] = result["jobwelf"]
            item["company_name"] = result["company_name"]
            item["company_size"] = result["companysize_text"]
            yield item
        # Queue pages 2-9; Scrapy's duplicate filter drops any request
        # that has already been scheduled.
        for i in range(2, 10):
            url = f"https://search.51job.com/list/090200,000000,0000,00,9,99,python,2,{i}.html?lang=c&postchannel" \
                  f"=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line" \
                  f"=&welfare="
            yield Request(url=url, callback=self.parse)
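The spider can now be run from the project root with the scrapy crawl job51 command. Alternatively, a small launcher script works too; a minimal sketch:

from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

from job.spiders.job_51 import Job51Spider

# Load the project settings (including the pipeline enabled in the
# next section) and run the spider in-process.
process = CrawlerProcess(get_project_settings())
process.crawl(Job51Spider)
process.start()  # blocks until the crawl finishes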
3 Define the pipeline
3.1 Enable the pipeline
Add the following code to settings.py:
ITEM_PIPELINES = {
    'job.pipelines.JobPipeline': 300,
}
Note: if this block is already present in settings.py but commented out, simply uncomment it.
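The number 300 is the pipeline's priority: values range from 0 to 1000, and pipelines with lower numbers run first. As an illustration (the DuplicatesPipeline name is hypothetical), a second pipeline could be ordered ahead of this one:

ITEM_PIPELINES = {
    'job.pipelines.DuplicatesPipeline': 200,  # hypothetical; runs first
    'job.pipelines.JobPipeline': 300,         # runs second
}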
3.2 Write the pipeline code
Write the following code in pipelines.py:
import json

from itemadapter import ItemAdapter


class JobPipeline:
    def open_spider(self, spider):
        self.fp = open('result.json', 'w', encoding='utf-8')
        self.fp.write("[")
        self.first_item = True

    def process_item(self, item, spider):
        # Write a comma before every item except the first, so the
        # file stays valid JSON (no trailing comma before the "]").
        if not self.first_item:
            self.fp.write(",\n")
        self.first_item = False
        data = json.dumps(ItemAdapter(item).asdict(), ensure_ascii=False)
        self.fp.write(data)
        return item

    def close_spider(self, spider):
        self.fp.write("]")
        self.fp.close()
        print("spider end")
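After the crawl finishes, result.json holds one object per posting. A quick sketch to verify that the output loads cleanly:

import json

with open('result.json', encoding='utf-8') as f:
    jobs = json.load(f)
print(len(jobs), "postings saved")
print(jobs[0]["title"] if jobs else "no results")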