scrapy爬取前程无忧51job网职位信息并存储到数据库

  1. spiders中代码如下
import scrapy
from scrapy import Request
from QianCheng.items import QianchengItem
import re

class ExampleSpider(scrapy.Spider):
    """Crawl python job postings for Zhengzhou from 51job.

    Page rendering, keyword search and pagination are performed by a
    Selenium downloader middleware; ``meta['page']`` tells the middleware
    what to do:
      '0' -> open the landing page, type the keyword and click search
      '1' -> plain job-detail page, just load it
      '2' -> result-list page, click the "next page" link
    """
    name = '51job'

    def start_requests(self):
        # Entry point: the Zhengzhou landing page (middleware performs the
        # actual keyword search, see meta page '0').
        start_url = 'https://www.51job.com/zhengzhou/'
        yield Request(url=start_url, callback=self.parse,
                      dont_filter=True, meta={'page': '0'})

    def parse(self, response):
        # Each search-result row is a div.el; follow every job-detail link.
        for row in response.xpath('//div[@class = "el"]'):
            for detail_url in row.xpath('p/span[1]/a[@href]/@href').extract():
                yield Request(url=detail_url, callback=self.parse_detail,
                              meta={'page': '1'})
        # On a search-result page, re-request the SAME url so the middleware
        # can click "next page" (meta page '2').  dont_filter=True is
        # required here: the URL is identical to the one just crawled and
        # would otherwise be silently dropped by Scrapy's duplicate filter,
        # so pagination would never advance.
        if re.search(r'search', response.url):
            yield Request(url=response.url, callback=self.parse,
                          dont_filter=True, meta={'page': '2'})

    def parse_detail(self, response):
        """Extract one job posting into a QianchengItem."""
        item = QianchengItem()
        item['job_name'] = ''.join(response.xpath('//h1[@title]/@title').extract())
        item['company'] = ''.join(response.xpath('//p[@class="cname"]/a[@title]/@title').extract())
        # NOTE: the field is spelled 'saray' (salary) to match items.py and
        # the pipeline's table schema; keep them in sync if renamed.
        item['saray'] = ''.join(response.xpath('//div[@class="cn"]/strong/text()').extract())
        item['company_desc'] = ''.join(response.xpath('//div[@class="tmsg inbox"]/text()').extract()).strip()
        yield item
  2. items.py 中代码如下
import scrapy

class QianchengItem(scrapy.Item):
    """Container for one 51job posting produced by the spider."""

    # Job title as shown on the detail page.
    job_name = scrapy.Field()
    # Hiring company name.
    company = scrapy.Field()
    # Salary text ('saray' spelling kept for compatibility with the pipeline).
    saray = scrapy.Field()
    # Company description paragraph, whitespace-stripped by the spider.
    company_desc = scrapy.Field()
  3. pipelines.py 获取数据并进行存储操作
import sqlite3
  
class QianchengPipeline(object):
    """Persist scraped job items into a local SQLite database file."""

    def __init__(self):
        # One connection per pipeline instance; the table is created on
        # first use so repeated runs append to the same database.
        self.conn = sqlite3.connect("qiancheng.db")
        self.cursor = self.conn.cursor()
        self.cursor.execute(
            "create table IF NOT EXISTS zhaopin("
            "job_name varchar(200),"
            "company varchar(500),"
            "saray varchar(100),"
            "company_desc varchar(100))")

    def process_item(self, item, spider):
        """Insert one item and return it unchanged for downstream pipelines.

        Uses '?' placeholders instead of %-string interpolation: the old
        form broke on any quote character in scraped text and was open to
        SQL injection.
        """
        self.cursor.execute(
            "insert into zhaopin values(?,?,?,?)",
            (item["job_name"], item["company"], item["saray"], item["company_desc"]))
        self.conn.commit()
        return item

    def close_spider(self, spider):
        # Called by Scrapy on shutdown; release the database handle.
        self.conn.close()

  4. middlewares.py 中间件中
    from selenium import webdriver
    from selenium.webdriver.firefox.options import Options as FOptions
    import time
    from scrapy.http import HtmlResponse

class SeleniumMiddlewares(object):
    """Downloader middleware rendering 51job pages with a real Firefox.

    The spider tags each request with ``meta['page']``:
      '0' -> open the landing page, type the keyword and click search
      '1' -> job-detail page, just load it
      '2' -> result-list page, click the "next page" link

    (Methods are indented inside the class here; in the original paste
    they sat at module level, which would raise at import time.)
    """

    def __init__(self):
        self.options = FOptions()
        # Uncomment to run Firefox without a visible window.
        # self.options.add_argument("-headless")
        self.browser = webdriver.Firefox(
            executable_path="/home/hello/Downloads/geckodriver",
            firefox_options=self.options)

    def process_request(self, request, spider):
        page = int(request.meta['page'])
        if page == 0:
            # Landing page: fill the keyword box and submit the search.
            self.browser.get(request.url)
            input_name = self.browser.find_element_by_xpath('//*[@id="kwdselectid"]')
            input_name.click()
            input_name.send_keys('python')
            btn_search = self.browser.find_element_by_xpath('//*[@id="supp"]/div[1]/div/div[1]/button')
            btn_search.click()
            time.sleep(3)
        elif page == 1:
            # Detail page: load and give scripts time to render.
            self.browser.get(request.url)
            time.sleep(3)
        elif page == 2:
            # Result page: advance pagination by clicking "next page".
            self.browser.get(request.url)
            next_page = self.browser.find_element_by_xpath('//a[contains(text(),"下一页")]')
            next_page.click()
            # Wait for the next result page to render; without this the
            # snapshot below captures the page from BEFORE the click.
            time.sleep(3)

        # Hand the rendered DOM back to Scrapy as a normal response.
        return HtmlResponse(url=self.browser.current_url,
                            body=self.browser.page_source,
                            encoding="utf-8",
                            request=request)
  5. settings.py 中:
BOT_NAME = 'QianCheng'
SPIDER_MODULES = ['QianCheng.spiders']
NEWSPIDER_MODULE = 'QianCheng.spiders'
# 51job's robots.txt disallows crawling; ignore it for this demo project.
ROBOTSTXT_OBEY = False
# Route every request through the Selenium middleware so JS-driven
# search/pagination pages are fully rendered before parsing.
DOWNLOADER_MIDDLEWARES = {
   'QianCheng.middlewares.SeleniumMiddlewares': 543,
}
# Persist scraped items via the SQLite pipeline.
ITEM_PIPELINES = {
   'QianCheng.pipelines.QianchengPipeline': 300,
}
  6. 存储结果如下:
    数据库存储内容
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值