pythonz之Scrapy+selenium爬取腾讯招聘案例

  1. 运行命令创建项目:scrapy startproject seleniumScrapySpider
  2. 进入项目目录:cd .\seleniumScrapySpider\
  3. 运行命令创建爬虫:scrapy genspider seleniumTxWork careers.tencent.com
  4. 目录结构说明
    |-- scrapySpider 项目目录
    |   |-- scrapySpider 项目目录
    |   |   |-- spiders 爬虫文件目录
    |   |   |   |-- seleniumTxWork.py 爬虫文件
    |   |   |-- items.py 定义数据模型文件,类似于数据库表的模式或数据结构的定义
    |   |   |-- middlewares.py 定义中间件文件,用于对请求和响应进行处理和修改
    |   |   |-- pipelines.py 定义数据处理管道(Pipeline)文件,用于处理爬取到的数据的组件
    |   |   |-- settings.py 定义配置文件,用于配置 Scrapy 项目的各种设置选项和参数
    |   |-- scrapy.cfg 框架中的配置文件,用于指定项目的结构和元数据信息
    
  5. 设置全局chrome驱动,把chrome驱动文件复制到miniconda根目录,不设置驱动运行程序会报“‘chromedriver’ executable needs to be in PATH”错误
  6. 创建快代理文件scrapySpider>kuaidaili.py:https://www.kuaidaili.com/
    import requests
    
    
    class Kuaidaili():
        request_url = {
            # 获取代理ip前面
            'getIpSignature': 'https://auth.kdlapi.com/api/get_secret_token',
            # 获取代理ip
            'getIp': 'https://dps.kdlapi.com/api/getdps?secret_id=oy2q5xu76k4s8olx59et&num=1&signature={}'
        }
    
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36'
        }
    
        ip_use = '购买代理用户名'
        ip_password = '购买代理密码'
    
        def __init__(self):
            '''创建request会话对象'''
            self.request_session = requests.Session()
            self.request_session.headers.update(self.headers)
    
        # 获取代理ip签名
        @classmethod
        def get_ip_url(cls):
            par = {
                'secret_id': 'oy2q5xu76k4s8olx59et',
                'secret_key': '5xg6gvouc0vszfw0kxs1a8vrw1r6ity7'
            }
            response = requests.post(cls.request_url['getIpSignature'],data=par)
            response_data = response.json()
            return cls.request_url['getIp'].format(response_data['data']['secret_token'])
    
        @classmethod
        def get_ip(cls):
            url = cls.get_ip_url()
            response = requests.get(url)
            return f'http://{cls.ip_use}:{cls.ip_password}@{response.text}/'
    
    if __name__ == '__main__':
        kuaidaili = Kuaidaili()
        print(kuaidaili.get_ip())
    
  7. 爬取腾讯招聘案例
  • seleniumTxWork.py
    import scrapy
    import scrapy.cmdline
    from scrapy.http import HtmlResponse, Request
    from seleniumScrapySpider.items import SeleniumScrapySpiderItem
    import loguru
    import re
    
    
    class SeleniumTxWorkSpider(scrapy.Spider):
        name = "seleniumTxWork"
        allowed_domains = ["careers.tencent.com"]
        start_urls = ["https://careers.tencent.com/search.html?index={}&keyword=python"]
    
        def parse(self, response: HtmlResponse, **kwargs):
            work_list = response.xpath('//div[@class="recruit-list"]')
            reg = re.compile(r'\n|\s')
            for work in work_list:
                work_item = SeleniumScrapySpiderItem()
                work_item['workName'] = re.sub(reg, '',
                                               work.xpath('.//span[@class="job-recruit-title"]/text()').extract_first())
                work_item['workAddress'] = re.sub(reg, '', work.xpath(
                    './/span[@class="job-recruit-location"]/text()').extract_first())
                work_item['workResponsibility'] = re.sub(reg, '',
                                                         work.xpath('.//p[@class="recruit-text"]/text()').extract_first())
                work_item['workYearsName'] = re.sub(reg, '', work.xpath(
                    './/p[@class="recruit-tips"]/span[5]/text()').extract_first())
                yield work_item
            '''根据分页总数获取其他页数据'''
            page_total = int(response.xpath('//ul[@class="page-list"]/li[last() - 1]/span/text()').extract_first())
            active_page = int(
                response.xpath('//ul[@class="page-list"]/li[@class="page-li active"]/span/text()').extract_first())
            if page_total != active_page:
                active_page += 1
                loguru.logger.info(f'******************总页数{page_total},开始抓取第 {active_page} 页数据******************')
                yield Request(url=self.start_urls[0].format(active_page))
            else:
                loguru.logger.info('******************数据已全部抓取完毕******************')
    
        def start_requests(self):
            loguru.logger.info('******************开始抓取第 1 页数据******************')
            yield Request(url=self.start_urls[0].format(1))
            '''获取固定页数数据'''
            # for i in range(1,6):
            #     loguru.logger.info(f'******************开始抓取第 {i} 页数据******************')
            #     yield Request(url=self.start_urls[0].format(i))
    
    
    if __name__ == '__main__':
        scrapy.cmdline.execute('scrapy crawl seleniumTxWork'.split())
    
    
  • settings.py
    # Scrapy settings for seleniumScrapySpider project
    #
    # For simplicity, this file contains only settings considered important or
    # commonly used. You can find more settings consulting the documentation:
    #
    #     https://docs.scrapy.org/en/latest/topics/settings.html
    #     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
    #     https://docs.scrapy.org/en/latest/topics/spider-middleware.html
    
    BOT_NAME = "seleniumScrapySpider"
    
    SPIDER_MODULES = ["seleniumScrapySpider.spiders"]
    NEWSPIDER_MODULE = "seleniumScrapySpider.spiders"
    #需要换上自己SCRAPEOPS中的key
    SCRAPEOPS_API_KEY = 'b0a936d4-762b-4270-9e87-a2883c2a683e'
    
    # Crawl responsibly by identifying yourself (and your website) on the user-agent
    # USER_AGENT = "seleniumScrapySpider (+http://www.yourdomain.com)"
    
    # Obey robots.txt rules
    ROBOTSTXT_OBEY = False
    
    # Configure maximum concurrent requests performed by Scrapy (default: 16)
    # CONCURRENT_REQUESTS = 32
    
    # Configure a delay for requests for the same website (default: 0)
    # See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
    # See also autothrottle settings and docs
    # DOWNLOAD_DELAY = 3
    # The download delay setting will honor only one of:
    # CONCURRENT_REQUESTS_PER_DOMAIN = 16
    # CONCURRENT_REQUESTS_PER_IP = 16
    
    # Disable cookies (enabled by default)
    # COOKIES_ENABLED = False
    
    # Disable Telnet Console (enabled by default)
    # TELNETCONSOLE_ENABLED = False
    
    # Override the default request headers:
    DEFAULT_REQUEST_HEADERS = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
        "Accept-Language": "en",
    }
    
    # Enable or disable spider middlewares
    # See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
    # SPIDER_MIDDLEWARES = {
    #    "seleniumScrapySpider.middlewares.SeleniumscrapyspiderSpiderMiddleware": 543,
    # }
    
    # Enable or disable downloader middlewares
    # See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
    DOWNLOADER_MIDDLEWARES = {
        "seleniumScrapySpider.middlewares.SeleniumDownloaderMiddleware": 543,
        'scrapeops_scrapy.middleware.retry.RetryMiddleware': 550,
    }
    
    # Enable or disable extensions
    # See https://docs.scrapy.org/en/latest/topics/extensions.html
    EXTENSIONS = {
        'scrapeops_scrapy.extension.ScrapeOpsMonitor': 500,
    }
    
    # Configure item pipelines
    # See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
    ITEM_PIPELINES = {
        "seleniumScrapySpider.pipelines.MysqlPipeLine": 300,
    }
    
    # Enable and configure the AutoThrottle extension (disabled by default)
    # See https://docs.scrapy.org/en/latest/topics/autothrottle.html
    # AUTOTHROTTLE_ENABLED = True
    # The initial download delay
    # AUTOTHROTTLE_START_DELAY = 5
    # The maximum download delay to be set in case of high latencies
    # AUTOTHROTTLE_MAX_DELAY = 60
    # The average number of requests Scrapy should be sending in parallel to
    # each remote server
    # AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
    # Enable showing throttling stats for every response received:
    # AUTOTHROTTLE_DEBUG = False
    
    # Enable and configure HTTP caching (disabled by default)
    # See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
    # HTTPCACHE_ENABLED = True
    # HTTPCACHE_EXPIRATION_SECS = 0
    # HTTPCACHE_DIR = "httpcache"
    # HTTPCACHE_IGNORE_HTTP_CODES = []
    # HTTPCACHE_STORAGE = "scrapy.extensions.httpcache.FilesystemCacheStorage"
    
    # Set settings whose default value is deprecated to a future-proof value
    REQUEST_FINGERPRINTER_IMPLEMENTATION = "2.7"
    TWISTED_REACTOR = "twisted.internet.asyncioreactor.AsyncioSelectorReactor"
    FEED_EXPORT_ENCODING = "utf-8"
    
    # 日志配置
    LOG_FILE = 'log.log'
    LOG_FILE_APPEND = False
    LOG_LEVEL = 'INFO'
    
    
  • items.py
    # Define here the models for your scraped items
    #
    # See documentation in:
    # https://docs.scrapy.org/en/latest/topics/items.html
    
    import scrapy
    
    
    class SeleniumScrapySpiderItem(scrapy.Item):
       workName = scrapy.Field()
       workAddress = scrapy.Field()
       workResponsibility = scrapy.Field()
       workYearsName = scrapy.Field()
    
    
  • pipelines.py
    # Define your item pipelines here
    #
    # Don't forget to add your pipeline to the ITEM_PIPELINES setting
    # See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
    
    
    # useful for handling different item types with a single interface
    from itemadapter import ItemAdapter
    import pymysql
    import loguru
    
    class MysqlPipeLine:
    
        def open_spider(self, spider):
            self.spider = spider
            self.mysql = pymysql.connect(host='localhost',port=3306,user='root',password='root')
            self.cursor = self.mysql.cursor()
            self.create_db('job')
    
        '''创建数据库'''
        def create_db(self,db_name):
             sql = f'''CREATE DATABASE IF NOT EXISTS {db_name}'''
             try:
                 self.cursor.execute(sql)
                 self.mysql.select_db(db_name)
                 self.create_job_table()
             except Exception as e:
                 loguru.logger.info(f'创建{db_name}数据库失败:{e}')
    
        '''创建表'''
        def create_job_table(self):
            sql = '''
                 CREATE TABLE IF NOT EXISTS txWork(
                        workId INT AUTO_INCREMENT,
                        workName VARCHAR(255),
                        workAddress VARCHAR(255),
                        workResponsibility TEXT,
                        workYearsName VARCHAR(255),
                        PRIMARY KEY(workId)        
                )
               '''
            try:
                self.cursor.execute(sql)
            except Exception as e:
                loguru.logger.info(f'创建TxWork表失败:{e}')
    
        def process_item(self, item, spider):
            sql = f'''INSERT INTO txWork(workName,workAddress,workResponsibility,workYearsName) VALUES(%(workName)s,%(workAddress)s,%(workResponsibility)s,%(workYearsName)s)'''
            try:
                self.cursor.execute(sql, dict(item))
                self.mysql.commit()
                loguru.logger.info(f'”{item["workName"]}”插入成功')
            except Exception as e:
                loguru.logger.info(f'”{item["workName"]}”插入失败:{e}')
                self.mysql.rollback()
            return item
    
        def close_spider(self,spider):
            self.mysql.close()
    
    
  • middlewares.py
    # Define here the models for your spider middleware
    #
    # See documentation in:
    # https://docs.scrapy.org/en/latest/topics/spider-middleware.html
    
    from scrapy import signals
    from scrapy.http import HtmlResponse
    from selenium.webdriver import Chrome,ChromeOptions
    from selenium.webdriver.support.wait import WebDriverWait
    from selenium.webdriver.support import expected_conditions
    from selenium.webdriver.common.by import By
    from fake_useragent import UserAgent
    from seleniumScrapySpider.kuaidaili import Kuaidaili
    import loguru
    
    # useful for handling different item types with a single interface
    from itemadapter import is_item, ItemAdapter
    
    
    class SeleniumDownloaderMiddleware:
    
        def __init__(self):
            chrome_options = ChromeOptions()
            chrome_options.add_experimental_option('detach', True)  # 不自动关闭浏览器
            chrome_options.add_experimental_option('excludeSwitches',['enable-automation'])  # 以开发者模式启动调试chrome,可以去掉提示受到自动软件控制
            chrome_options.add_experimental_option('useAutomationExtension', False)  # 去掉提示以开发者模式调用
            chrome_options.add_argument('--start-maximized')  # 最大化运行(全屏窗口)
            self.browser = Chrome(options=chrome_options)
            self.ua = UserAgent()
            self.kuaidaili = Kuaidaili()
            #初始化一个代理ip
            self.first_ip = self.kuaidaili.get_ip()
    
        @classmethod
        def from_crawler(cls, crawler):
            # This method is used by Scrapy to create your spiders.
            s = cls()
            crawler.signals.connect(s.spider_closed, signal=signals.spider_closed)
            return s
    
        def process_request(self, request, spider):
            # 设置UA
            request.headers['User-Agent'] = self.ua.random
            # 设置代理
            request.meta['proxy'] = self.first_ip
            request.meta['download_timeout'] = 5
            return None
    
        def process_response(self, request, response, spider):
            loguru.logger.info(f'代理ip:{request.meta["proxy"]}')
            if response.status == 200:
                self.browser.get(request.url)
                wait = WebDriverWait(self.browser, 10)  # 设置等待时间10s
                wait.until(expected_conditions.presence_of_element_located((By.CLASS_NAME,'recruit-list')))  # 设置判断条件:等待id='kw'的元素加载完成
                return HtmlResponse(url=request.url,body=self.browser.page_source,encoding='utf-8',request=request)
    
            # 代理失效小重新设置代理,并返回request重新请求
            request.meta['proxy'] = self.kuaidaili.get_ip()
            request.meta['download_timeout'] = 2
            return request
    
        def process_exception(self, request, exception, spider):
            # Called when a download handler or a process_request()
            # (from other downloader middleware) raises an exception.
    
            # Must either:
            # - return None: continue processing this exception
            # - return a Response object: stops process_exception() chain
            # - return a Request object: stops process_exception() chain
            pass
    
        def spider_closed(self, spider):
            self.browser.close()
    
    
  • 0
    点赞
  • 3
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值