- seleniumTxWork.py
import scrapy
import scrapy.cmdline
from scrapy.http import HtmlResponse, Request
from seleniumScrapySpider.items import SeleniumScrapySpiderItem
import loguru
import re
class SeleniumTxWorkSpider(scrapy.Spider):
    name = "seleniumTxWork"
    allowed_domains = ["careers.tencent.com"]
    start_urls = ["https://careers.tencent.com/search.html?index={}&keyword=python"]

    def parse(self, response: HtmlResponse, **kwargs):
        work_list = response.xpath('//div[@class="recruit-list"]')
        reg = re.compile(r'\s')  # \s already matches \n, so one character class suffices
        for work in work_list:
            work_item = SeleniumScrapySpiderItem()
            # extract_first(default='') keeps re.sub from crashing when a node is missing
            work_item['workName'] = re.sub(reg, '', work.xpath('.//span[@class="job-recruit-title"]/text()').extract_first(default=''))
            work_item['workAddress'] = re.sub(reg, '', work.xpath('.//span[@class="job-recruit-location"]/text()').extract_first(default=''))
            work_item['workResponsibility'] = re.sub(reg, '', work.xpath('.//p[@class="recruit-text"]/text()').extract_first(default=''))
            work_item['workYearsName'] = re.sub(reg, '', work.xpath('.//p[@class="recruit-tips"]/span[5]/text()').extract_first(default=''))
            yield work_item
        # Fetch the remaining pages based on the total page count
        page_total = int(response.xpath('//ul[@class="page-list"]/li[last() - 1]/span/text()').extract_first())
        active_page = int(
            response.xpath('//ul[@class="page-list"]/li[@class="page-li active"]/span/text()').extract_first())
        if page_total != active_page:
            active_page += 1
            loguru.logger.info(f'****************** {page_total} pages in total, crawling page {active_page} ******************')
            yield Request(url=self.start_urls[0].format(active_page))
        else:
            loguru.logger.info('****************** All pages have been crawled ******************')

    def start_requests(self):
        loguru.logger.info('****************** Crawling page 1 ******************')
        yield Request(url=self.start_urls[0].format(1))
        # Alternative: crawl a fixed number of pages
        # for i in range(1, 6):
        #     loguru.logger.info(f'****************** Crawling page {i} ******************')
        #     yield Request(url=self.start_urls[0].format(i))

if __name__ == '__main__':
    scrapy.cmdline.execute('scrapy crawl seleniumTxWork'.split())
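The __main__ block above shells out to the Scrapy CLI. As an alternative, the spider can be launched programmatically with CrawlerProcess; a minimal sketch (run.py is a hypothetical file name, and the script assumes it runs from the project root so settings.py resolves):

# run.py - programmatic alternative to scrapy.cmdline.execute (sketch, not part of the original project)
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

if __name__ == '__main__':
    process = CrawlerProcess(get_project_settings())  # loads the project's settings.py
    process.crawl('seleniumTxWork')  # spider name as registered above
    process.start()  # blocks until the crawl finishes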
- settings.py
# Scrapy settings for seleniumScrapySpider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = "seleniumScrapySpider"
SPIDER_MODULES = ["seleniumScrapySpider.spiders"]
NEWSPIDER_MODULE = "seleniumScrapySpider.spiders"
# Replace this with your own ScrapeOps API key
SCRAPEOPS_API_KEY = 'b0a936d4-762b-4270-9e87-a2883c2a683e'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = "seleniumScrapySpider (+http://www.yourdomain.com)"
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
# COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False
# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
    "Accept-Language": "en",
}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
# "seleniumScrapySpider.middlewares.SeleniumscrapyspiderSpiderMiddleware": 543,
# }
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
    "seleniumScrapySpider.middlewares.SeleniumDownloaderMiddleware": 543,
    "scrapeops_scrapy.middleware.retry.RetryMiddleware": 550,
}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
EXTENSIONS = {
    "scrapeops_scrapy.extension.ScrapeOpsMonitor": 500,
}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    "seleniumScrapySpider.pipelines.MysqlPipeLine": 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = "httpcache"
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = "scrapy.extensions.httpcache.FilesystemCacheStorage"
# Set settings whose default value is deprecated to a future-proof value
REQUEST_FINGERPRINTER_IMPLEMENTATION = "2.7"
TWISTED_REACTOR = "twisted.internet.asyncioreactor.AsyncioSelectorReactor"
FEED_EXPORT_ENCODING = "utf-8"
# Logging configuration
LOG_FILE = 'log.log'
LOG_FILE_APPEND = False
LOG_LEVEL = 'INFO'
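Note that loguru writes to stderr by default, so the loguru messages emitted by the spider, pipeline, and middleware bypass Scrapy's LOG_FILE. To mirror them into the same file, you can register an extra loguru sink at startup; a minimal sketch:

import loguru

loguru.logger.add('log.log', level='INFO')  # route loguru output into Scrapy's log file as well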
- items.py
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class SeleniumScrapySpiderItem(scrapy.Item):
    workName = scrapy.Field()
    workAddress = scrapy.Field()
    workResponsibility = scrapy.Field()
    workYearsName = scrapy.Field()
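A scrapy.Item behaves like a dict but only accepts the fields declared above, which catches typos early; a quick illustration (the values are made up, not scraped data):

item = SeleniumScrapySpiderItem()
item['workName'] = 'backend engineer'  # OK: declared field
# item['salary'] = '20k'  # would raise KeyError: field not supported
print(dict(item))  # {'workName': 'backend engineer'}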
- pipelines.py
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
import pymysql
import loguru
class MysqlPipeLine:
    def open_spider(self, spider):
        self.spider = spider
        # charset='utf8mb4' so Chinese job text survives the round trip
        self.mysql = pymysql.connect(host='localhost', port=3306, user='root', password='root', charset='utf8mb4')
        self.cursor = self.mysql.cursor()
        self.create_db('job')

    def create_db(self, db_name):
        """Create the database if it does not exist yet."""
        sql = f'CREATE DATABASE IF NOT EXISTS {db_name}'
        try:
            self.cursor.execute(sql)
            self.mysql.select_db(db_name)
            self.create_job_table()
        except Exception as e:
            loguru.logger.info(f'Failed to create database {db_name}: {e}')

    def create_job_table(self):
        """Create the txWork table if it does not exist yet."""
        sql = '''
        CREATE TABLE IF NOT EXISTS txWork(
            workId INT AUTO_INCREMENT,
            workName VARCHAR(255),
            workAddress VARCHAR(255),
            workResponsibility TEXT,
            workYearsName VARCHAR(255),
            PRIMARY KEY(workId)
        )
        '''
        try:
            self.cursor.execute(sql)
        except Exception as e:
            loguru.logger.info(f'Failed to create table txWork: {e}')

    def process_item(self, item, spider):
        sql = '''INSERT INTO txWork(workName, workAddress, workResponsibility, workYearsName)
                 VALUES(%(workName)s, %(workAddress)s, %(workResponsibility)s, %(workYearsName)s)'''
        try:
            self.cursor.execute(sql, dict(item))
            self.mysql.commit()
            loguru.logger.info(f'"{item["workName"]}" inserted successfully')
        except Exception as e:
            loguru.logger.info(f'"{item["workName"]}" insert failed: {e}')
            self.mysql.rollback()
        return item

    def close_spider(self, spider):
        self.mysql.close()
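To sanity-check what the pipeline wrote, you can query the job database directly; a minimal sketch, assuming the same localhost/root credentials used in open_spider:

import pymysql

conn = pymysql.connect(host='localhost', port=3306, user='root', password='root', database='job', charset='utf8mb4')
with conn.cursor() as cursor:
    cursor.execute('SELECT COUNT(*) FROM txWork')
    print('rows stored:', cursor.fetchone()[0])
    cursor.execute('SELECT workName, workAddress FROM txWork LIMIT 3')
    for row in cursor.fetchall():
        print(row)
conn.close()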
- middlewares.py
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
from scrapy.http import HtmlResponse
from selenium.webdriver import Chrome,ChromeOptions
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.common.by import By
from fake_useragent import UserAgent
from seleniumScrapySpider.kuaidaili import Kuaidaili
import loguru
# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter
class SeleniumDownloaderMiddleware:
    def __init__(self):
        chrome_options = ChromeOptions()
        chrome_options.add_experimental_option('detach', True)  # do not close the browser automatically
        chrome_options.add_experimental_option('excludeSwitches', ['enable-automation'])  # hide the "controlled by automated software" infobar
        chrome_options.add_experimental_option('useAutomationExtension', False)  # do not load Chrome's automation extension
        chrome_options.add_argument('--start-maximized')  # start with a maximized window
        self.browser = Chrome(options=chrome_options)
        self.ua = UserAgent()
        self.kuaidaili = Kuaidaili()
        # Fetch an initial proxy IP
        self.first_ip = self.kuaidaili.get_ip()

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_closed, signal=signals.spider_closed)
        return s

    def process_request(self, request, spider):
        # Set a random User-Agent
        request.headers['User-Agent'] = self.ua.random
        # Set the proxy
        request.meta['proxy'] = self.first_ip
        request.meta['download_timeout'] = 5
        return None

    def process_response(self, request, response, spider):
        loguru.logger.info(f'Proxy IP: {request.meta["proxy"]}')
        if response.status == 200:
            self.browser.get(request.url)
            wait = WebDriverWait(self.browser, 10)  # wait up to 10 s
            # Wait until an element with class 'recruit-list' has been rendered
            wait.until(expected_conditions.presence_of_element_located((By.CLASS_NAME, 'recruit-list')))
            return HtmlResponse(url=request.url, body=self.browser.page_source, encoding='utf-8', request=request)
        # The proxy has gone stale: fetch a new one and return the request so it is retried
        request.meta['proxy'] = self.kuaidaili.get_ip()
        request.meta['download_timeout'] = 2
        return request

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_closed(self, spider):
        self.browser.quit()  # quit() also terminates the chromedriver process
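The kuaidaili module imported above is not shown in this post. The middleware only relies on a Kuaidaili class exposing get_ip(), which must return a full proxy URL for request.meta['proxy']; a hypothetical sketch of that interface (API_URL and the response format are placeholders, not Kuaidaili's actual extraction API):

# kuaidaili.py - hypothetical sketch of the helper the middleware expects
import requests

API_URL = 'https://example.com/your-proxy-extract-api'  # placeholder: use your provider's real endpoint

class Kuaidaili:
    def get_ip(self):
        # Assumes the endpoint returns a single "ip:port" line as plain text
        ip_port = requests.get(API_URL, timeout=5).text.strip()
        return f'http://{ip_port}'  # Scrapy expects a full URL in request.meta['proxy']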