def process_spider_exception(self, response, exception, spider):
    """Called when a spider or process_spider_input() method
    (from other spider middleware) raises an exception.

    Should return either None or an iterable of Response, dict
    or Item objects.
    """
    # Returning None lets Scrapy fall through to its default
    # exception handling for this response.
    pass
def process_start_requests(self, start_requests, spider):
    """Called with the start requests of the spider, and works
    similarly to the process_spider_output() method, except
    that it doesn't have a response associated.

    Must return only requests (not items).
    """
    # Pass every start request through unchanged.
    for request in start_requests:
        yield request
def spider_opened(self, spider):
    # Log at INFO when Scrapy fires the spider_opened signal.
    # NOTE: the original used typographic quotes (‘…’), which are a
    # Python syntax error; restored to straight quotes.
    spider.logger.info('Spider opened: %s' % spider.name)
class Zol2DownloaderMiddleware(object):
    """Downloader middleware for the zol2 project.

    Not all methods need to be defined. If a method is not defined,
    Scrapy acts as if the downloader middleware does not modify the
    passed objects.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your middlewares.
        # `signals` must be imported at module level (from scrapy import signals).
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        """Called for each request that goes through the downloader
        middleware.

        Must either:
        - return None: continue processing this request
        - or return a Response object
        - or return a Request object
        - or raise IgnoreRequest: process_exception() methods of
          installed downloader middleware will be called
        """
        return None

    def process_response(self, request, response, spider):
        """Called with the response returned from the downloader.

        Must either:
        - return a Response object
        - return a Request object
        - or raise IgnoreRequest
        """
        return response

    def process_exception(self, request, exception, spider):
        """Called when a download handler or a process_request()
        (from other downloader middleware) raises an exception.

        Must either:
        - return None: continue processing this exception
        - return a Response object: stops process_exception() chain
        - return a Request object: stops process_exception() chain
        """
        pass

    def spider_opened(self, spider):
        # Fixed the typographic quotes (‘…’) from the original paste,
        # which are a Python syntax error.
        spider.logger.info('Spider opened: %s' % spider.name)
settings.py
# -*- coding: utf-8 -*-

# Scrapy settings for zol2 project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://doc.scrapy.org/en/latest/topics/settings.html
#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'zol2'

SPIDER_MODULES = ['zol2.spiders']
NEWSPIDER_MODULE = 'zol2.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.75 Safari/537.36'

# Obey robots.txt rules
ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 0.5
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'zol2.middlewares.Zol2SpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'zol2.middlewares.Zol2DownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'zol2.pipelines.Zol2Pipeline': 300,
}

# Filesystem root where the images pipeline stores downloaded images.
IMAGES_STORE = "/home/pyvip/env_spider/zol2/zol2/images"

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
pazol2.py
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from zol2.items import Zol2Item


class Pazol2Spider(CrawlSpider):
    # NOTE(review): the rest of this spider (its `rules` and parse
    # callbacks) is missing from this excerpt — the original text was
    # truncated by unrelated article content. Only the visible header
    # is restored here; recover the full class from the original source.
    name = 'pazol2'
    allowed_domains = ['desk.zol.com.cn']
自我介绍一下,小编13年上海交大毕业,曾经在小公司待过,也去过华为、OPPO等大厂,18年进入阿里一直到现在。
深知大多数Python工程师,想要提升技能,往往是自己摸索成长或者是报班学习,但对于培训机构动则几千的学费,着实压力不小。自己不成体系的自学效果低效又漫长,而且极易碰到天花板技术停滞不前!
因此收集整理了一份《2024年Python开发全套学习资料》,初衷也很简单,就是希望能够帮助到想自学提升又不知道该从何学起的朋友,同时减轻大家的负担。
既有适合小白学习的零基础资料,也有适合3年以上经验的小伙伴深入学习提升的进阶课程,基本涵盖了95%以上Python开发知识点,真正体系化!
由于文件比较大,这里只是将部分目录大纲截图出来,每个节点里面都包含大厂面经、学习笔记、源码讲义、实战项目、讲解视频,并且后续会持续更新
如果你觉得这些内容对你有帮助,可以添加V获取:vip1024c (备注Python)
一、Python所有方向的学习路线
Python所有方向路线就是把Python常用的技术点做整理,形成各个领域的知识点汇总,它的用处就在于,你可以按照上面的知识点去找对应的学习资源,保证自己学得较为全面。
二、学习软件
工欲善其事必先利其器。学习Python常用的开发软件都在这里了,给大家节省了很多时间。
三、入门学习视频
我们在看视频学习的时候,不能光动眼动脑不动手,比较科学的学习方法是在理解之后运用它们,这时候练手项目就很适合了。
节省了很多时间。
三、入门学习视频
我们在看视频学习的时候,不能光动眼动脑不动手,比较科学的学习方法是在理解之后运用它们,这时候练手项目就很适合了。