# Scrapy + scrapy-redis settings for the redisPro project.

BOT_NAME = "redisPro"

SPIDER_MODULES = ["redisPro.spiders"]
NEWSPIDER_MODULE = "redisPro.spiders"

# Identify as a desktop Chrome browser instead of the default
# "redisPro (+http://www.yourdomain.com)" user agent.
USER_AGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36"

# Do not honour robots.txt rules.
ROBOTSTXT_OBEY = False

# Route scraped items through the scrapy-redis pipeline, which writes
# them to Redis; the project-local pipeline stays disabled.
ITEM_PIPELINES = {
    # "redisPro.pipelines.RedisproPipeline": 300,
    "scrapy_redis.pipelines.RedisPipeline": 300,
}

# Use the scrapy-redis request-fingerprint dedup filter, shared via Redis.
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"

# Use the scrapy-redis scheduler so the request queue lives in Redis.
SCHEDULER = "scrapy_redis.scheduler.Scheduler"

# Keep the Redis queues when the spider closes, so a crawl can be
# paused and later resumed.
SCHEDULER_PERSIST = True

# Address of the Redis server holding the queue/items; required when
# the Redis server is not running on this machine.
REDIS_HOST = "172.20.10.9"
REDIS_PORT = 6379
# NOTE(review): the two lines below were artifacts scraped from the
# blog page this file was copied from (author byline and publish-date
# footer); commented out so the module imports cleanly.
# RUN__IT — scrapy-redis configuration file (settings)
# Latest recommended article published 2022-04-27 12:14:08