Incremental and Distributed Crawling

Full-site data crawling with CrawlSpider
# Creating the project
scrapy startproject projectname
scrapy genspider -t crawl spidername www.baidu.com
# Full-site data crawling with CrawlSpider:
- CrawlSpider is a spider class, a subclass of scrapy.Spider, with more built-in functionality than the plain Spider.
- CrawlSpider's two mechanisms:
    - Link extractor (LinkExtractor): extracts links from the response according to the specified rules
    - Rule parser (Rule): parses the response data of the extracted links according to the specified rules
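A LinkExtractor can also be used on its own inside a callback to check which URLs a given allow pattern would pick up before wiring it into a Rule. A minimal sketch against the joke site used in the example below (the spider name is just an illustration):
import scrapy
from scrapy.linkextractors import LinkExtractor


class LinkDemoSpider(scrapy.Spider):
    name = 'link_demo'
    start_urls = ['http://xiaohua.zol.com.cn/lengxiaohua/']

    def parse(self, response):
        # extract_links() returns Link objects for every href matching the allow pattern
        for link in LinkExtractor(allow=r'/lengxiaohua/\d+\.html').extract_links(response):
            print(link.url, link.text)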
# Example: use CrawlSpider to deep-crawl the whole joke site (xiaohua.zol.com.cn), scraping each joke's title and content and storing them in MongoDB
# Item code:
import scrapy
class JokeItem(scrapy.Item):
    title = scrapy.Field()
    content = scrapy.Field()
# Spider code:
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from ..items import JokeItem


class ZSpider(CrawlSpider):
    name = 'z'
    # allowed_domains = ['www.baidu.com']
    start_urls = ['http://xiaohua.zol.com.cn/lengxiaohua/']
    link = LinkExtractor(allow=r'/lengxiaohua/\d+\.html')
    link_detail = LinkExtractor(allow=r'.*?\d+\.html')
    rules = (
        Rule(link, callback='parse_item', follow=True),
        Rule(link_detail, callback='parse_detail'),
    )

    def parse_item(self, response):
        pass

    def parse_detail(self, response):
        title = response.xpath('//h1[@class="article-title"]/text()').extract_first()
        content = response.xpath('//div[@class="article-text"]//text()').extract()
        content = ''.join(content)

        if title and content:
            item = JokeItem()
            item["title"] = title
            item["content"] = content
            print(dict(item))
            yield item
# Pipeline code:
import pymongo


class JokePipeline(object):

    def __init__(self, mongo_uri, mongo_db):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db

    @classmethod
    def from_crawler(cls, crawler):
        return cls(
            mongo_uri=crawler.settings.get('MONGO_URI'),
            mongo_db=crawler.settings.get('MONGO_DB')
        )

    def open_spider(self, spider):
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]

    def process_item(self, item, spider):
        self.db["joke"].insert(dict(item))
        return item

    def close_spider(self, spider):
        self.client.close()
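The pipeline above reads MONGO_URI and MONGO_DB from the project settings and must itself be registered there; a minimal settings.py sketch (the module path and database name are assumptions, adjust them to the actual project):
# settings.py (sketch)
ITEM_PIPELINES = {
    'joke.pipelines.JokePipeline': 300,   # assumed project package name
}
MONGO_URI = 'mongodb://localhost:27017'   # assumed local MongoDB instance
MONGO_DB = 'joke'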

# 电影天堂 (Movie Heaven, ygdy8.net): deep-crawl the whole site for movie titles and download links:
# Item defining the fields to store:
import scrapy


class BossItem(scrapy.Item):
    title = scrapy.Field()
    downloadlink = scrapy.Field()
# Spider code:
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from ..items import BossItem


class BSpider(CrawlSpider):
    name = 'mv'
    # allowed_domains = ['www.baidu.com']
    start_urls = ['https://www.ygdy8.net/html/gndy/oumei/index.html']
    link = LinkExtractor(allow=r'list.*?html')
    link_detail = LinkExtractor(allow=r'.*?/\d+\.html')
    rules = (
        Rule(link, callback='parse_item', follow=True,),
        Rule(link_detail, callback='parse_detail', follow=True,),
    )

    def parse_item(self, response):
        pass

    def parse_detail(self, response):
        title = response.xpath('//h1//text()').extract_first()
        downloadlink = response.xpath('//tbody/tr/td/a/text()').extract_first()
        if title and downloadlink and 'ftp' in downloadlink:
            item = BossItem()
            item['title'] = title
            item['downloadlink'] = downloadlink
            yield item
# Pipeline code:
import pymongo


class MvPipeline(object):

    def __init__(self, mongo_uri, mongo_db):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db

    @classmethod
    def from_crawler(cls, crawler):
        return cls(
            mongo_uri=crawler.settings.get('MONGO_URI'),
            mongo_db=crawler.settings.get('MONGO_DB')
        )

    def open_spider(self, spider):
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]

    def process_item(self, item, spider):
        self.db["mv"].insert(dict(item))
        return item

    def close_spider(self, spider):
        self.client.close()

Distributed Crawling
# Concept:
Use multiple machines to form a cluster, run the same crawler program on every node, and crawl the data jointly.

# Why native Scrapy cannot run distributed:
	- the scheduler of native Scrapy cannot be shared between machines
	- the pipelines of native Scrapy cannot be shared between machines

# How to make Scrapy distributed:
- provide the native Scrapy framework with a shared pipeline and a shared scheduler
- pip install scrapy_redis
- 1. Create the project: scrapy startproject projectname
- 2. Create the spider file: scrapy genspider -t crawl spidername www.baidu.com
- 3. Modify the spider file:
	- 3.1 import: from scrapy_redis.spiders import RedisCrawlSpider
	- 3.2 change the spider's parent class to RedisCrawlSpider
	- 3.3 comment out allowed_domains and start_urls
	- 3.4 add a new attribute redis_key = 'xxx', the name of the shared scheduler queue
	- 3.5 parse the data, pack it into items, and yield them to the pipeline
- 4. Edit the settings file:
	- 4.1 set the pipeline:
		ITEM_PIPELINES = {
			'scrapy_redis.pipelines.RedisPipeline': 400
		}
	- 4.2 set the scheduler:
		# add a dedupe filter class that stores request fingerprints in a Redis set, making request deduplication persistent
		DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
		# use the scheduler provided by the scrapy-redis component
		SCHEDULER = "scrapy_redis.scheduler.Scheduler"
		# whether the scheduler state persists, i.e. whether the request queue and the fingerprint set in Redis are kept when the crawl ends; True keeps them, False clears them
		SCHEDULER_PERSIST = True
	- 4.3 point to the actual Redis instance:
		REDIS_HOST = 'IP address of the Redis server'
		REDIS_PORT = 6379
- 5. Modify the Redis configuration and start Redis with it:
	- #bind 127.0.0.1    (comment out the bind line)
	- protected-mode no
	- start the Redis server with that config file (redis-server ./redis.windows.conf) and a client (redis-cli)

- 6. Start the spider: scrapy runspider xxx.py (run from inside the spiders folder)
- 7. Push a start URL into the scheduler queue from the Redis client: lpush xxx www.xxx.com
	(xxx is the value of redis_key)
# Example: crawling complaint posts from the 阳光热线问政 (Sunshine Hotline) platform
# URL: http://wz.sun0769.com/index.php/question/questionType?type=4
# Items code:
import scrapy
class FbsproItem(scrapy.Item):
    # define the fields for your item here like:
    title = scrapy.Field()
# Spider code:
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapy_redis.spiders import RedisCrawlSpider
from fbspro.items import FbsproItem  
class TestSpider(RedisCrawlSpider):
    name = 'test'  
    # allowed_domains = ['ww.baidu.com']
    # start_urls = ['http://ww.baidu.com/']
    redis_key = 'urlscheduler'
    link = LinkExtractor(allow=r'.*?&page=\d+')
    rules = (
        Rule(link, callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        a_lst = response.xpath('//a[@class="news14"]')
        for a in a_lst:
            title = a.xpath('./text()').extract_first()
            # print(title)
            item = FbsproItem()
            item['title'] = title
            yield item

# Settings code:
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36'
ROBOTSTXT_OBEY = False
CONCURRENT_REQUESTS = 3
ITEM_PIPELINES = {
   # 'fbspro.pipelines.FbsproPipeline': 300,
    'scrapy_redis.pipelines.RedisPipeline': 400
}
# add a dedupe filter class that stores request fingerprints in a Redis set, making request deduplication persistent
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
# use the scheduler provided by the scrapy-redis component
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
# whether the scheduler state persists, i.e. whether the request queue and the fingerprint set in Redis are kept when the crawl ends; True keeps them, False clears them
SCHEDULER_PERSIST = True

# Redis connection
REDIS_HOST = '192.168.12.198'
REDIS_PORT = 6379
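With these settings in place, start the spider on every node with scrapy runspider test.py (run from the spiders directory; the file name is assumed to match the spider name), then seed the shared queue once from a redis-cli session:
lpush urlscheduler http://wz.sun0769.com/index.php/question/questionType?type=4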

Incremental Crawling
# Concept:
	- detect updates on the target site and crawl only the newly added content
	- core idea: deduplication
        - by URL
        - by data fingerprint
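Both examples below rely on the semantics of Redis sets for deduplication: sadd returns 1 when the member is new and 0 when it is already in the set, so the return value doubles as an "is this new?" check. A minimal sketch (assuming a local Redis instance; the key and URL are only illustrations):
from redis import Redis

conn = Redis('127.0.0.1', 6379)
print(conn.sadd('seen_urls', 'https://example.com/a'))  # 1 -> not seen before, crawl it
print(conn.sadd('seen_urls', 'https://example.com/a'))  # 0 -> already recorded, skip it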
# Incremental crawler: scraping movie titles and genres
# url: https://www.4567tv.co/list/index1.html
# Items code:
import scrapy
class MvproItem(scrapy.Item):
    title = scrapy.Field()
    position = scrapy.Field()
# Spider code:
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from redis import Redis
from mvpro.items import MvproItem


class MoveSpider(CrawlSpider):
    conn = Redis('127.0.0.1', 6379)
    name = 'move'
    # allowed_domains = ['www.baidu.com']
    start_urls = ['https://www.4567tv.co/list/index1.html']
    link = LinkExtractor(allow=r'/list/index1-\d+\.html')
    rules = (
        Rule(link, callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        li_list = response.xpath('//div[contains(@class, "index-area")]/ul/li')
        for li in li_list:
            mv_link = 'https://www.4567tv.co' + li.xpath('./a/@href').extract_first()
            ex = self.conn.sadd('mv_link', mv_link)
            if ex:
                print('New data found, crawling..........................')
                yield scrapy.Request(url=mv_link, callback=self.parse_detail)
            else:
                print('No new data to crawl!!!!!!!!!!!!!!!!!!!!!!!!!')

    def parse_detail(self, response):
        title = response.xpath('//dt[@class="name"]/text()').extract_first()
        pro = response.xpath('//div[@class="ee"]/text()').extract_first()
        item = MvproItem()
        item['title'] = title
        item['position'] = pro
        yield item
# Requirement: an incremental crawler based on data fingerprints, scraping the text jokes from 糗事百科 (Qiushibaike)
# Spider code:
import scrapy
from qiubai.items import QiubaiItem
import hashlib
from redis import Redis

class QbSpider(scrapy.Spider):
    conn = Redis('127.0.0.1', 6379)
    name = 'qb'
    # allowed_domains = ['www.baidu.com']
    start_urls = ['https://www.qiushibaike.com/text/']

    def parse(self, response):
        div_list = response.xpath('//div[@id="content-left"]/div')

        for div in div_list:
            content = div.xpath('./a[1]/div[@class="content"]/span[1]/text()').extract_first()
            if not content:
                continue
            fp = hashlib.md5(content.encode('utf-8')).hexdigest()
            ex = self.conn.sadd('fp', fp)
            if ex:
                print('New data found, crawling........................')
                item = QiubaiItem()
                item['content'] = content
                yield item
            else:
                print('No new data to crawl!!!!!!!!!!!!!!!!!!!!!!!!')
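The spider imports QiubaiItem but the item class is not shown here; a minimal sketch matching the single field the spider fills in (assumed to live in qiubai/items.py):
# items.py (sketch)
import scrapy


class QiubaiItem(scrapy.Item):
    content = scrapy.Field()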

Improving Scrapy's data-crawling efficiency
1. Increase concurrency:
    By default Scrapy runs 16 concurrent requests (CONCURRENT_REQUESTS = 16), which can be raised as needed. In the settings file, set CONCURRENT_REQUESTS = 100 to allow 100 concurrent requests.

2. Lower the log level:
    Running Scrapy produces a large amount of log output. To cut CPU usage, restrict the log output to INFO or ERROR. In the settings file: LOG_LEVEL = 'INFO'

3. Disable cookies:
    If cookies are not actually needed, disable cookie handling during the crawl to reduce CPU usage and speed things up. In the settings file: COOKIES_ENABLED = False

4. Disable retries:
    Re-requesting (retrying) failed HTTP requests slows the crawl down, so retries can be turned off. In the settings file: RETRY_ENABLED = False

5. Reduce the download timeout:
    When crawling very slow links, a smaller download timeout lets stuck requests be abandoned quickly, improving overall throughput. In the settings file: DOWNLOAD_TIMEOUT = 10 (a 10-second timeout)
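The five tweaks above can live together in the settings file; a sketch (the numbers are tuning knobs, not required values):
# settings.py (sketch) -- throughput tuning
CONCURRENT_REQUESTS = 100     # raise concurrency from the default of 16
LOG_LEVEL = 'ERROR'           # cut log output ('INFO' also works)
COOKIES_ENABLED = False       # skip cookie handling when the site does not need it
RETRY_ENABLED = False         # do not retry failed requests
DOWNLOAD_TIMEOUT = 10         # abandon responses slower than 10 seconds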
