Scrapy CrawlSpider: incremental and full crawls of Lianjia (链家网)

For learning purposes. The full crawl pulls the paginated listings by district + sub-district + price band, while the incremental crawl uses the deal records (成交信息) Lianjia publishes; the list page is scraped in one pass and the detail page in another. Without further ado, here is the code.

Problem encountered: Lianjia intermittently returns 301 redirects on some links, which breaks passing data between requests via meta; I have not solved this yet. The code below works around it by not relying on meta.
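
One possible workaround (a sketch I am adding for illustration, not part of the original project): turn off automatic redirect handling for the affected requests with the dont_redirect meta key, let the 301 reach the callback (HTTPERROR_ALLOWED_CODES = [301] in the settings below already allows that), and re-issue the request to the Location header with the original meta re-attached. The names RedirectAwareMixin and parse_with_redirect are hypothetical.

# Sketch only -- a mixin whose callback follows 301s by hand so that response.meta
# survives. Assumes dont_redirect=True in the request meta and
# HTTPERROR_ALLOWED_CODES = [301] so the 301 actually reaches the callback.
from urllib import parse

from scrapy import Request


class RedirectAwareMixin:
    def parse_with_redirect(self, response):
        if response.status == 301:
            location = response.headers.get('Location', b'').decode()
            if location:
                # re-issue the request to the redirect target, carrying the old meta
                yield Request(parse.urljoin(response.url, location),
                              callback=self.parse_with_redirect,
                              meta=dict(response.meta, dont_redirect=True),
                              dont_filter=True)
            return
        # ...normal parsing of the final 200 response goes here...

# The originating request would then be built like:
# yield Request(url, callback=self.parse_with_redirect,
#               meta={'position_b': position_b, 'dont_redirect': True})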

一、crawlspider

# -*- coding: utf-8 -*-
import json
from urllib import parse

from scrapy import Request
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule

from Lianjia.items import DangdangItemLoader, LianjiaItem


class LianjiaCrawlspiderSpider(CrawlSpider):
    name = 'lianjia_crawlspider'
    allowed_domains = ['xa.lianjia.com']  # allowed_domains takes bare domain names, not URL paths
    start_urls = ['https://xa.lianjia.com/', 'https://xa.lianjia.com/chengjiao/']

    rules = (
        Rule(LinkExtractor(restrict_xpaths="//div[@data-role='ershoufang']//a"), callback='parse_position',
             follow=False),
        Rule(LinkExtractor(allow=r'chengjiao/$'), callback='parse_home', follow=False),  # follow=False: the matched page is still parsed by the callback, its links are just not followed further
        Rule(LinkExtractor(allow=r'chengjiao/pg\d+/'), callback='parse_home', follow=False),
    )

    def parse_home(self, response):
        # incremental crawl: parse the chengjiao (deal) list page
        ul = response.xpath("//ul[@class='listContent']/li")
        for li in ul:
            try:
                item_loader = DangdangItemLoader(item=LianjiaItem(), response=response, selector=li)
                item_loader.add_xpath('name', './/div[@class="info"]/div[@class="title"]/a/text()')
                item_loader.add_xpath('house_type', './/div[@class="info"]/div[@class="title"]/a/text()')
                item_loader.add_xpath('house_size', './/div[@class="info"]/div[@class="title"]/a/text()')
                item_loader.add_xpath('house_id', './/div[@class="info"]/div[@class="title"]/a/@href')
                item_loader.add_xpath('money_all', './/div[@class="totalPrice"]/span/text()')
                item_loader.add_xpath('money_every', './/div[@class="unitPrice"]/span/text()')
                item_loader.add_xpath('success_data', './/div[@class="dealDate"]/text()')
                item_loader.add_xpath('link', './/div[@class="info"]/div[@class="title"]/a/@href')
                item_loader.add_xpath('img', './/a/img/@src')
                item = item_loader.load_item()
                link = item.get('link')
                yield Request(url=parse.urljoin(response.url, link), callback=self.get_data_info)
                yield item
            except Exception as e:
                print('====error:{}'.format(e))
                continue

        next_url_model = response.xpath('//div[@class="page-box house-lst-page-box"]/@page-url').extract_first()
        page_info = response.xpath('//div[@class="page-box house-lst-page-box"]/@page-data').extract_first()
        if not page_info:
            # no listings on this page
            return
        try:
            # page-data is a JSON string such as {"totalPage":100,"curPage":1}
            page_info = json.loads(page_info)
        except Exception as e:
            print('====error:{}'.format(e))
            print(response.url)
            return
        cur_page = page_info.get('curPage')
        total_page = page_info.get('totalPage')
        print('===={}/{}'.format(cur_page, total_page))
        print(response.url)
        if cur_page < total_page:
            next_url = parse.urljoin(response.url, next_url_model.format(page=cur_page + 1))
            yield Request(next_url, callback=self.parse_home)

    def get_data_info(self, response):
        # the detail page supplies price_range, position_s and position_b
        item_loader = DangdangItemLoader(item=LianjiaItem(), response=response, selector=response.selector)
        item_loader.add_value('house_id', response.url)
        item_loader.add_xpath('position_b', "//div[@class='name']/a[1]/text()")
        item_loader.add_xpath('position_s', "//div[@class='name']/a[2]/text()")
        price_range = int(response.xpath("//div[@class='price']/span/i/text()").extract_first())
        if price_range < 40:
            price_range = '40万以下'
        elif price_range < 60:
            price_range = '40-60万'
        elif price_range < 80:
            price_range = '60-80万'
        elif price_range < 100:
            price_range = '80-100万'
        elif price_range < 150:
            price_range = '100-150万'
        elif price_range < 200:
            price_range = '150-200万'
        elif price_range < 300:
            price_range = '200-300万'
        else:
            price_range = '300万以上'
        item_loader.add_value('price_range', price_range)
        item = item_loader.load_item()
        yield item

    '''Full crawl below: district + sub-district + price band'''

    def parse_position(self, response):
        # full crawl keyed by location
        # 1. from the district page, follow every sub-district link
        position_b = response.xpath('//div[@data-role="ershoufang"]/div/a[@class="selected"]/text()')
        all_position_zi = response.xpath("//div[@data-role='ershoufang']/div[2]/a")
        for position_zi in all_position_zi:
            meta = {"position_s": position_zi.xpath(".//text()").extract_first(),
                    "position_b": position_b.extract_first()}
            yield Request(url=parse.urljoin(response.url, position_zi.xpath(".//@href").extract_first()),
                          callback=self.parse_position_zi, meta=meta)

    def parse_position_zi(self, response):
        # 2. within each sub-district, follow every price-band filter link
        item_urls = response.xpath("//div[@class='m-filter']/div[@class='list-more']/dl[1]/dd/a")
        for item_url in item_urls:
            meta_new = {
                "price_range": item_url.xpath('.//span[2]/text()').extract_first()
            }
            meta_new.update(response.meta)
            yield Request(url=parse.urljoin(response.url, item_url.xpath('.//@href').extract_first()),
                          callback=self.parse_insert_data, meta=meta_new)

    def parse_insert_data(self, response):
        ul = response.xpath("//ul[@class='listContent']/li")
        position_b = response.xpath('.//div[@data-role="ershoufang"]/div/a[@class="selected"]/text()').extract()[0]
        position_s = response.xpath('.//div[@data-role="ershoufang"]/div/a[@class="selected"]/text()').extract()[1]
        price_range = response.xpath(
            './/span[@class="checkbox checked"]/following-sibling::span[1]/text()').extract_first()
        for li in ul:
            try:
                item_loader = DangdangItemLoader(item=LianjiaItem(), response=response, selector=li)
                item_loader.add_xpath('name', './/div[@class="info"]/div[@class="title"]/a/text()')
                item_loader.add_xpath('house_type', './/div[@class="info"]/div[@class="title"]/a/text()')
                item_loader.add_xpath('house_size', './/div[@class="info"]/div[@class="title"]/a/text()')
                item_loader.add_xpath('house_id', './/div[@class="info"]/div[@class="title"]/a/@href')
                item_loader.add_xpath('money_all', './/div[@class="totalPrice"]/span/text()')
                item_loader.add_xpath('money_every', './/div[@class="unitPrice"]/span/text()')
                item_loader.add_xpath('success_data', './/div[@class="dealDate"]/text()')
                item_loader.add_xpath('link', './/div[@class="info"]/div[@class="title"]/a/@href')
                item_loader.add_xpath('img', './/a/img/@src')
                item_loader.add_value('price_range', price_range)
                item_loader.add_value('position_s', position_s)
                item_loader.add_value('position_b', position_b)
                item = item_loader.load_item()
            except Exception as e:
                print('====error:{}'.format(e))
                continue
            yield item
        next_url_model = response.xpath('//div[@class="page-box house-lst-page-box"]/@page-url').extract_first()
        page_info = response.xpath('//div[@class="page-box house-lst-page-box"]/@page-data').extract_first()
        if not page_info:
            # no listings on this page
            return
        try:
            # page-data is a JSON string such as {"totalPage":100,"curPage":1}
            page_info = json.loads(page_info)
        except Exception as e:
            print('====error:{}'.format(e))
            print(response.url)
            return
        cur_page = page_info.get('curPage')
        total_page = page_info.get('totalPage')
        print('===={}/{}'.format(cur_page, total_page))
        print(response.url)
        if cur_page < total_page:
            next_url = parse.urljoin(response.url, next_url_model.format(page=cur_page + 1))
            yield Request(next_url, callback=self.parse_insert_data)

二、pipelines

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
from twisted.enterprise import adbapi


class LianjiaPipeline(object):
    def process_item(self, item, spider):
        return item


class MysqlTwistedPipeline:
    # run the SQL asynchronously on Twisted's adbapi connection pool
    def __init__(self, dbpool):
        self.dbpool = dbpool

    def process_item(self, item, spider):
        query = self.dbpool.runInteraction(self.do_insert, item)
        query.addErrback(self.handler_error, item, spider)
        return item

    def do_insert(self, cursor, item):
        # check what is already stored: the full row, only the list-page part, or only the detail part
        sql = 'select name,position_s from lianjia_ershoufang_xian_test where house_id={}'.format(item.get('house_id'))
        cursor.execute(sql)
        r = cursor.fetchone()
        if not r:
            # 4. nothing stored yet
            if item.get('position_s') and item.get('name'):
                # both list and detail data in the item: insert the full row
                sql = "insert into lianjia_ershoufang_xian_test (house_id,name,house_type,house_size,money_all,money_every,success_data,img,link,price_range,position_s,position_b) values ({},'{}','{}',{},{},{},'{}','{}','{}','{}','{}','{}');"
                sql = sql.format(item.get('house_id', ''),
                                 item.get('name', ''),
                                 item.get('house_type', ''),
                                 item.get('house_size', ''),
                                 item.get('money_all', ''),
                                 item.get('money_every'),
                                 item.get('success_data'),
                                 item.get('img'),
                                 item.get('link'),
                                 item.get('price_range'),
                                 item.get('position_s'),
                                 item.get('position_b'), )
                cursor.execute(sql)
            elif item.get('position_s') and not item.get('name'):
                # detail-page data only: insert price_range and the position fields
                sql = "insert into lianjia_ershoufang_xian_test (house_id,price_range,position_s,position_b) values ({},'{}','{}','{}');"
                sql = sql.format(item.get('house_id', ''),
                                 item.get('price_range'),
                                 item.get('position_s'),
                                 item.get('position_b'))
                cursor.execute(sql)
            else:
                # list-page data only: insert the listing fields
                sql = "insert into lianjia_ershoufang_xian_test (house_id,name,house_type,house_size,money_all,money_every,success_data,img,link) values ({},'{}','{}',{},{},{},'{}','{}','{}');"
                sql = sql.format(item.get('house_id', ''),
                                 item.get('name', ''),
                                 item.get('house_type', ''),
                                 item.get('house_size', ''),
                                 item.get('money_all', ''),
                                 item.get('money_every'),
                                 item.get('success_data'),
                                 item.get('img'),
                                 item.get('link'))
                cursor.execute(sql)
        elif r.get("name") and r.get('position_s'):
            # 1. full row already stored, nothing to do
            return
        elif r.get("name") and not r.get('position_s'):
            # 2. list-page part stored; add the detail part if the item carries it
            if item.get('position_s'):
                sql = "update lianjia_ershoufang_xian_test set price_range='{}',position_s='{}',position_b='{}' where house_id={};"

                sql = sql.format(item.get('price_range'),
                                 item.get('position_s'),
                                 item.get('position_b'),
                                 item.get('house_id'))
                cursor.execute(sql)
            else:
                return
        elif not r.get("name") and r.get('position_s'):
            # 3. detail part stored; fill in the list-page fields
            sql = "update lianjia_ershoufang_xian_test set name='{}',house_type='{}',house_size={},money_all={},money_every={},success_data='{}',img='{}',link='{}' where house_id={};"
            sql = sql.format(
                item.get('name', ''),
                item.get('house_type', ''),
                item.get('house_size', ''),
                item.get('money_all', ''),
                item.get('money_every'),
                item.get('success_data'),
                item.get('img'),
                item.get('link'),
                item.get('house_id'))
            cursor.execute(sql)


    def handler_error(self, failure, item, spider):
        print(failure)
        pass

    @classmethod
    def from_settings(cls, settings):
        from MySQLdb.cursors import DictCursor
        dbparms = dict(
            host=settings["MYSQL_HOST"],
            db=settings["MYSQL_DBNAME"],
            user=settings["MYSQL_USER"],
            passwd=settings["MYSQL_PASSWORD"],
            charset='utf8',
            cursorclass=DictCursor,
            use_unicode=True,
        )
        dbpool = adbapi.ConnectionPool("MySQLdb", **dbparms)
        return cls(dbpool)
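
The pipeline assumes a MySQL table along these lines. This is a sketch inferred from the column names used in the INSERT/UPDATE statements above; the column types and lengths are my assumptions, so adjust them to your data.

# Sketch of the target table; types/lengths are assumptions.
CREATE_TABLE_SQL = """
CREATE TABLE IF NOT EXISTS lianjia_ershoufang_xian_test (
    house_id     BIGINT PRIMARY KEY,
    name         VARCHAR(128),
    house_type   VARCHAR(32),
    house_size   FLOAT,
    money_all    INT,
    money_every  INT,
    success_data DATETIME,
    img          VARCHAR(512),
    link         VARCHAR(512),
    price_range  VARCHAR(32),
    position_s   VARCHAR(64),
    position_b   VARCHAR(64)
) DEFAULT CHARSET=utf8;
"""
# Run once before crawling, e.g.:
# import MySQLdb
# conn = MySQLdb.connect(host=MYSQL_HOST, user=MYSQL_USER, passwd=MYSQL_PASSWORD,
#                        db=MYSQL_DBNAME, charset='utf8')
# conn.cursor().execute(CREATE_TABLE_SQL)
# conn.commit()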

三、items

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import datetime
import re

import scrapy
from scrapy.loader import ItemLoader
from scrapy.loader.processors import TakeFirst, MapCompose


class DangdangItemLoader(ItemLoader):
    default_output_processor = TakeFirst()


def name_convert(value):
    name, house_type, size = value.split(' ')
    return name


def house_type_convert(value):
    name, house_type, size = value.split(' ')
    return house_type


def house_size_convert(value):
    name, house_type, size = value.split(' ')
    return float(size.replace('平米', ''))


def house_id_convert(value):
    house_id = int(re.match(r'.*?(\d+).*', value).group(1))
    return house_id


def trans_int(value):
    return int(value)


def trans_data(value):
    return datetime.datetime.strptime(value, '%Y.%m.%d')


class LianjiaItem(scrapy.Item):
    # define the fields for your item here like:
    name = scrapy.Field(
        input_processor=MapCompose(name_convert)
        # output_processor=TakeFirst()
    )
    house_id = scrapy.Field(
        input_processor=MapCompose(house_id_convert)
    )
    house_type = scrapy.Field(
        input_processor=MapCompose(house_type_convert)
    )
    house_size = scrapy.Field(
        input_processor=MapCompose(house_size_convert)
    )
    money_all = scrapy.Field(
        input_processor=MapCompose(trans_int)
    )
    money_every = scrapy.Field(
        input_processor=MapCompose(trans_int)
    )
    success_data = scrapy.Field(
        input_processor=MapCompose(trans_data)
    )
    img = scrapy.Field()
    link = scrapy.Field()
    price_range = scrapy.Field()
    position_s = scrapy.Field()
    position_b = scrapy.Field()
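
A quick sanity check of the converters above; the title string and URL are made-up samples in the same format the deal list uses.

# Made-up sample inputs; run after importing the converters from Lianjia.items.
from Lianjia.items import (name_convert, house_type_convert,
                           house_size_convert, house_id_convert)

sample_title = '某某小区 2室1厅 89.5平米'
print(name_convert(sample_title))        # 某某小区
print(house_type_convert(sample_title))  # 2室1厅
print(house_size_convert(sample_title))  # 89.5
print(house_id_convert('https://xa.lianjia.com/chengjiao/101101234567.html'))  # 101101234567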


四、middlewares

# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
import base64

from scrapy import signals
import random
from .settings import PROXIES

class LianjiaSpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.

        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.

        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.

        # Should return either None or an iterable of Request, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn’t have a response associated.

        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class LianjiaDownloaderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.

        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.

        # Must either;
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.

        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


from fake_useragent import UserAgent


class RandomUserAgent(object):
    # set a random User-Agent header on every outgoing request
    def __init__(self):
        self.ua = UserAgent()

    def process_request(self, request, spider):
        request.headers['User-Agent'] = self.ua.random

# random proxy middleware
class RandomProxy:
    def process_response(self, request, response, spider):
        # print('proxy in use:', request.meta.get('proxy'))
        return response

    def process_request(self, request, spider):
        proxy = random.choice(PROXIES)
        if proxy["user_passwd"] is None:
            # proxy without credentials; uncomment to actually route through it
            pass
            # request.meta["proxy"] = "http://" + proxy["ip_port"]
        else:
            # base64-encode the user:password pair for proxy authentication
            base64_userpasswd = base64.b64encode(proxy["user_passwd"].encode("utf-8"))
            request.headers["Proxy-Authorization"] = "Basic " + base64_userpasswd.decode("utf-8")
            request.meta["proxy"] = "http://" + proxy["ip_port"]
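
For completeness, an authenticated entry in the PROXIES list (settings.py) would look like the following; the address and credentials are placeholders. RandomProxy then takes the else branch above and sets the Proxy-Authorization header.

# Placeholder values -- not a real proxy.
PROXIES = [
    {"ip_port": "10.0.0.1:8080", "user_passwd": "myuser:mypassword"},
]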

五、settings

# -*- coding: utf-8 -*-

# Scrapy settings for Lianjia project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'Lianjia'

SPIDER_MODULES = ['Lianjia.spiders']
NEWSPIDER_MODULE = 'Lianjia.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = 'Lianjia (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# proxy pool used by RandomProxy; replace these with proxies that actually work
PROXIES = [
            # {"ip_port": '112.80.248.75:80', "user_passwd": None},
           {"ip_port": '117.185.17.151:80', "user_passwd": None},
           {"ip_port": '124.236.111.11:80', "user_passwd": None},
           {"ip_port": '101.132.143.232:80', "user_passwd": None},
           {"ip_port": '111.13.100.91:80', "user_passwd": None},
           {"ip_port": '113.214.13.1:1080', "user_passwd": None}]
# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 1
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False

# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
# }

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
#    'Lianjia.middlewares.LianjiaSpiderMiddleware': 543,
# }

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
    'scrapy.downloadermiddlewares.redirect.RedirectMiddleware': 600,
    'Lianjia.middlewares.LianjiaDownloaderMiddleware': None,
    'Lianjia.middlewares.RandomUserAgent': 543,
    'Lianjia.middlewares.RandomProxy': 200,
}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
# }

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
   # 'Lianjia.pipelines.LianjiaPipeline': 300,
   'Lianjia.pipelines.MysqlTwistedPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
CRAWLSPIDER_FOLLOW_LINKS = True
HTTPERROR_ALLOWED_CODES = [301]  # let 301 responses reach the spider callbacks
# Note: dont_redirect is a Request.meta key, not a setting, so assigning it here has
# no effect; set it per request via Request(url, meta={'dont_redirect': True}).

MYSQL_HOST = "xxx.xxx.xxx.xxx"
MYSQL_DBNAME = "xxx"
MYSQL_USER = "xxx"
MYSQL_PASSWORD = "xxx"
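
To kick off the crawl from a script instead of the command line (scrapy crawl lianjia_crawlspider from the project root does the same), a minimal runner would be:

# run.py -- minimal script runner, placed next to scrapy.cfg; a sketch, not part of
# the original project.
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

if __name__ == '__main__':
    process = CrawlerProcess(get_project_settings())
    process.crawl('lianjia_crawlspider')   # spider name as defined in the spider class
    process.start()                        # blocks until the crawl finishes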

 
