Scraping Lianjia second-hand housing data with Scrapy

Written as a learning exercise.

Only the 3,000 or so transaction records that the site actually displays are crawled.
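
The code below assumes the standard Scrapy project layout for a project named Lianjia; once all the pieces are in place, the spider is started from the project root with scrapy crawl lianjia_job.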

spider:

# -*- coding: utf-8 -*-
import json
from urllib import parse

import scrapy
from scrapy import Request
from Lianjia.items import LianjiaItem, DangdangItemLoader

class LianjiaJobSpider(scrapy.Spider):
    name = 'lianjia_job'
    allowed_domains = ['xa.lianjia.com']  # domains only, not URLs
    start_urls = ['https://xa.lianjia.com/chengjiao/']

    def parse(self, response):
        ul = response.xpath("//ul[@class='listContent']/li")
        for li in ul:
            try:
                item_loader = DangdangItemLoader(item=LianjiaItem(),response=response,selector=li)
                item_loader.add_xpath('name','.//div[@class="info"]/div[@class="title"]/a/text()')
                item_loader.add_xpath('house_type','.//div[@class="info"]/div[@class="title"]/a/text()')
                item_loader.add_xpath('house_size','.//div[@class="info"]/div[@class="title"]/a/text()')
                item_loader.add_xpath('house_id','.//div[@class="info"]/div[@class="title"]/a/@href')
                item_loader.add_xpath('money_all','.//div[@class="totalPrice"]/span/text()')
                item_loader.add_xpath('money_every','.//div[@class="unitPrice"]/span/text()')
                item_loader.add_xpath('success_data','.//div[@class="dealDate"]/text()')
                item_loader.add_xpath('link','.//div[@class="info"]/div[@class="title"]/a/@href')
                item_loader.add_xpath('img','.//a/img/@src')
                item = item_loader.load_item()
            except Exception as e:
                self.logger.error('failed to load item: %s', e)
                continue
            yield item
        # the page-box div carries pagination info in two attributes:
        # page-url (a URL template with a {page} placeholder) and
        # page-data (JSON holding curPage and totalPage)
        next_url_model = response.xpath('//div[@class="page-box house-lst-page-box"]/@page-url').extract_first()
        page_info = response.xpath('//div[@class="page-box house-lst-page-box"]/@page-data').extract_first()
        page_info = json.loads(page_info)  # the attribute value is plain JSON; no need for eval()
        cur_page = page_info.get('curPage')
        total_page = page_info.get('totalPage')
        if cur_page < total_page:
            next_url = parse.urljoin(response.url, next_url_model.format(page=cur_page + 1))
            yield Request(next_url, callback=self.parse, dont_filter=True)
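
For reference, pagination hinges on those two attributes of the page-box div: page-url is a URL template with a {page} placeholder and page-data is a small JSON object carrying curPage and totalPage. A minimal standalone sketch of how parse() combines them; the literal attribute values below are assumed examples of the shape, not captured data:

import json
from urllib import parse

# assumed example attribute values (shape only)
page_url_template = '/chengjiao/pg{page}/'
page_data = '{"totalPage":100,"curPage":1}'

info = json.loads(page_data)              # safer than eval() on page content
cur_page, total_page = info['curPage'], info['totalPage']

if cur_page < total_page:
    next_url = parse.urljoin('https://xa.lianjia.com/chengjiao/',
                             page_url_template.format(page=cur_page + 1))
    print(next_url)                       # https://xa.lianjia.com/chengjiao/pg2/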

item

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import datetime
import re

import scrapy
from scrapy.loader import ItemLoader
from scrapy.loader.processors import TakeFirst, MapCompose


class DangdangItemLoader(ItemLoader):
    default_output_processor = TakeFirst()


def name_convert(value):
    name, house_type, size = value.split(' ')
    return name


def house_type_convert(value):
    name, house_type, size = value.split(' ')
    return house_type


def house_size_convert(value):
    name, house_type, size = value.split(' ')
    return float(size.replace('平米', ''))


def house_id_convert(value):
    # the listing id is the run of digits embedded in the detail-page URL
    house_id = int(re.match(r'.*?(\d+).*', value).group(1))
    return house_id


def trans_int(value):
    return int(value)


def trans_data(value):
    return datetime.datetime.strptime(value, '%Y.%m.%d')


class LianjiaItem(scrapy.Item):
    # define the fields for your item here like:
    name = scrapy.Field(
        input_processor=MapCompose(name_convert)
        # output_processor=TakeFirst()
    )
    house_id = scrapy.Field(
        input_processor=MapCompose(house_id_convert)
    )
    house_type = scrapy.Field(
        input_processor=MapCompose(house_type_convert)
    )
    house_size = scrapy.Field(
        input_processor=MapCompose(house_size_convert)
    )
    money_all = scrapy.Field(
        input_processor=MapCompose(trans_int)
    )
    money_every = scrapy.Field(
        input_processor=MapCompose(trans_int)
    )
    success_data = scrapy.Field(
        input_processor=MapCompose(trans_data)
    )
    img = scrapy.Field()
    link = scrapy.Field()
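
The three title converters all split the same <a> text, which holds the community name, layout and size separated by spaces, and each keeps a different piece; MapCompose runs each extracted value through its converter and TakeFirst then collapses the resulting list to a single value. A quick standalone check with a made-up title string and detail link (the real text may differ slightly):

import re

# hypothetical title text in the "<name> <layout> <size>平米" form the converters expect
title = '某某小区 3室2厅 120.5平米'
name, house_type, size = title.split(' ')
print(name)                            # 某某小区
print(house_type)                      # 3室2厅
print(float(size.replace('平米', '')))  # 120.5

# house_id_convert pulls the run of digits out of the detail-page link (hypothetical URL)
link = 'https://xa.lianjia.com/chengjiao/101234567890.html'
print(int(re.match(r'.*?(\d+).*', link).group(1)))  # 101234567890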

pipelines

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
from twisted.enterprise import adbapi


class LianjiaPipeline(object):
    def process_item(self, item, spider):
        return item


class MysqlTwistedPipeline:
    # execute the inserts asynchronously through Twisted's adbapi connection pool
    def __init__(self, dbpool):
        self.dbpool = dbpool

    def process_item(self, item, spider):
        query = self.dbpool.runInteraction(self.do_insert, item)
        query.addErrback(self.handler_error, item, spider)
        return item

    def do_insert(self, cursor, item):
        # parameterized insert: let the MySQL driver quote the values so that
        # quotes in text fields cannot break the statement
        sql = (
            "insert into lianjia_ershoufang_xian_test "
            "(house_id, name, house_type, house_size, money_all, money_every, success_data, img, link) "
            "values (%s, %s, %s, %s, %s, %s, %s, %s, %s)"
        )
        cursor.execute(sql, (
            item.get('house_id'), item.get('name'), item.get('house_type'),
            item.get('house_size'), item.get('money_all'), item.get('money_every'),
            item.get('success_data'), item.get('img'), item.get('link'),
        ))

    def handler_error(self, failure, item, spider):
        print(failure)

    @classmethod
    def from_settings(cls, settings):
        from MySQLdb.cursors import DictCursor
        dbparms = dict(
            host=settings["MYSQL_HOST"],
            db=settings["MYSQL_DBNAME"],
            user=settings["MYSQL_USER"],
            passwd=settings["MYSQL_PASSWORD"],
            charset='utf8',
            cursorclass=DictCursor,
            use_unicode=True,
        )
        dbpool = adbapi.ConnectionPool("MySQLdb", **dbparms)
        return cls(dbpool)
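
One thing worth noting: the table definition at the end declares a UNIQUE index on house_id, so re-running the spider against rows that are already stored will raise duplicate-key errors, which here only surface through handler_error. A hedged sketch (not part of the original pipeline) of an idempotent variant of do_insert using MySQL's INSERT ... ON DUPLICATE KEY UPDATE:

    def do_insert(self, cursor, item):
        # upsert keyed on the UNIQUE house_id index: re-crawled rows update in place
        sql = (
            "insert into lianjia_ershoufang_xian_test "
            "(house_id, name, house_type, house_size, money_all, money_every, success_data, img, link) "
            "values (%s, %s, %s, %s, %s, %s, %s, %s, %s) "
            "on duplicate key update money_all = values(money_all), "
            "money_every = values(money_every), success_data = values(success_data)"
        )
        cursor.execute(sql, (
            item.get('house_id'), item.get('name'), item.get('house_type'),
            item.get('house_size'), item.get('money_all'), item.get('money_every'),
            item.get('success_data'), item.get('img'), item.get('link'),
        ))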

settings

# -*- coding: utf-8 -*-

# Scrapy settings for Lianjia project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'Lianjia'

SPIDER_MODULES = ['Lianjia.spiders']
NEWSPIDER_MODULE = 'Lianjia.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = 'Lianjia (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Pool of rotating proxy IPs used by the RandomProxy downloader middleware
PROXIES = [
    # {"ip_port": '112.80.248.75:80', "user_passwd": None},
    {"ip_port": '117.185.17.151:80', "user_passwd": None},
    {"ip_port": '124.236.111.11:80', "user_passwd": None},
    {"ip_port": '101.132.143.232:80', "user_passwd": None},
    {"ip_port": '111.13.100.91:80', "user_passwd": None},
    {"ip_port": '113.214.13.1:1080', "user_passwd": None},
]
# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 1
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False

# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
# }

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
#    'Lianjia.middlewares.LianjiaSpiderMiddleware': 543,
# }

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
    'Lianjia.middlewares.LianjiaDownloaderMiddleware': None,
    'Lianjia.middlewares.RandomUserAgent': 543,
    'Lianjia.middlewares.RandomProxy': 200,
}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
# }

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
   # 'Lianjia.pipelines.LianjiaPipeline': 300,
   'Lianjia.pipelines.MysqlTwistedPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

MYSQL_HOST = "xxx.xxx.xxx.xxx"
MYSQL_DBNAME = "xxx"
MYSQL_USER = "xxx"
MYSQL_PASSWORD = "xxx"

middlewares

# -*- coding: utf-8 -*-

# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
import base64

from scrapy import signals
import random
from .settings import PROXIES

class LianjiaSpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.

        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.

        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.

        # Should return either None or an iterable of Request, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn’t have a response associated.

        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class LianjiaDownloaderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.

        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.

        # Must either;
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.

        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


from fake_useragent import UserAgent

class RandomUserAgent(object):
    # attach a random User-Agent to every outgoing request
    def __init__(self):
        self.ua = UserAgent()  # build the UA pool once, not once per request

    def process_request(self, request, spider):
        request.headers['User-Agent'] = self.ua.random

# rotating proxy middleware
class RandomProxy:
    def process_request(self, request, spider):
        proxy = random.choice(PROXIES)
        if proxy["user_passwd"] is None:
            # proxy without authentication
            request.meta["proxy"] = "http://" + proxy["ip_port"]
        else:
            # base64-encode the "user:password" string for the Proxy-Authorization header
            base64_userpasswd = base64.b64encode(proxy["user_passwd"].encode("utf-8"))
            request.headers["Proxy-Authorization"] = "Basic " + base64_userpasswd.decode("utf-8")
            request.meta["proxy"] = "http://" + proxy["ip_port"]

    def process_response(self, request, response, spider):
        spider.logger.info('proxy used: %s', request.meta.get('proxy'))
        return response
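
For completeness, the user_passwd branch expects the credential string in user:password form. A small hedged example of what an authenticated entry in PROXIES would look like and what the middleware derives from it (host, port and credentials here are placeholders):

import base64

# hypothetical authenticated proxy entry for PROXIES in settings.py
proxy = {"ip_port": "10.0.0.1:8888", "user_passwd": "myuser:mypassword"}

token = base64.b64encode(proxy["user_passwd"].encode("utf-8")).decode("utf-8")
print("Proxy-Authorization: Basic " + token)     # header added to the request
print("proxy meta: http://" + proxy["ip_port"])  # value stored in request.meta["proxy"]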

 

sql

CREATE TABLE `lianjia_ershoufang_xian_test` (
	`id` INT(11) NOT NULL AUTO_INCREMENT,
	`createTime` DATETIME NULL DEFAULT CURRENT_TIMESTAMP COMMENT 'created at',
	`updateTime` TIMESTAMP NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'updated at',
	`house_id` BIGINT(20) NULL DEFAULT NULL COMMENT 'house id',
	`name` VARCHAR(50) NULL DEFAULT NULL COMMENT 'community name' COLLATE 'utf8mb4_bin',
	`house_type` VARCHAR(50) NULL DEFAULT NULL COMMENT 'layout' COLLATE 'utf8mb4_bin',
	`house_size` FLOAT NULL DEFAULT NULL COMMENT 'floor area',
	`money_all` INT(11) NULL DEFAULT NULL COMMENT 'total price',
	`money_every` INT(11) NULL DEFAULT NULL COMMENT 'unit price',
	`success_data` DATETIME NULL DEFAULT NULL COMMENT 'deal date',
	`img` VARCHAR(100) NULL DEFAULT NULL COMMENT 'floor-plan image link' COLLATE 'utf8mb4_bin',
	`link` VARCHAR(100) NULL DEFAULT NULL COMMENT 'listing detail link' COLLATE 'utf8mb4_bin',
	PRIMARY KEY (`id`),
	UNIQUE INDEX `house_id` (`house_id`)
)
COMMENT='Lianjia Xian second-hand housing transactions'
COLLATE='utf8mb4_bin'
ENGINE=InnoDB
AUTO_INCREMENT=3099
;
