利用scrapy框架结合redis分布式爬虫
#创建项目
scrapy startproject homepro
#根据提示进入指定目录创建爬虫
scrapy genspider home example.com #spider爬虫
scrapy genspider -t crawl home example.com #crawlspider爬虫
其他不说,直接上代码
items.py代码段
import scrapy
class HomeproItem(scrapy.Item):
    """Item holding one rental-listing record scraped from the site."""

    city = scrapy.Field()       # city name
    title = scrapy.Field()      # listing title
    rentway = scrapy.Field()    # rental mode (e.g. whole / shared)
    price = scrapy.Field()      # price text
    housetype = scrapy.Field()  # layout / house type
    area = scrapy.Field()       # floor area
    address = scrapy.Field()    # address
    traffic = scrapy.Field()    # transport / traffic info
#pipelines.py代码段
from scrapy.utils.project import get_project_settings
class HomeproPipeline(object):
    """Default no-op pipeline: hands every item through unchanged."""

    def process_item(self, item, spider):
        # Nothing to persist here; the storage pipelines below do the work.
        return item
import sqlite3
from scrapy.utils.project import get_project_settings
class Sqlite3Pipeline(object):
    """Persist scraped items into a local SQLite database.

    The database file and table name could also be read from the project
    settings (see the commented-out lines in open_spider); here they are
    hard-coded for simplicity. The target table must already exist, e.g.:

        CREATE TABLE dameo(city, title, rentway, price, housetype,
                           area, address, traffic);
    """

    def open_spider(self, spider):
        # Alternatively take SQLITE_FILE / SQLITE_TABLE from settings:
        # settings = get_project_settings()
        # name = settings['SQLITE_TABLE']
        # self.db = sqlite3.connect(settings['SQLITE_FILE'])
        #
        # BUG FIX: the original passed the bare name `home.db` (a NameError
        # at runtime); connect() needs a string path.
        self.db = sqlite3.connect('home.db')
        self.cur = self.db.cursor()

    def close_spider(self, spider):
        self.db.close()

    def process_item(self, item, spider):
        self.save_to_sqlite(item)
        return item

    def save_to_sqlite(self, item):
        # BUG FIX: use a parameterized query instead of %-string building —
        # the old version broke on values containing quotes and was open to
        # SQL injection (the author's commented-out code already hinted at this).
        sql = ('insert into dameo(city,title,rentway,price,housetype,'
               'area,address,traffic) values(?,?,?,?,?,?,?,?)')
        params = (item['city'], item['title'], item['rentway'], item['price'],
                  item['housetype'], item['area'], item['address'], item['traffic'])
        try:
            self.cur.execute(sql, params)
            self.db.commit()
        except Exception as e:
            # Best-effort storage: log the error, roll back, keep crawling.
            print(e)
            self.db.rollback()
        return item
#也可以写入mysql
import pymysql
class mysqlPipeline(object):
    """Persist scraped items into a MySQL table (zufang) via pymysql.

    The connection placeholders ('主机' etc.) must be replaced with real
    credentials before use.
    """

    def open_spider(self, spider):
        # BUG FIX: pymysql.connect takes `password=` (or `passwd=`), not
        # `pwd=` — the original raised TypeError. Also removed the stray
        # 't' from the user placeholder.
        self.connect = pymysql.connect(host='主机', port=3306, user='用户名',
                                       password='密码', database='数据库',
                                       charset='utf8')

    def close_spider(self, spider):
        self.connect.close()

    def process_item(self, item, spider):
        self.save_mysql(item)
        return item

    def save_mysql(self, item):
        cursor = self.connect.cursor()
        # BUG FIX: the original statement had empty column/value lists
        # ('insert into zufang()') and stored nothing; use a parameterized
        # insert covering every item field.
        sql = ('insert into zufang(city,title,rentway,price,housetype,'
               'area,address,traffic) values(%s,%s,%s,%s,%s,%s,%s,%s)')
        params = (item['city'], item['title'], item['rentway'], item['price'],
                  item['housetype'], item['area'], item['address'], item['traffic'])
        try:
            cursor.execute(sql, params)
            self.connect.commit()
        except Exception as e:
            print(e)
            self.connect.rollback()
#写入mongodb
import pymongo
class mongodbPipeline(object):
    """Persist scraped items into MongoDB (database `anjuke`, collection `zufang`)."""

    def open_spider(self, spider):
        # '主机' is a placeholder — replace with the real MongoDB host.
        self.client = pymongo.MongoClient(host='主机', port=27017)

    def close_spider(self, spider):
        self.client.close()

    def process_item(self, item, spider):
        db = self.client.anjuke   # database
        clo = db.zufang           # collection
        # BUG FIX: Collection.insert() was deprecated and removed in
        # pymongo 4.x; insert_one() is the supported single-document API.
        clo.insert_one(dict(item))
        return item
# -*- coding: utf-8 -*-
# Scrapy settings for homepro project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
# Use the scrapy-redis scheduler so the request queue is shared via redis
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
# Use the scrapy-redis duplicate filter (request fingerprints kept in redis)
DUPEFILTER_CLASS = 'scrapy_redis.dupefilter.RFPDupeFilter'
# Queue class that orders the URLs to crawl.
# Default: priority order (Scrapy's default) — a non-FIFO/LIFO queue backed
# by a redis sorted set.
# NOTE(review): newer scrapy_redis releases renamed SpiderPriorityQueue to
# PriorityQueue — confirm against the installed version.
SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.SpiderPriorityQueue'
BOT_NAME = 'homepro'
SPIDER_MODULES = ['homepro.spiders']
NEWSPIDER_MODULE = 'homepro.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'homepro.middlewares.HomeproSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'homepro.middlewares.HomeproDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'homepro.pipelines.HomeproPipeline': 300,
# 'scrapy_redis.pipelines.RedisPipeline': 300,
'homepro.pipelines.Sqlite3Pipeline': 301,
}
SQLITE_FILE = 'sqlite.db'
SQLITE_TABLE = 'dameo'
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
# Redis server of the master node (uncomment and set on slave machines):
# REDIS_HOST = '10.8.153.73'
# REDIS_PORT = 6379
DEFAULT_REQUEST_HEADERS = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Language': 'zh-CN,zh;q=0.8',
'Connection': 'keep-alive',
'Accept-Encoding': 'gzip, deflate, sdch'
}
#爬虫代码
# -*- coding: utf-8 -*-
import scrapy
from homepro.items import HomeproItem
from scrapy_redis.spiders import RedisCrawlSpider
# scrapy.Spider
class HomeSpider(RedisCrawlSpider):
    """Distributed spider for zu.fang.com rental listings.

    Start URLs are not hard-coded: they are pushed into redis under
    `redis_key` (e.g. `lpush homespider:start_urls <url>`), so several
    worker machines can share one crawl frontier.
    """
    name = 'home'
    allowed_domains = ['zu.fang.com']
    # start_urls = ['http://zu.fang.com/cities.aspx']
    redis_key = 'homespider:start_urls'

    def parse(self, response):
        """Parse the city index page and follow each city link."""
        hrefs = response.xpath('//div[@class="onCont"]/ul/li/a/@href').extract()
        for href in hrefs:
            # hrefs on the index page are protocol-relative (//...)
            href = 'http:' + href
            yield scrapy.Request(url=href, callback=self.parse_city, dont_filter=True)

    def parse_city(self, response):
        """Read a city's total page count and queue every listing page."""
        # The pager text reads "共N页" (N pages in total); strip the CJK
        # characters to keep just the number.
        page_num = response.xpath('//div[@id="rentid_D10_01"]/span[@class="txt"]/text()').extract()[0].strip('共页')
        # BUG FIX: range(1, N) skipped the last page — iterate 1..N inclusive.
        for page in range(1, int(page_num) + 1):
            if page == 1:
                url = response.url
            else:
                # NOTE(review): the page-number offset (page + 30 ->
                # house/i3X) mirrors the site's URL scheme — confirm
                # against live pagination.
                url = response.url + 'house/i%d' % (page + 30)
            print('*' * 100)
            print(url)
            yield scrapy.Request(url=url, callback=self.parse_houseinfo, dont_filter=True)

    def parse_houseinfo(self, response):
        """Extract one HomeproItem per listing card on a results page."""
        divs = response.xpath('//dd[@class="info rel"]')
        for info in divs:
            city = info.xpath('//div[@class="guide rel"]/a[2]/text()').extract()[0].rstrip("租房")
            title = info.xpath('.//p[@class="title"]/a/text()').extract()[0]
            rentway = info.xpath('.//p[@class="font15 mt12 bold"]/text()')[0].extract().replace(" ", '').lstrip('\r\n')
            housetype = info.xpath('.//p[@class="font15 mt12 bold"]/text()')[1].extract().replace(" ", '')
            area = info.xpath('.//p[@class="font15 mt12 bold"]/text()')[2].extract().replace(" ", '')
            addresses = info.xpath('.//p[@class ="gray6 mt12"]//span/text()').extract()
            address = '-'.join(addresses)
            try:
                des = info.xpath('.//p[@class ="mt12"]//span/text()').extract()
                traffic = '-'.join(des)
            except Exception:
                traffic = "暂无详细信息"
            p_name = info.xpath('.//div[@class ="moreInfo"]/p/text()').extract()[0]
            p_price = info.xpath('.//div[@class ="moreInfo"]/p/span/text()').extract()[0]
            price = p_price + p_name
            item = HomeproItem()
            item['city'] = city
            item['title'] = title
            item['rentway'] = rentway
            item['price'] = price
            item['housetype'] = housetype
            item['area'] = area
            item['address'] = address
            item['traffic'] = traffic
            yield item
然后把代码发给其他附属机器,分别启动。从机的redis-cli连接主服务器的redis-server:
redis-cli -h 主服务器ip
#主服务器先启动redis-server
#再启动redis-cli
lpush homespider:start_urls 起始的url