Custom spider file: jenkinsIP.py
# -*- coding: utf-8 -*-
import re
import datetime

import scrapy
from scrapy import Selector
from bs4 import BeautifulSoup as bs
from jenkins_IP.items import JenkinsIpItem
# import sys
# reload(sys)
# sys.setdefaultencoding('utf8')  # Python 2 relic; not needed on Python 3

class JenkinsipSpider(scrapy.Spider):
    name = 'jenkinsIP'
    # allowed_domains = ['172.**.**.**:8089']  # with the POST request below, this filter can keep the custom callback from running
    # start_urls = ['http://172.**.**.**:8089/jenkins/computer/']

    def start_requests(self):
        '''Fetch the node list with a POST request.'''
        url = "http://172.**.**.**:8089/jenkins/ajaxExecutors"
        data = {}  # empty form payload
        iphtml = scrapy.FormRequest(url=url, formdata=data, callback=self.parse)
        yield iphtml  # the node-list page is handed to parse() as its response

    def parse(self, response):
        # print(response.body)
        # html = bs(response.body, 'lxml')
        # print(html)
        iplist = response.selector.xpath('//th/a[@href]')  # [@href], not ["href"]: select only <a> tags that carry a href
        # iplist = response.selector.xpath('//a/@href').extract()
        lists = []
        for it in iplist:
            href = it.xpath('@href').extract()[0]
            # print(href)
            lists.append(href)
        print(len(iplist))
        for i in lists:
            if i == "/jenkins/computer/(master)/":
                continue
            url = "http://172.**.**.**:8089" + i
            urls = response.urljoin(url)  # url is already absolute, so urljoin returns it unchanged
            print("the url is :" + urls)
            ### build the Request for the next URL to crawl ###
            yield scrapy.Request(url=urls, callback=self.parse_ipreason)

    def parse_ipreason(self, response):
        '''Parse the details of one node: offline reason and so on.'''
        print("start get ip info:")
        # print(response)
        item = JenkinsIpItem()
        ip = response.selector.xpath('//h1/text()').extract()[0]
        # node names encode the IP with underscores, e.g. 172_16_0_10 -> 172.16.0.10
        item['ip'] = re.sub('_', '.', re.findall('[0-9]+_[0-9]+_[0-9]+_[0-9]+', ip)[0]).strip()
        print(item['ip'])
        item['type'] = response.selector.xpath('//h1/img/@src').extract()[0].strip()
        if "computer-x.png" == item['type'].split('/')[-1]:
            item['type'] = "offline"
            item['time'] = response.selector.css('div[class="timestamp"]::text').getall()[0]  # e.g. u'Aug 18, 2020 4:42:31 PM'
            # print(item['time'].split(' ')[0:3])
            # print(datetime.datetime.strptime(' '.join(item['time'].split(' ')[0:3]), '%b %d, %Y').strftime('%Y-%m-%d'))
            message = response.selector.css('div[class="message"]::text').getall()[0]  # e.g. u'<user> : <reason>'
            item['reason'] = message.split(':')[1].strip()
            item['user'] = message.split(':')[0].strip()
        else:
            item['type'] = "online"
            item['reason'] = " "
            item['time'] = " "
            item['user'] = " "
        # print(item['type'])
        yield item
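The two XPath expressions above are easy to get wrong, so it is worth checking them outside the spider first. A minimal offline sketch, assuming a copy of the /jenkins/computer/ page has been saved locally as computer_list.html (a hypothetical filename):
# quick offline check of the link-extraction XPath
from scrapy import Selector

html = open('computer_list.html').read()  # hypothetical saved copy of the node-list page
sel = Selector(text=html)
for a in sel.xpath('//th/a[@href]'):
    print(a.xpath('@href').extract_first())  # expect paths like /jenkins/computer/<node>/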
Defining the data to collect: items.py
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy


class JenkinsIpItem(scrapy.Item):
    '''The fields we want to collect.'''
    # define the fields for your item here like:
    ip = scrapy.Field()
    type = scrapy.Field()
    reason = scrapy.Field()
    time = scrapy.Field()
    user = scrapy.Field()
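Scrapy items behave like dicts, which is what the spider and the pipelines below rely on; a short usage sketch with made-up values:
from jenkins_IP.items import JenkinsIpItem

item = JenkinsIpItem()
item['ip'] = '172.16.0.10'  # hypothetical value
item['type'] = 'offline'
print(dict(item))           # {'ip': '172.16.0.10', 'type': 'offline'}
# item['hostname'] = 'x'    # would raise KeyError: undeclared fields are rejected, which catches typos early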
Data filtering: pipelines.py
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
####### Filter and clean the items handed back by the spider ########
# Several pipeline classes can be defined here; their execution order is configured in settings.py.
# Typical jobs: storing to a database, dropping empty records, and so on.
import pymysql
from scrapy.exceptions import DropItem  # exception used to discard an item
from scrapy.pipelines.images import ImagesPipeline  # subclass this to handle images; ordinary pipelines usually subclass object


class IsNullPipeline(object):
    def process_item(self, item, spider):
        '''Required method. Drop records that have no IP.'''
        if not item['ip']:
            raise DropItem("missing ip")  # discard the item
        else:
            return item  # the item is passed on to the next pipeline class

class JenkinsIpPipeline(object):
    '''This pipeline stores the items in a MySQL database.'''
    def __init__(self, myhost, mydb, password, myuser, myport):
        self.db = mydb
        self.host = myhost
        self.passwd = password
        self.user = myuser
        self.port = myport
        print("#" * 40)
        print(self.db, self.host, self.passwd, self.port, self.user)

    #### Inject the database settings configured in settings.py: dependency injection ##
    @classmethod
    def from_crawler(cls, crawler):
        '''cls is the current class; crawler gives access to the whole project.'''
        # print(crawler.settings.get("HOST"))
        return cls(
            myhost=crawler.settings.get("HOST"),  # hand the values from settings.py to __init__; setting names must be upper-case
            mydb=crawler.settings.get("MYDB"),
            password=crawler.settings.get("PSWD"),
            myuser=crawler.settings.get("USER"),
            myport=crawler.settings.get("PORT"),
        )

    def process_item(self, item, spider):
        '''Required method.'''
        print("*" * 10)
        # parameterized query: the driver quotes and escapes the values
        sqls = "insert into mytable(***) values(%s,%s,%s,%s,%s)"  # replace *** with your column names
        self.cursor.execute(sqls, (item['ip'], item['time'], item['type'], item['reason'], item['user']))
        self.conn.commit()
        return item

    def open_spider(self, spider):
        '''Plays the role of an initializer; called first, when the spider opens.'''
        self.conn = pymysql.connect(host=self.host, user=self.user, password=self.passwd,
                                    database=self.db, charset="utf8", port=int(self.port))
        self.cursor = self.conn.cursor()

    def close_spider(self, spider):
        '''Plays the role of a destructor; called automatically at the end.'''
        self.cursor.close()
        self.conn.close()

class imagePipeline(ImagesPipeline):
    '''This pipeline downloads images.'''
    def __init__(self):
        pass
    # look up the concrete methods to override in the ImagesPipeline docs
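The insert statement in JenkinsIpPipeline leaves the column list as ***; whatever the real table is, it needs five columns matching the item fields. A one-off sketch that creates such a table, with hypothetical column types (table name, host and port are assumptions, adjust them to your environment):
import pymysql

conn = pymysql.connect(host="localhost", user="root", password="",
                       database="mydb", charset="utf8", port=3306)
with conn.cursor() as cursor:
    cursor.execute("""
        CREATE TABLE IF NOT EXISTS mytable (
            `ip`     VARCHAR(15),
            `time`   VARCHAR(64),
            `type`   VARCHAR(16),
            `reason` TEXT,
            `user`   VARCHAR(64)
        )
    """)
conn.commit()
conn.close()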
Middleware handling: middlewares.py
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class JenkinsIpSpiderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Request, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class JenkinsIpDownloaderMiddleware(object):
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.
        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        # Must either:
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
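The template hooks above do not change anything yet. To give a feel for what a working override looks like, here is a minimal downloader-middleware sketch that stamps a header onto every outgoing request; the header name is made up for illustration:
class StampHeaderMiddleware(object):
    '''Minimal sketch: tag every request with the spider's name.'''
    def process_request(self, request, spider):
        # setdefault only adds the header when it is not already present
        request.headers.setdefault('X-Crawler', spider.name)  # hypothetical header name
        return None  # None: continue processing this request normally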
Basic configuration: settings.py
# -*- coding: utf-8 -*-
# Scrapy settings for jenkins_IP project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'jenkins_IP'
SPIDER_MODULES = ['jenkins_IP.spiders']
NEWSPIDER_MODULE = 'jenkins_IP.spiders'
##### Database settings go here; the names must be upper-case ####
HOST="localhost"
MYDB="mydb"
PORT="8080"
USER="root"
PSWD=""
#
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'jenkins_IP (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False  # do not honor robots.txt
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
    'Content-Type': 'text/html;charset=UTF-8',
    # 'Transfer-Encoding' and 'X-Content-Encoding-Over-Network' are response
    # headers, not request headers, so they stay disabled:
    #'Transfer-Encoding': 'chunked',
    #'X-Content-Encoding-Over-Network': 'gzip',
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36'
}
### directory where downloaded files are stored ##
IMAGES_STORE='./images'
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'jenkins_IP.middlewares.JenkinsIpSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
# # 'jenkins_IP.middlewares.JenkinsIpDownloaderMiddleware': 543,
# # }
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
#### This decides the pipeline execution order: larger numbers run later ###
ITEM_PIPELINES = {
#'jenkins_IP.pipelines.IsNullPipeline': 300,
'jenkins_IP.pipelines.JenkinsIpPipeline': 301,
# 'jenkins_IP.pipelines.imagePipeline': 302,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
#### Also export the scraped items to a file ####
FEED_URI = './ip.csv'
FEED_FORMAT = 'csv'
FEED_EXPORT_ENCODING = 'utf-8'  # 'ansi' is not a Python codec; 'gbk' also works for Excel on Chinese Windows
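Note that on Scrapy 2.1 and later, FEED_URI and FEED_FORMAT are deprecated in favor of a single FEEDS dict; the equivalent export configuration would look roughly like this:
# Scrapy >= 2.1 equivalent of the FEED_* settings above
FEEDS = {
    './ip.csv': {
        'format': 'csv',
        'encoding': 'utf-8',
    },
}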
Invoking the spider: main.py
# -*- coding: utf-8 -*-
from scrapy import cmdline  # lets us run scrapy command lines from code

cmdline.execute(['scrapy', 'crawl', 'jenkinsIP'])  # run the jenkinsIP spider
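cmdline.execute hands the whole process over to the Scrapy CLI. A sketch of an alternative runner that stays in ordinary Python, using Scrapy's CrawlerProcess with the project settings loaded explicitly:
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

process = CrawlerProcess(get_project_settings())  # reads settings.py
process.crawl('jenkinsIP')                        # same spider name as `scrapy crawl`
process.start()                                   # blocks until the crawl finishes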
PS: the from_crawler hook used in the pipeline above is a fairly flexible way to inject configuration data.
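Besides the from_crawler route, a spider can also read the merged settings directly and override individual values per spider; a brief sketch (the spider name and the DOWNLOAD_DELAY value are just examples):
import scrapy

class ExampleSpider(scrapy.Spider):
    name = 'example'
    custom_settings = {'DOWNLOAD_DELAY': 1}  # per-spider override of settings.py

    def parse(self, response):
        # every running spider has access to the merged settings object
        host = self.settings.get("HOST")
        self.logger.info("configured DB host: %s", host)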