Task description:
Crawl free proxy IPs from sites such as Xici Proxy (西刺代理), Yun Proxy (云代理), and Kuai Proxy (快代理), store them in a database, verify them, and expose them through an API.
Task breakdown:
- Send HTTP requests, parse the responses, extract the free proxy IPs, and store them.
- Keep the proxies in a Redis sorted set so they can be fetched by priority (see the sketch after this list).
- Verify each proxy IP and raise or lower its priority according to the result.
- Build a Flask API.
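A minimal sketch of the sorted-set idea, assuming a local Redis instance; the key name, score values, and helper functions below are illustrative and not taken from the project code:
import redis

# Hypothetical constants, for illustration only.
PROXIES_KEY = 'proxies'      # sorted-set key
INITIAL_SCORE = 50           # score given to a freshly crawled proxy
MAX_SCORE = 100              # score of a proxy that just passed verification

r = redis.StrictRedis(host='localhost', port=6379, db=0, decode_responses=True)

def add_proxy(proxy):
    # nx=True only inserts new members, so a re-crawled proxy
    # keeps the score it earned through verification.
    r.zadd(PROXIES_KEY, {proxy: INITIAL_SCORE}, nx=True)

def mark_valid(proxy):
    # A proxy that passed verification gets the highest priority.
    r.zadd(PROXIES_KEY, {proxy: MAX_SCORE})

def mark_invalid(proxy):
    # A failed check lowers the priority; drop the proxy once it reaches 0.
    score = r.zincrby(PROXIES_KEY, -10, proxy)
    if score <= 0:
        r.zrem(PROXIES_KEY, proxy)

def best_proxies(n=10):
    # Highest-scoring proxies first.
    return r.zrevrange(PROXIES_KEY, 0, n - 1)
In this scheme the downloader would call add_proxy, the verifier mark_valid/mark_invalid, and the Flask API would serve the result of best_proxies.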
1 Component overview
1) spider: sends HTTP requests and parses the responses
2) proxies_client: handles all interaction with Redis
3) proxies_downloader: downloads proxy IPs and stores them
4) proxies_verify: checks whether the proxy IPs still work
5) proxies_api: Flask API
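Based on the import path used below (proxies_pool.spider.base) and the component names above, a plausible project layout is sketched here; the exact file and package arrangement is an assumption, not taken from the original:
proxies_pool/
    spider/
        base.py              # BaseSpider and ProxyItem
        kuaidaili.py         # KuaiSpider
        xicidaili.py         # XiciSpider
    proxies_client.py        # Redis access layer
    proxies_downloader.py    # crawl and store proxies
    proxies_verify.py        # periodic validity checks
    proxies_api.py           # Flask API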
2 The spider component
1) base.py
base.py wraps the basic request-sending logic and some shared configuration, and serves as the base class for every spider. To prevent misspelled field names when the data is written to the database, it also uses scrapy.Item to declare the allowed fields.
import logging
import time

import requests
import scrapy
from requests.exceptions import HTTPError

logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(threadName)s %(levelname)s %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S')


class ProxyItem(scrapy.Item):
    # Declaring the fields up front means a typo such as item['porxy']
    # raises a KeyError instead of silently storing a wrong key.
    scheme = scrapy.Field()
    proxy = scrapy.Field()


class BaseSpider(object):
    encoding = 'utf-8'
    base_url = ''            # URL template with a {} placeholder for the page number
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:25.0) Gecko/20100101 Firefox/25.0',
    }
    page = 1                 # number of list pages to crawl
    count = 0                # number of proxies parsed so far

    def __init__(self):
        msg = '[fetch] spider [{}] is downloading pages'.format(self.__class__.__name__)
        logging.info(msg)

    @property
    def start_urls(self):
        # Generate one URL per list page.
        for i in range(1, self.page + 1):
            yield self.base_url.format(i)

    def get_response(self, url):
        # Throttle a little so we do not hammer the free-proxy sites.
        time.sleep(0.5)
        response = requests.get(url, headers=self.headers, timeout=10)
        response.raise_for_status()          # turn 4xx/5xx responses into HTTPError
        return response.content.decode(self.encoding)

    def parse(self, response):
        # Subclasses override this to yield ProxyItem objects.
        yield None

    @property
    def proxies(self):
        for url in self.start_urls:
            logging.info('[fetch] downloading data from [{}]'.format(url))
            try:
                response = self.get_response(url)
            except HTTPError as e:
                logging.warning('[fetch] failed to download [{}]: {}'.format(url, e))
                continue
            for item in self.parse(response):
                yield item
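As a quick illustration of the point above, scrapy.Item rejects any field that was not declared, so a misspelled key fails loudly instead of corrupting the stored data (the values below are made up):
item = ProxyItem()
item['proxy'] = 'http://1.2.3.4:8080'   # declared field: accepted
item['porxy'] = 'http://1.2.3.4:8080'   # typo: raises KeyError ("ProxyItem does not support field: porxy")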
2) kuaidaili.py
Crawls free proxy IPs from https://www.kuaidaili.com/ and parses the results.
from lxml import etree

from proxies_pool.spider.base import BaseSpider, ProxyItem

page = 5    # number of list pages to crawl


class KuaiSpider(BaseSpider):
    def __init__(self):
        super().__init__()
        self.base_url = 'https://www.kuaidaili.com/free/inha/{}/'
        self.page = page

    def parse(self, response):
        html = etree.HTML(response)
        trs = html.xpath('.//*[@id="list"]/table/tbody/tr')
        for tr in trs:
            item = ProxyItem()
            # td[4] holds the protocol (HTTP/HTTPS), td[1] the IP, td[2] the port.
            item['scheme'] = tr.xpath('./td[4]/text()')[0].lower()
            item['proxy'] = item['scheme'] + '://' + tr.xpath('./td[1]/text()')[0] + ':' + \
                            tr.xpath('./td[2]/text()')[0]
            self.count += 1
            yield item


if __name__ == '__main__':
    spider = KuaiSpider()
    num = 0
    for item in spider.proxies:
        print(item)
        num += 1
    print(num)    # total number of proxies crawled
3) xicidaili.py
Crawls free proxy IPs from https://www.xicidaili.com/ and parses the results.
from lxml import etree

from proxies_pool.spider.base import BaseSpider, ProxyItem

page = 2    # number of list pages to crawl


class XiciSpider(BaseSpider):
    def __init__(self):
        super().__init__()
self