Web Crawler: The Four Modules of an IP Proxy Pool and Their Implementation (Part 1)

The proxy pool consists of four modules: a storage module, a getter module, a tester module, and an interface module.

What each module does:
Storage module: uses a Redis sorted set to deduplicate proxies and track their status; it is also the central, foundational module that ties the other modules together.
Getter module: periodically crawls proxies from free-proxy websites and passes them to the storage module, which saves them to the database.
Tester module: periodically fetches all proxies from the storage module, tests each one, and updates a proxy's score according to the test result.
Interface module: exposes the service as a Web API that connects to the database and returns usable proxies over HTTP.
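Before diving into the code, the Redis sorted-set behaviour that the storage module relies on can be illustrated with a few redis-py calls. This is only a sketch: the key name and the sample proxy address are illustrative, and the real constants are defined in the module that follows.

import redis

# Sketch of the sorted-set primitives the storage module builds on.
# 'proxies' and '127.0.0.1:8080' are illustrative values only.
r = redis.StrictRedis(host='localhost', port=6379, db=5, decode_responses=True)
r.zadd('proxies', {'127.0.0.1:8080': 10})      # add a proxy with an initial score
r.zadd('proxies', {'127.0.0.1:8080': 100})     # members are unique, so re-adding only updates the score (deduplication)
print(r.zscore('proxies', '127.0.0.1:8080'))   # 100.0 -- the score doubles as a status flag
r.zincrby('proxies', -1, '127.0.0.1:8080')     # lower the score after a failed check
print(r.zrevrange('proxies', 0, -1, withscores=True))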

Implementation of the storage module (db.py):
import redis
from random import choice

MAX_SCORE = 100
MIN_SCORE = 0
INITIAL_SCORE = 10
REDIS_HOST = "localhost"
REDIS_PORT = 6379
DB_NUM = 5
REDIS_KEY = 'proxies'


class PoolEmptyError(Exception):
    """自定义ip代理池为空异常"""
    pass


class RedisClient(object):
    """redis数据库客户端类"""

    def __init__(self, host=REDIS_HOST, port=REDIS_PORT, db=DB_NUM):
        """
        Initialize the Redis connection
        :param host: Redis host
        :param port: Redis port
        :param db: Redis database number
        """
        self.db = redis.StrictRedis(host=host, port=port, db=db, decode_responses=True)

    def add(self, proxy, score=INITIAL_SCORE):
        """
        Add a proxy with the initial score (skipped if it already exists)
        :param proxy: proxy
        :param score: score
        :return: result of the add operation
        """
        if not self.db.zscore(REDIS_KEY, proxy):
            return self.db.zadd(REDIS_KEY, {proxy: score})

    def random(self):
        """
        Get a random usable proxy: prefer proxies with the maximum score; if none exist,
        fall back to the 100 highest-ranked proxies; otherwise raise PoolEmptyError
        :return: a random proxy
        """
        result = self.db.zrangebyscore(REDIS_KEY, MAX_SCORE, MAX_SCORE)
        if len(result):
            return choice(result)
        else:
            result = self.db.zrevrange(REDIS_KEY, 0, 100)
            if len(result):
                return choice(result)
            else:
                raise PoolEmptyError

    def decrease(self, proxy):
        """
        Decrease a proxy's score by one; remove the proxy once its score falls below the minimum
        :param proxy: proxy
        :return: the proxy's new score (or the result of removing it)
        """
        score = self.db.zscore(REDIS_KEY, proxy)
        if score and score > MIN_SCORE:
            print('Proxy', proxy, 'current score', score, 'decrease by 1')
            return self.db.zincrby(REDIS_KEY, -1, proxy)
        else:
            print("代理", proxy, "当前分数", score, "移除")
            return self.db.zrem(REDIS_KEY, proxy)

    def exists(self, proxy):
        """
        Check whether a proxy is already in the pool
        :param proxy: proxy
        :return: whether it exists
        """
        return self.db.zscore(REDIS_KEY, proxy) is not None

    def max(self, proxy):
        """
        Set the proxy's score to MAX_SCORE (called when a proxy passes a check)
        :param proxy: proxy
        :return: result of the update
        """
        print('Proxy', proxy, 'is usable, setting score to', MAX_SCORE)
        return self.db.zadd(REDIS_KEY, {proxy: MAX_SCORE})

    def count(self):
        """
        Get the number of proxies in the pool
        :return: count
        """
        return self.db.zcard(REDIS_KEY)

    def all(self):
        """
        Get all proxies
        :return: list of all proxies with their scores
        """
        return self.db.zrangebyscore(REDIS_KEY, MIN_SCORE, MAX_SCORE, withscores=True)
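
As a quick sanity check, the storage module can be exercised on its own. The snippet below is only a minimal usage sketch, and the proxy address is a placeholder:

# Minimal usage sketch of RedisClient; '127.0.0.1:8080' is a placeholder.
if __name__ == '__main__':
    client = RedisClient()
    client.add('127.0.0.1:8080')              # stored with INITIAL_SCORE
    print(client.exists('127.0.0.1:8080'))    # True
    client.max('127.0.0.1:8080')              # mark as verified (score = MAX_SCORE)
    print(client.random())                    # one of the highest-scored proxies
    client.decrease('127.0.0.1:8080')         # penalise after a failed check
    print(client.count(), client.all())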

Getter module:
from utils import get_page
import re
from pyquery import PyQuery as pq


class ProxyMetaclass(type):
    """
    Metaclass that adds two class attributes: __CrawlFunc__, the list of crawl_* method
    names, and __CrawlFuncCount__, their count
    :return: the newly created class
    """
    def __new__(cls, name, base, attrs):
        count = 0
        attrs['__CrawlFunc__'] = []
        for k, v in attrs.items():
            if k.startswith('crawl_'):  # collect only the crawl_* methods
                attrs['__CrawlFunc__'].append(k)
                count += 1
        attrs["__CrawlFuncCount__"] = count
        return type.__new__(cls, name, base, attrs)


class Crawler(object, metaclass=ProxyMetaclass):
    """
    Crawler class: each crawl_* method scrapes proxies from one source website
    """
    def get_proxies(self, callback):
        """
        Collect the proxies yielded by the given crawl_* generator method into a list and return it
        :param callback: name of a crawl_* method, e.g. crawl_daili66
        :return: list of proxies
        """
        proxies = []
        for proxy in getattr(self, callback)():  # look up the crawl_* method by name instead of using eval()
            print('Got proxy', proxy)
            proxies.append(proxy)
        return proxies

    def crawl_daili66(self, page_count=4):
        """
        66ip.cn free proxies
        :param page_count: number of pages to crawl
        :return: generator yielding proxies as ip:port
        """
        start_url = 'http://www.66ip.cn/{}.html'
        headers = {
            "Cookie": "__jsluid=d67ba1bf483046976b81a6b33a372b1c; Hm_lvt_1761fabf3c988e7f04bec51acd4073f4=1555505708; Hm_lpvt_1761fabf3c988e7f04bec51acd4073f4=1555508780",
            "Host": "www.66ip.cn",
        }
        urls = [start_url.format(page) for page in range(1, page_count + 1)]
        count = page_count
        for url in urls:
            headers_referer = {
                "Referer": "http://www.66ip.cn/{}".format(count - page_count + 1)
            }
            # Note: headers.update(headers_referer) returns None; it merges the Referer header into headers in place
            headers.update(headers_referer)
            page_count -= 1
            html = get_page(url, options=headers)
            if html:
                doc = pq(html)
                trs = doc('.containerbox table tr:gt(0)').items()
                for tr in trs:
                    ip = tr.find('td:nth-child(1)').text()
                    port = tr.find('td:nth-child(2)').text()
                    yield ":".join([ip, port])

    def crawl_kuaidaili(self):
        """
        kuaidaili.com free proxies
        :return: generator yielding proxies as ip:port
        """
        for i in range(1, 4):
            start_url = 'http://www.kuaidaili.com/free/inha/{}/'.format(i)
            html = get_page(start_url)
            if html:
                ip_address = re.compile('<td data-title="IP">(.*?)</td>')
                re_ip_address = ip_address.findall(html)
                port = re.compile('<td data-title="PORT">(.*?)</td>')
                re_port = port.findall(html)
                for address, port in zip(re_ip_address, re_port):
                    address_port = address + ':' + port
                    yield address_port.replace(' ', '')

    def crawl_ip3366(self):
        """
        云代理
        :return:
        """
        for page in range(1, 4):
            start_url = 'http://www.ip3366.net/free/?stype=1&page={}'.format(page)
            html = get_page(start_url)
            ip_address = re.compile('<tr>\s*<td>(.*?)</td>\s*<td>(.*?)</td>')
            # \s * 匹配空格,起到换行作用
            re_ip_address = ip_address.findall(html)
            for address, port in re_ip_address:
                result = address + ':' + port
                yield result.replace(' ', '')

    def crawl_xicidaili(self):
        """
        xicidaili.com domestic high-anonymity proxies
        :return: generator yielding proxies as ip:port
        """
        for i in range(1, 3):
            start_url = 'http://www.xicidaili.com/nn/{}'.format(i)
            headers = {
                'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
                'Cookie': '_free_proxy_session=BAh7B0kiD3Nlc3Npb25faWQGOgZFVEkiJWRjYzc5MmM1MTBiMDMzYTUzNTZjNzA4NjBhNWRjZjliBjsAVEkiEF9jc3JmX3Rva2VuBjsARkkiMUp6S2tXT3g5a0FCT01ndzlmWWZqRVJNek1WanRuUDBCbTJUN21GMTBKd3M9BjsARg%3D%3D--2a69429cb2115c6a0cc9a86e0ebe2800c0d471b3',
                'Host': 'www.xicidaili.com',
                'Referer': 'http://www.xicidaili.com/nn/3',
                'Upgrade-Insecure-Requests': '1',
            }
            html = get_page(start_url, options=headers)
            if html:
                find_trs = re.compile('<tr class.*?>(.*?)</tr>', re.S)
                trs = find_trs.findall(html)
                for tr in trs:
                    find_ip = re.compile(r'<td>(\d+\.\d+\.\d+\.\d+)</td>')
                    re_ip_address = find_ip.findall(tr)
                    find_port = re.compile(r'<td>(\d+)</td>')
                    re_port = find_port.findall(tr)
                    for address, port in zip(re_ip_address, re_port):
                        address_port = address + ':' + port
                        yield address_port.replace(' ', '')

    def crawl_iphai(self):
        """ip海"""
        start_url = 'http://www.iphai.com/'
        html = get_page(start_url)
        if html:
            find_tr = re.compile('<tr>(.*?)</tr>', re.S)
            trs = find_tr.findall(html)
            for s in range(1, len(trs)):
                find_ip = re.compile(r'<td>\s+(\d+\.\d+\.\d+\.\d+)\s+</td>', re.S)
                re_ip_address = find_ip.findall(trs[s])
                find_port = re.compile(r'<td>\s+(\d+)\s+</td>', re.S)
                re_port = find_port.findall(trs[s])
                for address, port in zip(re_ip_address, re_port):
                    address_port = address + ':' + port
                    yield address_port.replace(' ', '')

    def crawl_data5u(self):
        """无忧代理"""
        start_url = 'http://www.data5u.com/free/gngn/index.shtml'
        headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',
            'Cache-Control': 'max-age=0',
            'Connection': 'keep-alive',
            'Cookie': 'JSESSIONID=47AA0C887112A2D83EE040405F837A86',
            'Host': 'www.data5u.com',
            'Referer': 'http://www.data5u.com/free/index.shtml',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.108 Safari/537.36',
        }
        html = get_page(start_url, options=headers)
        if html:
            ip_address = re.compile(r'<span><li>(\d+\.\d+\.\d+\.\d+)</li>.*?<li class="port.*?>(\d+)</li>', re.S)
            re_ip_address = ip_address.findall(html)
            for address, port in re_ip_address:
                result = address + ':' + port
                yield result.replace(' ', '')


if __name__ == "__main__":
    c = Crawler()
    # Test the crawlers one at a time
    print(c.get_proxies("crawl_daili66"))
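
Rather than testing the crawlers one by one as above, the list built by ProxyMetaclass lets all registered crawl_* methods be iterated generically, which is roughly how a getter would combine Crawler with the storage module. A minimal sketch, reusing RedisClient from db.py:

# Sketch: walk every crawl_* method collected by ProxyMetaclass and push the
# results into the storage module.
from db import RedisClient

def fetch_all():
    redis = RedisClient()
    crawler = Crawler()
    print(crawler.__CrawlFuncCount__, 'crawlers registered:', crawler.__CrawlFunc__)
    for callback in crawler.__CrawlFunc__:
        for proxy in crawler.get_proxies(callback):
            redis.add(proxy)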

Shared module (utils.py):
import requests
from requests.exceptions import ConnectionError
from db import RedisClient

base_headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36',
    'Accept-Encoding': 'gzip, deflate, sdch',
    'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7'
}


def get_page(url, options={}, proxy=None):
    """
    Fetch a page, routing the request through a random proxy taken from the pool
    :param url: URL to fetch
    :param options: extra headers merged into base_headers
    :return: page text on HTTP 200, otherwise None
    """
    headers = dict(base_headers, **options)
    print('Crawling', url)

    try:
        # Note: get_page itself pulls a proxy from the pool, so RedisClient.random()
        # raises PoolEmptyError while the pool is still empty.
        redis = RedisClient()
        proxies = {
            "http": "http://" + redis.random(),
        }
        response = requests.get(url, headers=headers, proxies=proxies)
        print('Crawled successfully', url, response.status_code)
        if response.status_code == 200:
            return response.text
    except ConnectionError:
        print('Failed to crawl', url)
        return None
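
For reference, extra headers passed through options are merged into base_headers before the request is made. A minimal call might look like the following (the URL and Host header are examples only):

# Minimal usage sketch of get_page; the URL and extra header are examples only.
if __name__ == '__main__':
    html = get_page('http://www.66ip.cn/1.html', options={'Host': 'www.66ip.cn'})
    print('page fetched' if html else 'fetch failed')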

More content in the next post of this series.
Reference:
GitHub: https://github.com/Python3WebSpider/ProxyPool
