Python crawler: setting up the contents of the proxypool (proxy pool) folder

api.py

from flask import Flask,g

from proxypool.db import Reids_Client

__all__=['app']
app = Flask(__name__)
def get_conn():
    # reuse one Redis client per application context
    if not hasattr(g, 'redis_client'):
        g.redis_client = Reids_Client()
    return g.redis_client

@app.route('/')
def index():
    return '<h1>Welcome to the proxy pool system!</h1>'

@app.route('/get')
def get():
    conn = get_conn()
    proxy = conn.pop()
    if not proxy:
        return 'The proxy pool is empty!'
    if isinstance(proxy, bytes):
        proxy = proxy.decode('utf-8')
    return proxy

@app.route('/count')
def count():
    return str(get_conn().queue_len)
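
Once run.py is started, a crawler can pull proxies over HTTP. A minimal usage sketch, assuming the Flask app is listening on its default address 127.0.0.1:5000:

import requests

# /get pops one proxy from the pool, /count reports the pool size
proxy = requests.get('http://127.0.0.1:5000/get').text
print('pool size:', requests.get('http://127.0.0.1:5000/count').text)

# use the popped proxy for a request of our own
print(requests.get('http://www.baidu.com/',
                   proxies={'http': 'http://' + proxy}).status_code)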

db.py

import redis
from proxypool.setttings import PASSWORD, HOST, PORT, PROXIES


class Reids_Client(object):
    def __init__(self, host=HOST, port=PORT):
        # connect with a password only if one is configured
        if PASSWORD:
            self._db = redis.Redis(host=host, port=port, password=PASSWORD)
        else:
            self._db = redis.Redis(host=host, port=port)

    # append a proxy to the tail of the pool
    def put(self, proxy):
        self._db.rpush(PROXIES, proxy)

    # take count proxies from the head of the pool and remove them
    def get(self, count=1):
        # lrange: read the given range
        # ltrim: keep only the given range and drop the rest
        proxies = self._db.lrange(PROXIES, 0, count - 1)
        self._db.ltrim(PROXIES, count, -1)
        return proxies

    # take the newest proxy from the tail of the pool
    def pop(self):
        # data coming back from redis is bytes; guard against an empty pool
        proxy = self._db.rpop(PROXIES)
        return proxy.decode('utf-8') if proxy else None

    # size of the proxy pool
    @property
    def queue_len(self):
        return self._db.llen(PROXIES)

    def flush(self):
        # note: flushall wipes the whole Redis instance, not just this list
        self._db.flushall()

if __name__ == '__main__':
    r = Reids_Client()
    print(r.queue_len)
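
A quick interactive check of the client, assuming a local Redis server is running on the HOST/PORT configured in setttings.py:

from proxypool.db import Reids_Client

r = Reids_Client()
r.put('1.2.3.4:8080')     # rpush onto the tail of the proxies_new list
print(r.queue_len)        # llen  -> 1
print(r.pop())            # rpop from the tail -> '1.2.3.4:8080'
print(r.get(5))           # lrange + ltrim; the pool is now empty -> []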

demo_with.py

import requests

# response = requests.get('https://www.baidu.com/')
#
# with requests.get('https://www.baidu.com/') as response:
#     print(response.text)
#
# print(response.status_code)
# eval() executes code held in a string; here it calls a() by name.
# getter.py uses the same trick in get_raw_proxies to dispatch crawl_* methods.
str1 = 'a()'
def a():
    print('a11111')
eval(str1)
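
getattr is the more common way to call a method from its string name; a minimal sketch with a made-up class, just for illustration:

class Demo:
    def crawl_example(self):
        return ['1.1.1.1:80']

d = Demo()
# getattr returns the bound method for the given name, no eval needed
print(getattr(d, 'crawl_example')())   # ['1.1.1.1:80']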

run.py

from proxypool.api import app
from proxypool.Scheduler import Scheduler
def main():
    s = Scheduler()
    # start the validator and adder processes, then serve the API
    s.run()
    app.run()

if __name__ == '__main__':
    main()

setttings.py

# Redis password; leave empty for no password
PASSWORD = ''
# Redis host and port
HOST = 'localhost'
PORT = 6379

# name of the redis list that holds the pool
PROXIES = 'proxies_new'
# URL used when testing proxies
TSET_API = 'https://www.baidu.com/'
# request headers for the test URL
TEST_REQUEST_HEADERS = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36',
}
# timeout (seconds) when testing a proxy
TSET_TIME_OUT = 30
# interval (seconds) between validation cycles
CYCLE_VALID_TIME = 100
# minimum number of proxies the pool should hold
LOWER_THRESHOLD = 10
# maximum number of proxies the pool should hold
UPPER_THRESHOLD = 100
# interval (seconds) between pool-size checks
CYCLE_CHECK_TIME = 100

demo_with.py

import requests

# response = requests.get('https://www.baidu.com/')

# requests.Response is a context manager: the connection is released on exit
with requests.get('https://www.baidu.com/') as response:
    print(response.text)

# the response object is still usable after the with block
print(response.status_code)
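
The with form matters most for streamed downloads, where the connection stays open until the body is consumed or the response is closed. A small sketch:

import requests

# stream=True defers downloading the body; the with block guarantees the
# underlying connection is released even if the body is never read
with requests.get('https://www.baidu.com/', stream=True) as response:
    print(response.status_code)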

getter.py

import requests
from lxml import etree

class ProxyMetaclass(type):
    def __new__(cls, name, bases, attrs):
        # add a class attribute: a list of the names of every crawl_* method,
        # plus a counter, so callers can discover all crawl methods at runtime
        attrs['__CrwalFunc__'] = []
        count = 0
        for k, v in attrs.items():
            if 'crawl_' in k:
                attrs['__CrwalFunc__'].append(k)
                count += 1
        attrs['__CrwalCount__'] = count
        return type.__new__(cls, name, bases, attrs)


class FreeProxyGetter(object, metaclass=ProxyMetaclass):

    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36',
    }
    def get_raw_proxies(self, callback):
        '''
        Call a crawl method by its name (as a string) and collect the proxies it yields.
        :param callback: method name as a string, e.g. 'crawl_66ip'
        :return: list of proxies
        '''
        proxies = []
        for proxy in eval('self.{}()'.format(callback)):
            proxies.append(proxy)
        return proxies

    # crawl methods: every name must start with crawl_
    def crawl_66ip(self):
        '''
        url: http://www.66ip.cn/
        :return: generator of 'ip:port' strings
        '''
        base_url = 'http://www.66ip.cn/%s.html'
        for page in range(1, 20):
            response = requests.get(base_url % page, headers=self.headers)
            html = etree.HTML(response.text)
            ips = html.xpath('//tr[position()>1]/td[1]/text()')
            ports = html.xpath('//tr[position()>1]/td[2]/text()')
            if len(ips) == len(ports) and ips and ports:
                for ip, port in zip(ips, ports):
                    yield ip.strip() + ':' + port.strip()

    def crawl_ip3366(self):
        '''
        url: http://www.ip3366.net/?stype=1&page=1
        :return: generator of 'ip:port' strings
        '''
        base_url = 'http://www.ip3366.net/?stype=1&page=%s'
        for page in range(1, 11):
            response = requests.get(base_url % page, headers=self.headers)
            html = etree.HTML(response.text)
            ips = html.xpath('//tr/td[1]/text()')
            ports = html.xpath('//tr/td[2]/text()')
            if len(ips) == len(ports) and ips and ports:
                for ip, port in zip(ips, ports):
                    yield ip.strip() + ':' + port.strip()

if __name__ == '__main__':
    f = FreeProxyGetter()
    print(f.__CrwalFunc__)
    # f.crawl_66ip()
    # f.crawl_ip3366()
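
A quick way to exercise the metaclass dispatch, assuming the two proxy sites above are still reachable:

from proxypool.getter import FreeProxyGetter

f = FreeProxyGetter()
# __CrwalFunc__ holds the crawl_* method names collected by ProxyMetaclass
for name in f.__CrwalFunc__:
    proxies = f.get_raw_proxies(name)   # dispatched via eval('self.<name>()')
    print(name, len(proxies), 'proxies')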

Scheduler.py

import time
from multiprocessing import Process
import aiohttp
from proxypool.setttings import *

import asyncio
from proxypool.db import Reids_Client
from proxypool.getter import FreeProxyGetter

class VaildityTester(object):
    def __init__(self):
        # "basket" of proxies waiting to be validated
        self._raw_proxies = []

    # put proxies into the basket
    def set_raw_proxies(self, proxies):
        self._raw_proxies = proxies
        # the redis connection is only created when it is actually needed
        self._conn = Reids_Client()

    # validate a single proxy with an asynchronous request
    async def test_single_proxy(self, proxy):
        try:
            # create a session for this check
            async with aiohttp.ClientSession() as session:
                # redis hands back bytes, so decode to str if necessary
                if isinstance(proxy, bytes):
                    proxy = proxy.decode('utf-8')
                real_proxy = 'http://' + proxy
                try:
                    async with session.get(TSET_API,
                                           headers=TEST_REQUEST_HEADERS,
                                           proxy=real_proxy,
                                           timeout=TSET_TIME_OUT) as response:
                        if response.status == 200:
                            # the proxy works: add it to the pool
                            self._conn.put(proxy)
                            print('Valid proxy:', proxy)
                except Exception:
                    print('Invalid proxy:', proxy)
        except Exception as e:
            print(e)

    # run the validation over everything currently in the basket
    def test(self):
        print('Validator starting!')
        if not self._raw_proxies:
            # asyncio.wait raises on an empty task set, so bail out early
            return
        # event loop that will drive the async checks
        loop = asyncio.get_event_loop()
        tasks = [self.test_single_proxy(proxy) for proxy in self._raw_proxies]
        loop.run_until_complete(asyncio.wait(tasks))

# adder: tops the pool up with freshly crawled proxies
class PoolAdder(object):
    # threshold: upper bound on the pool size
    def __init__(self, threshold):
        self._threshold = threshold
        # validator
        self._tester = VaildityTester()
        # db connection
        self._conn = Reids_Client()
        self._crawler = FreeProxyGetter()

    # has the pool reached its upper threshold?
    def is_over_threshold(self):
        '''
        :return: True if the pool is full, False otherwise
        '''
        if self._conn.queue_len >= self._threshold:
            return True
        return False

    # crawl, validate and add proxies until the pool is full
    def add_to_queue(self):
        # the raw proxies come from the getter component
        print('Adder starting...')
        while True:
            # stop adding once the pool has reached its upper threshold
            if self.is_over_threshold():
                break
            proxy_count = 0
            # 1. fetch free proxies from the web.
            #    Calling a single crawl method (e.g. crawl_ip3366) would only
            #    cover one site; the __CrwalFunc__ list built by the metaclass
            #    lets us loop over every crawl_* method instead.
            for crawl in self._crawler.__CrwalFunc__:
                # crawl is a method name as a string, dispatched in get_raw_proxies
                try:
                    raw_proxies = self._crawler.get_raw_proxies(crawl)
                except Exception:
                    continue
                # 2. validate them (valid proxies are stored as they pass)
                # 2.1 put the proxies into the validator's basket
                self._tester.set_raw_proxies(raw_proxies)
                # 2.2 test() picks them up from the basket and checks them
                self._tester.test()
                proxy_count += len(raw_proxies)
            if proxy_count == 0:
                print('Every proxy site failed, please add more sources!')
                raise RuntimeError('Every proxy site failed!')


class Scheduler(object):
    @staticmethod
    def vaild_proxy(cycle=CYCLE_VALID_TIME):
        conn = Reids_Client()
        tester = VaildityTester()
        while True:
            print('Cycle validator starting!')
            # re-check the older half of the pool
            count = int(conn.queue_len * 0.5)
            if count == 0:
                print('Not enough proxies in the pool, waiting for the adder...')
                time.sleep(cycle)
                continue
            # take count proxies from the head of the pool and validate them
            raw_proxies = conn.get(count)
            # 2.1 put them into the validator's basket
            tester.set_raw_proxies(raw_proxies)
            # 2.2 test() picks them up and re-validates them
            tester.test()
            time.sleep(cycle)

    @staticmethod
    def check_pool_add(lower_threshold=LOWER_THRESHOLD,
                       upper_threshold=UPPER_THRESHOLD,
                       cycle=CYCLE_CHECK_TIME):
        conn = Reids_Client()
        adder = PoolAdder(upper_threshold)
        while True:
            # if the pool has dropped below the minimum, top it up
            if conn.queue_len < lower_threshold:
                adder.add_to_queue()
            time.sleep(cycle)

    def run(self):
        # the validator and the adder each run in their own process
        vaild_process = Process(target=Scheduler.vaild_proxy)
        check_process = Process(target=Scheduler.check_pool_add)
        vaild_process.start()
        check_process.start()
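
To exercise the adder once without launching the long-running processes, something like this works, assuming a local Redis is available:

from proxypool.Scheduler import PoolAdder
from proxypool.setttings import UPPER_THRESHOLD

# one crawl -> validate -> store pass; returns when the pool is full,
# or raises RuntimeError if every proxy site fails
PoolAdder(UPPER_THRESHOLD).add_to_queue()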
