advanced_link_crawler.py

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time    : 2019/9/3 16:49
# @Author  : Frank Hu
# @Email   : 1171624400@qq.com
# @File    : advanced_link_crawler.py
# @Software: PyCharm

from urllib.parse import urlparse, urljoin
from urllib.error import URLError, HTTPError, ContentTooShortError
from urllib import robotparser
import urllib.request
import time
import re


def download(url, user_agent='wswp', num_retries=2, charset='utf8', proxy=None):
    print('Downloading: {}'.format(url))
    request = urllib.request.Request(url)
    request.add_header('User-agent', user_agent)
    try:
        if proxy:
            proxy_support = urllib.request.ProxyHandler({'http': proxy})
            opener = urllib.request.build_opener(proxy_support)
            urllib.request.install_opener(opener)
        resp = urllib.request.urlopen(request)
        cs = resp.headers.get_content_charset()
        if not cs:
            cs = charset
        html = resp.read().decode(cs)
    except (URLError, HTTPError, ContentTooShortError) as e:
        print('Download error:', e.reason)
        html = None
        if num_retries > 0:
            if hasattr(e, 'code') and 500 <= e.code < 600:
                # retry on 5xx server errors, preserving the original settings
                return download(url, user_agent=user_agent,
                                num_retries=num_retries - 1,
                                charset=charset, proxy=proxy)
    return html
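
As a quick sanity check, download() can also be called on its own. A minimal sketch, assuming the example site used later in this post is reachable; the URL and num_retries value here are only illustrative:

html = download('http://example.python-scraping.com', user_agent='wswp', num_retries=1)
if html:
    print(html[:100])  # first 100 characters of the downloaded page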


class Throttle:
    """Throttle downloads by enforcing a minimum delay between requests to the same domain."""
    def __init__(self, delay):
        self.delay = delay  # minimum delay (in seconds) between requests to the same domain
        self.domains = {}  # {domain: timestamp of the last request}

    def wait(self, url):
        domain = urlparse(url).netloc
        last_accessed = self.domains.get(domain)
        if self.delay > 0 and last_accessed is not None:
            sleep_secs = self.delay - (time.time() - last_accessed)
            if sleep_secs > 0:
                time.sleep(sleep_secs)
        self.domains[domain] = time.time()
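
To see the throttle in action, here is a small sketch; the two URLs below are placeholders on the same domain, so the second wait() call sleeps until at least delay seconds have passed since the first request:

throttle = Throttle(delay=2)
for url in ['http://example.python-scraping.com/places/default/index/0',
            'http://example.python-scraping.com/places/default/index/1']:
    throttle.wait(url)  # sleeps if this domain was hit less than 2 seconds ago
    html = download(url)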


# Parse the site's robots.txt file
def get_robots_parser(robots_url):
    rp = robotparser.RobotFileParser()
    rp.set_url(robots_url)
    rp.read()
    return rp
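
A hedged usage sketch: whether a given URL may be fetched depends entirely on the rules in the site's robots.txt, so the boolean results below will vary by site and user agent:

rp = get_robots_parser('http://example.python-scraping.com/robots.txt')
print(rp.can_fetch('wswp', 'http://example.python-scraping.com/places/default/index/'))
print(rp.can_fetch('BadCrawler', 'http://example.python-scraping.com/places/default/index/'))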


# Extract the links (href attribute values of <a> tags) from the given HTML page
def get_links(html):
    webpage_regex = re.compile("""<a[^>]+href=["'](.*?)["']""", re.IGNORECASE)
    return webpage_regex.findall(html)
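
The regex only captures the href value of each <a> tag; it does not resolve relative URLs (link_crawler() does that later with urljoin). A tiny sketch with a made-up HTML fragment:

sample_html = '<a href="/places/default/view/Andorra-6">Andorra</a> <a class="nav" href="/places/default/index/1">Next</a>'
print(get_links(sample_html))
# ['/places/default/view/Andorra-6', '/places/default/index/1']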


# Crawl the links matching link_regex, starting from start_url
def link_crawler(start_url, link_regex, robots_url=None, user_agent='wswp',
                 proxy=None, delay=3, max_depth=4):
    crawl_queue = [start_url, ]  # queue of page URLs still to be crawled
    # keep track of which URLs have been seen before
    seen = {}  # maps each seen URL to the depth at which it was discovered
    if not robots_url:
        robots_url = '{}/robots.txt'.format(start_url)
    rp = get_robots_parser(robots_url)  # parse robots.txt
    throttle = Throttle(delay)  # throttle downloads
    # loop over the queue of URLs to crawl
    while crawl_queue:
        url = crawl_queue.pop()
        # check whether robots.txt allows this URL to be crawled
        if rp.can_fetch(user_agent, url):
            depth = seen.get(url, 0)  # depth at which this URL was discovered (0 for start_url)
            if depth == max_depth:
                print('Skipping %s due to depth' % url)
                continue
            throttle.wait(url)
            html = download(url, user_agent=user_agent, proxy=proxy)  # download the page
            if not html:
                continue

            for link in get_links(html):  # extract links (<a> href values) from the page
                if re.match(link_regex, link):
                    abs_link = urljoin(start_url, link)
                    if abs_link not in seen:
                        seen[abs_link] = depth + 1
                        crawl_queue.append(abs_link)
        else:
            print('Blocked by robots.txt:', url)


if __name__ == '__main__':
    start_url = 'http://example.python-scraping.com'
    link_regex = '/places/default/(index|view)/'
    # link_crawler(start_url, link_regex, user_agent='BadCrawler')
    link_crawler(start_url, link_regex, max_depth=1)
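
For completeness, a hedged sketch of the remaining parameters of link_crawler(); the proxy address below is a placeholder, not a working proxy:

# link_crawler(start_url, link_regex, delay=5, max_depth=2,
#              user_agent='wswp', proxy='http://127.0.0.1:8080')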
