#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2019/9/3 16:49
# @Author : Frank Hu
# @Email : 1171624400@qq.com
# @File : advanced_link_crawler.py
# @Software: PyCharm
from urllib.parse import urlparse, urljoin
from urllib.error import URLError, HTTPError, ContentTooShortError
from urllib import robotparser
import urllib.request
import time
import re

def download(url, user_agent='wswp', num_retries=2, charset='utf-8', proxy=None):
    """Download a URL, retrying on 5xx errors and optionally via a proxy."""
    print('Downloading: {}'.format(url))
    request = urllib.request.Request(url)
    request.add_header('User-agent', user_agent)
    try:
        if proxy:
            proxy_support = urllib.request.ProxyHandler({'http': proxy})
            opener = urllib.request.build_opener(proxy_support)
            urllib.request.install_opener(opener)
        resp = urllib.request.urlopen(request)
        cs = resp.headers.get_content_charset()
        if not cs:
            cs = charset
        html = resp.read().decode(cs)
    except (URLError, HTTPError, ContentTooShortError) as e:
        print('Downloading error:', e.reason)
        html = None
        if num_retries > 0:
            if hasattr(e, 'code') and 500 <= e.code < 600:
                # retry server errors, keeping the caller's original arguments
                return download(url, user_agent=user_agent,
                                num_retries=num_retries - 1,
                                charset=charset, proxy=proxy)
    return html
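
# A minimal usage sketch for download(); the proxy address is hypothetical
# and only shows the expected 'http://host:port' form:
#
#   html = download('http://example.python-scraping.com',
#                   user_agent='wswp', proxy='http://127.0.0.1:8080')
#   if html:
#       print(html[:100])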

class Throttle:
    """Throttle downloads by pausing between requests to the same domain."""

    def __init__(self, delay):
        self.delay = delay   # minimum seconds between requests to one domain
        self.domains = {}    # maps domain -> timestamp of the last request

    def wait(self, url):
        domain = urlparse(url).netloc
        last_accessed = self.domains.get(domain)
        if self.delay > 0 and last_accessed is not None:
            sleep_secs = self.delay - (time.time() - last_accessed)
            if sleep_secs > 0:
                time.sleep(sleep_secs)
        self.domains[domain] = time.time()
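
# A sketch of Throttle in isolation: consecutive requests to one domain are
# spaced at least `delay` seconds apart; other domains are unaffected.
#
#   throttle = Throttle(delay=3)
#   throttle.wait('http://example.python-scraping.com/')  # returns immediately
#   throttle.wait('http://example.python-scraping.com/')  # sleeps ~3 seconds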

def get_robots_parser(robots_url):
    """Fetch and parse a robots.txt file."""
    rp = robotparser.RobotFileParser()
    rp.set_url(robots_url)
    rp.read()
    return rp
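
# Querying the parser (a sketch; 'wswp' matches the crawler's user agent):
#
#   rp = get_robots_parser('http://example.python-scraping.com/robots.txt')
#   rp.can_fetch('wswp', 'http://example.python-scraping.com/places/')  # -> bool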

def get_links(html):
    """Extract links (the href values of <a> tags) from an HTML page."""
    webpage_regex = re.compile("""<a[^>]+href=["'](.*?)["']""", re.IGNORECASE)
    return webpage_regex.findall(html)
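
# The regex captures whatever sits between the quotes of each href attribute;
# a quick sketch on a made-up HTML snippet:
#
#   get_links('<a href="/places/default/view/1">link</a>')
#   # -> ['/places/default/view/1']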

def link_crawler(start_url, link_regex, robots_url=None, user_agent='wswp',
                 proxy=None, delay=3, max_depth=4):
    """Crawl from start_url, following links that match link_regex."""
    crawl_queue = [start_url]   # URLs still to be crawled
    seen = {}                   # maps each URL seen so far to its crawl depth
    if not robots_url:
        # assume robots.txt lives at the site root
        robots_url = urljoin(start_url, '/robots.txt')
    rp = get_robots_parser(robots_url)
    throttle = Throttle(delay)
    while crawl_queue:
        url = crawl_queue.pop()
        # check whether robots.txt permits fetching this URL
        if rp.can_fetch(user_agent, url):
            depth = seen.get(url, 0)  # depth at which this URL was discovered
            if depth == max_depth:
                print('Skipping %s due to depth' % url)
                continue
            throttle.wait(url)
            html = download(url, user_agent=user_agent, proxy=proxy)
            if not html:
                continue
            for link in get_links(html):
                if re.match(link_regex, link):
                    abs_link = urljoin(start_url, link)
                    if abs_link not in seen:
                        seen[abs_link] = depth + 1
                        crawl_queue.append(abs_link)
        else:
            print('Blocked by robots.txt:', url)
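
# Design note: crawl_queue.pop() removes from the end of the list, so the
# crawl runs depth-first; switching to a collections.deque with popleft()
# would make it breadth-first instead.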

if __name__ == '__main__':
    start_url = 'http://example.python-scraping.com'
    link_regex = '/places/default/(index|view)/'
    # link_crawler(start_url, link_regex, user_agent='BadCrawler')  # blocked by robots.txt
    link_crawler(start_url, link_regex, max_depth=1)