Python 3 web crawler (downloading web pages)

#!/usr/bin/env python3
import re
import urllib.parse
import urllib.request
import urllib.error
import urllib.robotparser
import time
from datetime import datetime
import collections


def link_crawler(seed_url, link_regex=None, delay=5, max_depth=-1, max_urls=-1, headers=None, user_agent='wswp', proxy=None, num_retries=1):
    crawl_queue = collections.deque([seed_url])
    seen = {seed_url: 0}
    # pages already seen, mapped to their crawl depth (guards against crawler traps)
    num_urls = 0
    # count of links downloaded so far
    rp = get_robots(seed_url)
    # parser for the site's robots.txt
    throttle = Throttle(delay)
    headers = headers or {}
    if user_agent:
        headers['User-agent'] = user_agent

    while crawl_queue:
        url = crawl_queue.pop()
        # check url passes robots.txt restrictions
        if rp.can_fetch(user_agent, url):
            # check whether this user agent is allowed to fetch the URL
            throttle.wait(url)
            # throttle: wait if this domain was accessed too recently
            html = download(url, headers, proxy=proxy, num_retries=num_retries)
            links = []

            depth = seen[url]
            # depth recorded when this URL was first seen
            if depth != max_depth:
                # can still crawl further
                if link_regex:
                    # if a link pattern was supplied, only follow matching links
                    links.extend(link for link in get_links(html) if re.match(link_regex, link))
                for link in links:
                    # queue up the links found on this page; the download() call
                    # at the top of the while loop does the actual fetching later
                    link = normalize(seed_url, link)
                    # join the path with the seed domain to form an absolute URL

                    if link not in seen:
                        seen[link] = depth + 1
                        # only crawl links that stay on the same domain
                        if same_domain(seed_url, link):
                            # add the new link to the crawl queue
                            crawl_queue.append(link)

            num_urls += 1
            if num_urls == max_urls:
                break
        else:
            print('Blocked by robots.txt:', url)


class Throttle:
    # rate-limits downloads by enforcing a minimum delay per domain
    def __init__(self, delay):
        self.delay = delay
        self.domains = {}
        # maps each domain to the time it was last accessed

    def wait(self, url):
        domain = urllib.parse.urlparse(url).netloc
        last_accessed = self.domains.get(domain)
        # when was this domain last accessed (None if never)

        if self.delay > 0 and last_accessed is not None:
            sleep_secs = self.delay - (datetime.now() - last_accessed).seconds
            if sleep_secs > 0:
                time.sleep(sleep_secs)
        self.domains[domain] = datetime.now()
        # record the access time for this domain
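
# Usage sketch (illustrative, not part of the original script): with delay=5,
# consecutive requests to the same domain are spaced at least five seconds apart,
# while a request to a different domain is not delayed.
#   throttle = Throttle(5)
#   throttle.wait('http://example.webscraping.com/index')   # returns immediately
#   throttle.wait('http://example.webscraping.com/view/1')  # sleeps ~5 seconds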


def download(url, headers, proxy, num_retries, data=None):
    print('Downloading:', url)
    request = urllib.request.Request(url, data, headers)
    opener = urllib.request.build_opener()
    if proxy:
        # route requests for this URL's scheme through the proxy
        proxy_params = {urllib.parse.urlparse(url).scheme: proxy}
        opener.add_handler(urllib.request.ProxyHandler(proxy_params))
    try:
        response = opener.open(request)
        html = response.read().decode("utf-8")
        # in Python 3 the response body is bytes and must be decoded
        code = response.code
    except urllib.error.URLError as e:
        print('Download error:', e.reason)
        html = ''
        if hasattr(e, 'code'):
            code = e.code
            if num_retries > 0 and 500 <= code < 600:
                return download(url, headers, proxy, num_retries-1, data)
        else:
            code = None
    return html
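
# Example call (a sketch using the same defaults as link_crawler above): fetch one
# page without a proxy and retry once on 5xx server errors.
#   page = download('http://example.webscraping.com/index',
#                   headers={'User-agent': 'wswp'}, proxy=None, num_retries=1)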


def normalize(seed_url, link):
    link, _ = urllib.parse.urldefrag(link)  # strip the fragment (#...) so equivalent URLs are not crawled twice
    return urllib.parse.urljoin(seed_url, link)  # resolve the (possibly relative) link against the seed URL
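
# For example (illustrative values):
#   normalize('http://example.webscraping.com/index', '/view/1#top')
# returns 'http://example.webscraping.com/view/1'.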


def same_domain(url1, url2):
    # True when both URLs share the same domain (netloc)
    return urllib.parse.urlparse(url1).netloc == urllib.parse.urlparse(url2).netloc


def get_robots(url):
    # download and parse the site's robots.txt
    rp = urllib.robotparser.RobotFileParser()
    rp.set_url(urllib.parse.urljoin(url, '/robots.txt'))
    rp.read()
    return rp
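
# Example check (illustrative): whether a given agent may fetch a URL depends on
# the site's robots.txt rules for that agent.
#   rp = get_robots('http://example.webscraping.com/index')
#   rp.can_fetch('wswp', 'http://example.webscraping.com/index')  # True or False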


def get_links(html):
    # extract the href value of every <a> tag in the page with a regular expression
    return re.findall('<a href="(.*?)"', html)
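
# Note: the pattern above only matches double-quoted hrefs that immediately follow
# "<a ". A more tolerant variant (a sketch, not from the original post) would be:
#   re.findall('<a[^>]+href=["\'](.*?)["\']', html, re.IGNORECASE)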

if __name__ == '__main__':
    link_crawler('http://example.webscraping.com/index', '/(index|view)', max_depth=1, user_agent='BadCrawler')
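
A note on the example run: user_agent='BadCrawler' is there to exercise the robots.txt check; if the site disallows that agent, the script only prints "Blocked by robots.txt" for each URL. To actually download pages (a sketch, assuming the site is still reachable), call the crawler with the default 'wswp' agent instead:

    link_crawler('http://example.webscraping.com/index', '/(index|view)', max_depth=1)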