Web Crawlers (Part 1)

To scrape a website you first have to download its pages; Python's urllib2 module can be used to download a URL.
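
As a minimal sketch (no error handling; the full download() helper further below adds a user agent and retries):

import urllib2
html = urllib2.urlopen('http://example.webscraping.com').read()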

There are three approaches:

1. Sitemap crawler

This method relies on the site's sitemap file.
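
A sitemap is simply an XML file listing the site's URLs inside <loc> tags, roughly like this (the entries shown are only illustrative):

<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
  <url><loc>http://example.webscraping.com/view/Afghanistan-1</loc></url>
  <url><loc>http://example.webscraping.com/view/Aland-Islands-2</loc></url>
</urlset>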

#coding:utf-8
import re
from common import download


def crawl_sitemap(url):
    # download the sitemap file (an .xml file)
    sitemap = download(url)
    # extract the sitemap links: a regular expression pulls every URL out of the <loc></loc> tags
    links = re.findall('<loc>(.*?)</loc>', sitemap)
    # download each link
    for link in links:
        html = download(link)
        

# main
if __name__ == '__main__':
    crawl_sitemap('http://example.webscraping.com/sitemap.xml')

 

common.py

# -*- coding: utf-8 -*-

import urllib2
import urlparse

# user_agent: the User-Agent string sent with each request
# num_retries: number of times to retry after a server-side (5xx) error
def download(url, user_agent='wswp', num_retries=2):
    
    print 'Downloading:', url
    # headers is a dict; it can be passed directly when building the Request, or each
    # key/value can be added with add_header(). The User-Agent header identifies the
    # browser and is often spoofed, because some HTTP services only accept requests that
    # appear to come from common browsers rather than scripts, or return different
    # versions of a page to different browsers.
    headers = {'User-agent': user_agent}
    # Request encapsulates the URL request
    request = urllib2.Request(url, headers=headers)
    try:
        html = urllib2.urlopen(request).read()  # fetch the URL
    except urllib2.URLError as e:
        print 'Download error:', e.reason
        html = None
        if num_retries > 0:
            # hasattr checks whether the exception carries an HTTP status code
            if hasattr(e, 'code') and 500 <= e.code < 600:
                # retry 5XX HTTP errors
                html = download(url, user_agent, num_retries-1)
    return html
if __name__ == '__main__':
    print download('http://example.webscraping.com')
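
As the comment above notes, urllib2 accepts headers either as a dict at construction time or one at a time via add_header(); a small sketch of the two equivalent forms:

import urllib2

# pass the headers dict when building the request
request = urllib2.Request('http://example.webscraping.com',
                          headers={'User-agent': 'wswp'})

# or attach each header individually
request = urllib2.Request('http://example.webscraping.com')
request.add_header('User-agent', 'wswp')

html = urllib2.urlopen(request).read()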

2. ID iteration crawler

The pages of the example site can also be reached directly by numeric ID (the URL pattern http://example.webscraping.com/view/-{id} ignores the page alias), so the crawler simply counts upward through the IDs and stops after a run of consecutive download errors.
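
The unbounded ID sequence comes from itertools.count; a quick sketch of that primitive (purely illustrative):

import itertools

for page in itertools.count(1):  # yields 1, 2, 3, ... without end
    print page
    if page == 3:  # the real crawler breaks on consecutive download errors instead
        break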

# -*- coding: utf-8 -*-


import itertools  # provides count() for an unbounded sequence of IDs
from common import download


def iteration():
    max_errors = 5  # maximum number of consecutive download errors allowed
    num_errors = 0  # current number of consecutive download errors
    for page in itertools.count(1):
        # ignore the page alias and iterate over the numeric ID only
        url = 'http://example.webscraping.com/view/-{}'.format(page)
        html = download(url)
        if html is None:
            # received an error trying to download this webpage
            num_errors += 1
            if num_errors == max_errors:
                # reached the maximum number of errors in a row, so assume we
                # have passed the last country ID and can stop downloading
                break
        else:
            # success - can scrape the result
            # ...
            num_errors = 0


if __name__ == '__main__':
    iteration()

3. Link crawler

Follow links the way an ordinary user would, downloading only the pages of interest.

Here a regular expression is again used to decide which linked pages are of interest.

This example also adds a few more advanced features:

1. Parsing the robots.txt file, to avoid downloading URLs the site disallows; this uses Python's built-in robotparser module.

2. Proxy support, via urllib2's proxy handling. The friendlier requests HTTP module could be used instead (see the sketch after this list).

3. Download throttling: a delay is inserted between two consecutive downloads to the same domain, to avoid crawling too fast.

4. Avoiding spider traps, i.e. pages that keep generating links without end; this is handled by recording how many links were followed to reach each page and stopping at a maximum depth.
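
For item 2, a minimal sketch of what proxy support could look like with requests (the proxy address is a placeholder and requests must be installed separately; the code below sticks with urllib2):

import requests

proxy = 'http://127.0.0.1:8080'  # placeholder proxy address
proxies = {'http': proxy, 'https': proxy}
response = requests.get('http://example.webscraping.com',
                        headers={'User-agent': 'wswp'},
                        proxies=proxies)
html = response.text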

#coding:utf-8 


import re
import urlparse
import urllib2
import time
from datetime import datetime
import robotparser
import Queue


def link_crawler(seed_url, link_regex=None, delay=5, max_depth=-1, max_urls=-1, headers=None, user_agent='wswp', proxy=None, num_retries=1):
    # seed_url: URL of the website to crawl
    # link_regex: regular expression that followed links must match
    crawl_queue = Queue.deque([seed_url])  # queue of URLs still to be crawled

    seen = {seed_url: 0}  # URLs already seen and their depth, to avoid re-downloading and spider traps

    num_urls = 0  # number of URLs downloaded so far

    rp = get_robots(seed_url)  # parse robots.txt so disallowed URLs can be skipped

    throttle = Throttle(delay)  # throttle to rate-limit downloads
    headers = headers or {}
    if user_agent:  # set the user agent header
        headers['User-agent'] = user_agent

    while crawl_queue:
        url = crawl_queue.pop()
        # check that robots.txt allows this URL
        if rp.can_fetch(user_agent, url):  # is this user agent allowed to fetch the page?
            throttle.wait(url)  # wait if the last request to this domain was too recent
            html = download(url, headers, proxy=proxy, num_retries=num_retries)
            links = []
            # avoid spider traps by limiting crawl depth
            depth = seen[url]
            if depth != max_depth:
                if link_regex:
                    # filter for links matching our regular expression
                    links.extend(link for link in get_links(html) if re.match(link_regex, link))

                for link in links:
                    link = normalize(seed_url, link)  # convert to an absolute URL
                    # check whether this link has already been seen
                    if link not in seen:
                        seen[link] = depth + 1
                        # only follow links within the same domain
                        if same_domain(seed_url, link):
                            # success! add this new link to queue
                            crawl_queue.append(link)

            # check whether have reached downloaded maximum
            num_urls += 1
            if num_urls == max_urls:
                break
        else:
            print 'Blocked by robots.txt:', url

# Throttle downloads by adding a delay between consecutive requests to the same domain
class Throttle:
    """Throttle downloading by sleeping between requests to same domain
    """
    def __init__(self, delay):
        # amount of delay between downloads for each domain
        self.delay = delay
        # timestamp of when a domain was last accessed
        self.domains = {}
        
    def wait(self, url):
        domain = urlparse.urlparse(url).netloc
        last_accessed = self.domains.get(domain)

        if self.delay > 0 and last_accessed is not None:
            sleep_secs = self.delay - (datetime.now() - last_accessed).seconds
            if sleep_secs > 0:
                time.sleep(sleep_secs)
        self.domains[domain] = datetime.now()

# download helper function
def download(url, headers, proxy, num_retries, data=None):
    print 'Downloading:', url
    request = urllib2.Request(url, data, headers)
    # proxy support
    opener = urllib2.build_opener()
    if proxy:
        proxy_params = {urlparse.urlparse(url).scheme: proxy}
        opener.add_handler(urllib2.ProxyHandler(proxy_params))
    try:
        response = opener.open(request)
        html = response.read()
        code = response.code
    except urllib2.URLError as e:
        print 'Download error:', e.reason
        html = ''
        if hasattr(e, 'code'):
            code = e.code
            if num_retries > 0 and 500 <= code < 600:
                # retry 5XX HTTP errors
                return download(url, headers, proxy, num_retries-1, data)
        else:
            code = None
    return html


def normalize(seed_url, link):
    """Normalize this URL by removing hash and adding domain
    """
    # strip the fragment and resolve the link relative to the seed URL
    link, _ = urlparse.urldefrag(link)  # remove hash to avoid duplicates
    return urlparse.urljoin(seed_url, link)

# check whether two URLs belong to the same domain
def same_domain(url1, url2):
    """Return True if both URL's belong to same domain
    """
    return urlparse.urlparse(url1).netloc == urlparse.urlparse(url2).netloc

# parse the site's robots.txt file
def get_robots(url):
    """Initialize robots parser for this domain
    """
    rp = robotparser.RobotFileParser()
    rp.set_url(urlparse.urljoin(url, '/robots.txt'))
    rp.read()
    return rp
        

def get_links(html):
    """Return a list of links from html 
    """
    # a regular expression to extract all links from the webpage
    webpage_regex = re.compile('<a[^>]+href=["\'](.*?)["\']', re.IGNORECASE)
    # list of all links from the webpage
    return webpage_regex.findall(html)

# main: the first call uses the 'BadCrawler' user agent, which the example site's
# robots.txt blocks; the second uses 'GoodCrawler' and is allowed to crawl.
if __name__ == '__main__':
    link_crawler('http://example.webscraping.com', '/(index|view)', delay=0, num_retries=1, user_agent='BadCrawler')
    link_crawler('http://example.webscraping.com', '/(index|view)', delay=0, num_retries=1, max_depth=1, user_agent='GoodCrawler')

 
