[Python] A Simple Web Crawler

Complete code

# encoding:UTF-8
# from bs4 import BeautifulSoup
import urlparse
import urllib2
import re
import robotparser
import datetime
import time
import itertools
import Queue  # synchronized, thread-safe queue module (its deque is used below)
import lxml.html
import lxml.cssselect
import csv

def crawl_sitemap(url, scrape_callback=None):
    """
    1. Crawl the links recorded in the sitemap referenced by robots.txt.
    :param url: URL of the sitemap
    :return: if a callback is given, it is invoked with each downloaded page
    """
    sitemap = urllib2.urlopen(url).read()
    links = re.findall('<loc>(.*?)</loc>', sitemap)  # sitemap entries are wrapped in <loc> tags
    if scrape_callback:
        for link in links:
            html = urllib2.urlopen(link).read()
            scrape_callback(link, html)
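# Usage sketch (URLs follow the example.webscraping.com layout used elsewhere
# in this post): the <loc>(.*?)</loc> pattern pulls entries such as
# 'http://example.webscraping.com/places/default/view/Afghanistan-1' out of the
# sitemap, and each entry is downloaded and handed to scrape_callback.
# crawl_sitemap('http://example.webscraping.com/sitemap.xml', ScrapeCallback())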

def crawl_id(url, scrape_callback=None):
    """
    2. Crawl by iterating over numeric IDs; give up only after several
    consecutive download errors.
    :param url: common prefix of the ID-based links, without a trailing '/'
    :return:
    """
    max_error = 5  # number of consecutive errors tolerated before giving up
    num_error = 0
    throttle = Throttle(5)
    for page in itertools.count(1):  # iterator counting up from 1
        link = url + ("/-%d" % page)
        # use download() so a failed request yields None instead of raising
        html = download(link, {}, proxy=None, num_retries=1)
        if html is None:
            num_error += 1
            if num_error == max_error:
                break
        else:  # the page exists
            throttle.wait(link)
            scrape_callback(link, html)
            num_error = 0
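# Usage sketch (prefix taken from the commented call in main() below): passing
# 'http://example.webscraping.com/places/default/view' makes the loop request
# .../view/-1, .../view/-2, ... until max_error downloads fail in a row.
# crawl_id('http://example.webscraping.com/places/default/view', ScrapeCallback())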

def link_crawler(seed_url, link_regex=None, delay=-1, max_depth=1, max_urls=-1,
                 headers=None, user_agent="wswp", proxy=None, num_retries=2, scrape_callback=None):
    """
    3. Crawl by following links, depth-first; a feature can be disabled by
    passing a negative value for its parameter.
    While the crawl queue is not empty, check the robots.txt permission for
    each URL, wait for the throttle delay, then download the page. If the
    maximum depth has not been reached, extract the links matching the regular
    expression, normalize each one, and if it has not been seen before and its
    domain matches the seed URL's domain, add it to the crawl queue. The total
    URL count is incremented after every download.
    :param seed_url: seed URL
    :param link_regex: regular expression that target links must match
    :param user_agent: user agent string
    :return: crawl results
    """
    crawl_queue = Queue.deque([seed_url])  # deque re-exported by Queue; pop() takes from the right, so this is depth-first
    seen = {seed_url: 0}  # maps each URL to the depth at which it was found
    num_urls = 0
    rp = get_robots(seed_url)
    throttle = Throttle(delay)
    headers = headers or {}
    if user_agent:
        headers['User-agent'] = user_agent

    while crawl_queue:
        url = crawl_queue.pop()
        depth = seen[url]
        if rp.can_fetch(user_agent, url):
            throttle.wait(url)
            html = download(url, headers, proxy=proxy, num_retries=num_retries)
            links = []
            if scrape_callback:
                # links.extend(scrape_callback(url, html) or [])
                scrape_callback(url, html)
            if depth != max_depth:
                # collect candidate links
                if link_regex:
                    links.extend(link for link in get_links(html) if re.match(link_regex, link))
                # enqueue links that have not been visited and share the seed's domain
                for link in links:
                    link = normalize(seed_url, link)
                    if link not in seen:
                        seen[link] = depth + 1
                        if same_domain(seed_url, link):
                            crawl_queue.append(link)
            num_urls += 1
            if num_urls == max_urls:
                break
        else:
            print("Blocked by robots.txt:", url)

class Throttle:
    """
    Add a delay between two consecutive downloads to the same domain.
    """
    def __init__(self, delay):
        self.delay = delay  # how long to wait between requests
        self.domains = {}  # maps each domain to the time it was last accessed

    def wait(self, url):
        """
        Sleep if this domain was accessed too recently.
        urlparse splits a URL (starting with http://) into its components:
        scheme, netloc (location), path, params, query and fragment.
        :param url:
        :return:
        """
        domain = urlparse.urlparse(url).netloc
        last_accessed = self.domains.get(domain)
        if self.delay > 0 and last_accessed is not None:
            sleep_secs = self.delay - (datetime.datetime.now() - last_accessed).seconds
            if sleep_secs > 0:
                time.sleep(sleep_secs)
        self.domains[domain] = datetime.datetime.now()
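# Quick illustration of the urlparse components mentioned above (URL chosen
# only as an example, output abbreviated):
# >>> urlparse.urlparse('http://example.webscraping.com/places/default/view/1?x=1#top')
# ParseResult(scheme='http', netloc='example.webscraping.com',
#             path='/places/default/view/1', params='', query='x=1', fragment='top')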

class ScrapeCallback:
    def __init__(self):
        self.writer = csv.writer(open('countries.csv', 'w'))
        self.fields = ('area', 'population', 'iso', 'country', 'capital',
                       'continent', 'tld', 'currency_code', 'currency_name',
                       'phone', 'postal_code_format', 'postal_code_regex', 'languages')
        self.writer.writerow(self.fields)

    def __call__(self, url, html):
        """
        :param url: used to decide whether this is a target (country view) page
        :param html: the downloaded page
        :return:
        """
        if re.search('/view/', url):
            tree = lxml.html.fromstring(html)
            row = []
            for field in self.fields:
                row.append(tree.cssselect('table>tr#places_{}__row>td.w2p_fw'.format(field))[0].text_content())
            self.writer.writerow(row)
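# The CSS selector above targets table rows of the form (markup assumed from
# the example.webscraping.com country pages, value illustrative):
#   <table> <tr id="places_area__row"> ... <td class="w2p_fw">647,500 square kilometres</td>
# so tree.cssselect('table>tr#places_area__row>td.w2p_fw')[0].text_content()
# returns the text of that cell, and one CSV row is written per country page.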

def download(url, headers, proxy, num_retries, data=None):
    """
    Build the request with the given headers, then decide from the proxy
    parameter whether to add a dedicated handler before opening the URL.
    If the page is temporarily unavailable (a 5XX error), retry a few times
    before giving up.
    :param url: link to download
    :param headers: request headers, including the user agent
    :param proxy: proxy address, used for the URL's scheme
    :param num_retries: how many times to retry on error
    :return: the page source, or None on failure
    """
    print("Downloading:", url)
    request = urllib2.Request(url, data, headers)
    opener = urllib2.build_opener()  # opener that can be extended with extra handlers
    if proxy:
        proxy_params = {urlparse.urlparse(url).scheme: proxy}
        opener.add_handler(urllib2.ProxyHandler(proxy_params))
    try:
        html = opener.open(request).read()  # use the opener so the proxy handler takes effect
        # # Extraction option 1: regular expressions (C based, fast, hard to use, not very flexible)
        # result = re.findall('<td class="w2p_fw">(.*?)</td>', html)
        # if result:
        #     print(result[1])
        # # Extraction option 2: BeautifulSoup (pure Python, slower, easy to install)
        # soup = BeautifulSoup(html, 'html.parser')
        # tr = soup.find(attrs={'id': 'places_area__row'})
        # if tr:
        #     td = tr.find(attrs={'class': 'w2p_fw'})
        #     area = td.text
        #     print(area)
        # # Extraction option 3: lxml (fast, pays off most on large scrapes, harder to install)
        # tree = lxml.html.fromstring(html)
        # td = tree.cssselect('tr#places_neighbours__row > td.w2p_fw')
        # if td:
        #     area = td[0].text_content()
        #     print(area)
    except urllib2.URLError as e:
        print("Download error:", e.reason)
        html = None
        if num_retries > 0:
            if hasattr(e, 'code') and 500 <= e.code <= 600:
                return download(url, headers, proxy, num_retries - 1, data)
    return html
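# Illustrative call (header and retry count chosen for the example): a 503
# response would be retried num_retries times before None is returned.
# html = download('http://example.webscraping.com', {'User-agent': 'wswp'},
#                 proxy=None, num_retries=2)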

def get_links(html):
    """
    Extract all the links from a page.
    :param html:
    :return:
    """
    # re.compile() turns the string form of a regular expression into a pattern object
    webpage_regex = re.compile('<a[^>]+href=["\'](.*?)["\']', re.IGNORECASE)
    return webpage_regex.findall(html)
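# For example (markup invented for illustration):
# >>> get_links('<a href="/places/default/view/1">Afghanistan</a>')
# ['/places/default/view/1']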

def get_robots(url):
    """
    Download and parse the site's robots.txt.
    :param url:
    :return: an object holding the robots.txt rules
    """
    rp = robotparser.RobotFileParser()
    rp.set_url(urlparse.urljoin(url, '/robots.txt'))
    rp.read()
    return rp
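# Usage sketch (assumes the site's robots.txt blocks a user agent named
# 'BadCrawler', as in the example.webscraping.com setup):
# rp = get_robots('http://example.webscraping.com')
# rp.can_fetch('BadCrawler', 'http://example.webscraping.com/')   # -> False
# rp.can_fetch('GoodCrawler', 'http://example.webscraping.com/')  # -> True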

def normalize(seed_url, link):
    """
    Normalize a link: drop the fragment and turn a relative path into an
    absolute URL.
    :param seed_url:
    :param link:
    :return:
    """
    link, _ = urlparse.urldefrag(link)  # remove the fragment (the part after '#')
    return urlparse.urljoin(seed_url, link)
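# For example:
# >>> normalize('http://example.webscraping.com', '/places/default/view/1#summary')
# 'http://example.webscraping.com/places/default/view/1'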

def same_domain(url1, url2):
    """
    Return True when the two URLs share the same domain.
    :param url1:
    :param url2:
    :return:
    """
    return urlparse.urlparse(url1).netloc == urlparse.urlparse(url2).netloc
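# For example:
# >>> same_domain('http://example.webscraping.com/places/default/view/1',
# ...             'http://example.webscraping.com/places/default/index')
# True
# >>> same_domain('http://example.webscraping.com', 'http://www.python.org')
# False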

def main():
    # # 1. Crawl via the links recorded in the sitemap referenced by robots.txt
    # crawl_sitemap('http://example.webscraping.com/sitemap.xml', scrape_callback=ScrapeCallback())
    # # 2. Crawl by iterating over IDs
    # crawl_id('http://example.webscraping.com/places/default/view', scrape_callback=ScrapeCallback())
    # 3. Crawl by following links
    link_crawler('http://example.webscraping.com', '/places/default/(view|index)',
                 delay=0, num_retries=5, max_depth=2, user_agent='GoodCrawler', scrape_callback=ScrapeCallback())


if __name__ == '__main__':
    main()
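The listing above is written for Python 2. A rough mapping of its standard-library imports for anyone trying it on Python 3 (the third-party lxml and cssselect packages are installed separately, e.g. with pip):

# urllib2      -> urllib.request (and urllib.error for URLError)
# urlparse     -> urllib.parse
# robotparser  -> urllib.robotparser
# Queue        -> queue (and Queue.deque should become collections.deque)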
