These notes summarize Chapter 2 of Web Scraping with Python (《用Python写网络爬虫》), which covers how to extract data from web pages.
1. Extracting data with regular expressions:
import urllib2
import re

def scrape(html):
    # pull the area value out of the places_area row with a regular expression
    area = re.findall('<tr id="places_area__row">.*?<td\s*class=["\']w2p_fw["\']>(.*?)</td>', html)[0]
    return area

if __name__ == '__main__':
    html = urllib2.urlopen('http://example.webscraping.com/places/default/view/United-Kingdom-239').read()
    print scrape(html)
    # 244,820 square kilometres
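Note that the book's examples target Python 2 (urllib2 and print statements). For reference only, a minimal Python 3 sketch of the same scraper would swap in urllib.request; the decode('utf-8') call is an assumption that the page is served as UTF-8:

# Python 3 sketch of the same regex scraper (assumption: the page is UTF-8 encoded)
import re
import urllib.request

def scrape(html):
    # same regular expression as above, kept as a raw string
    area = re.findall(r'<tr id="places_area__row">.*?<td\s*class=["\']w2p_fw["\']>(.*?)</td>', html)[0]
    return area

if __name__ == '__main__':
    url = 'http://example.webscraping.com/places/default/view/United-Kingdom-239'
    html = urllib.request.urlopen(url).read().decode('utf-8')
    print(scrape(html))  # expected: 244,820 square kilometres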
2. Extracting data with Beautiful Soup:
Install Beautiful Soup with: pip install beautifulsoup4
Example code:
# example: https://bitbucket.org/wswp/code/src/9e6b82b47087c2ada0e9fdf4f5e037e151975f0f/chapter02/bs_example.py?at=default&fileviewer=file-view-default
import urllib2
from bs4 import BeautifulSoup

def scrape(html):
    soup = BeautifulSoup(html, 'html.parser')
    tr = soup.find(attrs={'id': 'places_area__row'})  # locate the area row
    # 'class' is a reserved word in Python, so it is passed via attrs (class_ would also work)
    td = tr.find(attrs={'class': 'w2p_fw'})  # locate the area tag
    area = td.text  # extract the area contents from this tag
    return area

if __name__ == '__main__':
    html = urllib2.urlopen('http://example.webscraping.com/places/default/view/United-Kingdom-239').read()
    print scrape(html)
    # 244,820 square kilometres
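Part of Beautiful Soup's appeal is that it can still build a usable tree from broken markup. A minimal sketch, where the broken snippet is purely illustrative; how well the missing tags are repaired depends on the parser backend you pass ('html.parser', 'lxml', or 'html5lib'):

from bs4 import BeautifulSoup

# illustrative snippet: unquoted attribute and unclosed <li> tags
broken_html = '<ul class=country><li>Area<li>Population</ul>'
soup = BeautifulSoup(broken_html, 'html.parser')
# prettify() shows the repaired tree; other backends such as html5lib
# may repair the nesting differently (closer to how a browser would)
print soup.prettify()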
3. Extracting data with lxml + cssselect:
Install lxml (configure a pip mirror first if downloads are slow): pip install lxml
Install cssselect: pip install cssselect
# -*- coding: utf-8 -*-
import urllib2
from lxml import html

def scrape(htmlContent):
    tree = html.fromstring(htmlContent)
    td = tree.cssselect('tr#places_area__row > td.w2p_fw')[0]
    area = td.text_content()
    # 244,820 square kilometres
    return area

if __name__ == '__main__':
    htmlContent = urllib2.urlopen('http://example.webscraping.com/places/default/view/United-Kingdom-239').read()
    print scrape(htmlContent)
For details of cssselect's CSS3 selector support, see:
https://cssselect.readthedocs.io/en/latest/#user-api
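Under the hood, cssselect translates a CSS selector into an equivalent XPath expression, which lxml then evaluates. A small sketch showing that translation for the selector used above:

from lxml.cssselect import CSSSelector

# compile the CSS selector used earlier; .path holds the generated XPath
sel = CSSSelector('tr#places_area__row > td.w2p_fw')
print sel.css   # the original CSS expression
print sel.path  # the equivalent XPath expression
# a CSSSelector instance is callable on a parsed tree, e.g. sel(tree)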
4. Performance comparison: regular expressions vs. Beautiful Soup vs. lxml
All three scrapers are run against the same page:
http://example.webscraping.com/places/default/view/United-Kingdom-239
Each scraper is executed 10 times and the total time is recorded for comparison.
The code:
# -*- coding: utf-8 -*-
import csv
import time
import urllib2
import re
import timeit
from bs4 import BeautifulSoup
import lxml.html

#FIELDS = ('area', 'population', 'iso', 'country', 'capital', 'continent', 'tld', 'currency_code', 'currency_name', 'phone', 'postal_code_format', 'postal_code_regex', 'languages', 'neighbours')
FIELDS = ('area', 'population', 'iso')

def regex_scraper(html):
    results = {}
    for field in FIELDS:
        results[field] = re.search('<tr id="places_{}__row">.*?<td class="w2p_fw">(.*?)</td>'.format(field), html).groups()[0]
    return results

def beautiful_soup_scraper(html):
    soup = BeautifulSoup(html, 'html.parser')
    results = {}
    for field in FIELDS:
        results[field] = soup.find('table').find('tr', id='places_{}__row'.format(field)).find('td', class_='w2p_fw').text
    return results

def lxml_scraper(html):
    tree = lxml.html.fromstring(html)
    results = {}
    for field in FIELDS:
        results[field] = tree.cssselect('table > tr#places_{}__row > td.w2p_fw'.format(field))[0].text_content()
    return results

def main():
    times = {}
    html = urllib2.urlopen('http://example.webscraping.com/places/default/view/United-Kingdom-239').read()
    NUM_ITERATIONS = 10  # number of times to test each scraper
    for name, scraper in ('Regular expressions', regex_scraper), ('Beautiful Soup', beautiful_soup_scraper), ('Lxml', lxml_scraper):
        times[name] = []
        # record start time of scrape
        start = time.time()
        for i in range(NUM_ITERATIONS):
            if scraper == regex_scraper:
                # the regular expression module will cache results
                # so need to purge this cache for meaningful timings
                re.purge()
            result = scraper(html)
            # check scraped result is as expected
            assert(result['area'] == '244,820 square kilometres')
            times[name].append(time.time() - start)
        # record end time of scrape and output the total
        end = time.time()
        print '{}: {:.2f} seconds'.format(name, end - start)

    writer = csv.writer(open('times.csv', 'w'))
    header = sorted(times.keys())
    writer.writerow(header)
    for row in zip(*[times[scraper] for scraper in header]):
        writer.writerow(row)

if __name__ == '__main__':
    main()
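The script above imports timeit but never uses it. As an alternative, each scraper could be timed with timeit.timeit, which runs the callable the requested number of times and returns the total elapsed seconds. A minimal sketch, assuming the html variable and the three scraper functions defined above are available (it does not purge the regex cache, so it slightly favours the regex scraper):

import timeit

def benchmark(html, num_iterations=10):
    for name, scraper in (('Regular expressions', regex_scraper),
                          ('Beautiful Soup', beautiful_soup_scraper),
                          ('Lxml', lxml_scraper)):
        # timeit.timeit calls the lambda num_iterations times and returns the total seconds
        seconds = timeit.timeit(lambda: scraper(html), number=num_iterations)
        print '{}: {:.2f} seconds'.format(name, seconds)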
From these timings we can summarize the strengths and weaknesses of the three approaches:

Scraping approach | Performance | Ease of use | Ease of installation
---|---|---|---
Regular expressions | Fast | Hard | Easy (built-in module)
Beautiful Soup | Slow | Easy | Easy (pure Python)
Lxml | Fast | Easy | Relatively hard

In most cases lxml is the best choice for scraping, because it is both fast and robust, whereas regular expressions and Beautiful Soup are only useful in particular scenarios.
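The "robust" part of that claim refers to the fact that lxml, too, repairs broken markup while building its tree. A minimal sketch with an illustrative broken snippet:

import lxml.html

# illustrative snippet: unquoted attribute and unclosed <li> tags
broken_html = '<ul class=country><li>Area<li>Population</ul>'
tree = lxml.html.fromstring(broken_html)
# lxml quotes the attribute and closes the <li> tags while parsing
print lxml.html.tostring(tree, pretty_print=True)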
5. Adding a scrape callback to the link crawler
link_crawler.py:
import re
import urlparse
import urllib2
import time
from datetime import datetime
import robotparser
import Queue


def link_crawler(seed_url, link_regex=None, delay=5, max_depth=-1, max_urls=-1, headers=None, user_agent='wswp', proxy=None, num_retries=1, scrape_callback=None):
    """Crawl from the given seed URL following links matched by link_regex
    """
    # the queue of URL's that still need to be crawled
    crawl_queue = [seed_url]
    # the URL's that have been seen and at what depth
    seen = {seed_url: 0}
    # track how many URL's have been downloaded
    num_urls = 0
    rp = get_robots(seed_url)
    throttle = Throttle(delay)
    headers = headers or {}
    if user_agent:
        headers['User-agent'] = user_agent

    while crawl_queue:
        url = crawl_queue.pop()
        depth = seen[url]
        # check url passes robots.txt restrictions
        if rp.can_fetch(user_agent, url):
            throttle.wait(url)
            html = download(url, headers, proxy=proxy, num_retries=num_retries)
            links = []
            if scrape_callback:
                links.extend(scrape_callback(url, html) or [])

            if depth != max_depth:
                # can still crawl further
                temp_links = get_links(html)
                if link_regex:
                    # filter for links matching our regular expression
                    links.extend(link for link in temp_links if re.match(link_regex, link))

            for link in links:
                link = normalize(seed_url, link)
                # check whether already crawled this link
                if link not in seen:
                    seen[link] = depth + 1
                    # check link is within same domain
                    if same_domain(seed_url, link):
                        # success! add this new link to queue
                        crawl_queue.append(link)

            # check whether have reached downloaded maximum
            num_urls += 1
            if num_urls == max_urls:
                break
        else:
            print 'Blocked by robots.txt:', url


class Throttle:
    """Throttle downloading by sleeping between requests to same domain
    """
    def __init__(self, delay):
        # amount of delay between downloads for each domain
        self.delay = delay
        # timestamp of when a domain was last accessed
        self.domains = {}

    def wait(self, url):
        """Delay if have accessed this domain recently
        """
        domain = urlparse.urlsplit(url).netloc
        last_accessed = self.domains.get(domain)
        if self.delay > 0 and last_accessed is not None:
            sleep_secs = self.delay - (datetime.now() - last_accessed).seconds
            if sleep_secs > 0:
                time.sleep(sleep_secs)
        self.domains[domain] = datetime.now()


def download(url, headers, proxy, num_retries, data=None):
    print 'Downloading:', url
    request = urllib2.Request(url, data, headers)
    opener = urllib2.build_opener()
    if proxy:
        proxy_params = {urlparse.urlparse(url).scheme: proxy}
        opener.add_handler(urllib2.ProxyHandler(proxy_params))
    try:
        response = opener.open(request)
        html = response.read()
        code = response.code
    except urllib2.URLError as e:
        print 'Download error:', e.reason
        html = ''
        if hasattr(e, 'code'):
            code = e.code
            if num_retries > 0 and 500 <= code < 600:
                # retry 5XX HTTP errors
                html = download(url, headers, proxy, num_retries-1, data)
        else:
            code = None
    return html


def normalize(seed_url, link):
    """Normalize this URL by removing hash and adding domain
    """
    link, _ = urlparse.urldefrag(link)  # remove hash to avoid duplicates
    return urlparse.urljoin(seed_url, link)


def same_domain(url1, url2):
    """Return True if both URL's belong to same domain
    """
    return urlparse.urlparse(url1).netloc == urlparse.urlparse(url2).netloc


def get_robots(url):
    """Initialize robots parser for this domain
    """
    rp = robotparser.RobotFileParser()
    rp.set_url(urlparse.urljoin(url, '/robots.txt'))
    rp.read()
    return rp


def get_links(html):
    """Return a list of links from html
    """
    # a regular expression to extract all links from the webpage
    webpage_regex = re.compile('<a[^>]+href=["\'](.*?)["\']', re.IGNORECASE)
    # list of all links from the webpage
    return webpage_regex.findall(html)
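As a usage sketch, the crawler can also be run without a scrape callback, simply following index and view links one level deep; the delay and regex values below are illustrative only:

# illustrative invocation: politely crawl one level of index/view pages
link_crawler('http://example.webscraping.com/places/default/',
             link_regex='/places/default/(index|view)',
             delay=3, max_depth=1, user_agent='wswp')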
The callback function:
# -*- coding: utf-8 -*-
import csv
import re
import urlparse
import lxml.html
from link_crawler import link_crawler


class ScrapeCallback:
    def __init__(self):
        self.writer = csv.writer(open('countries.csv', 'w'))
        self.fields = ('area', 'population', 'iso', 'country', 'capital', 'continent', 'tld', 'currency_code', 'currency_name', 'phone', 'postal_code_format', 'postal_code_regex', 'languages', 'neighbours')
        self.writer.writerow(self.fields)

    def __call__(self, url, html):
        if re.search('/places/default/view/', url):
            tree = lxml.html.fromstring(html)
            row = []
            for field in self.fields:
                row.append(tree.cssselect('table > tr#places_{}__row > td.w2p_fw'.format(field))[0].text_content())
            print row
            self.writer.writerow(row)


if __name__ == '__main__':
    link_crawler('http://example.webscraping.com/places/default/', link_regex='/places/default/(index|view)', max_depth=1, num_retries=3, scrape_callback=ScrapeCallback())
Running this crawls the country view pages, prints each scraped row, and writes one row per country to countries.csv.
That concludes this summary of Chapter 2 of Web Scraping with Python (《用Python写网络爬虫》).