Adding a scrape callback to the link crawler

The complete crawler below (Python 2) adds a scrape_callback hook to link_crawler and a ScrapeCallback class that scrapes country data into countries.csv.

# -*- coding: UTF-8 -*-
import re
import urlparse
import urllib2
import time
from datetime import datetime
import robotparser
import Queue
import lxml.html
import csv


def link_crawler(seed_url, link_regex=None, delay=5, max_depth=-1, max_urls=-1,
                 headers=None, user_agent='wswp', proxy=None, num_retries=1,
                 scrape_callback=None):
    """Crawl from the given seed URL following links matched by link_regex"""
    # the queue of URLs that still need to be crawled (stored in a deque)
    crawl_queue = Queue.deque([seed_url])
    # the URLs that have been seen, and at what depth
    seen = {seed_url: 0}
    # track how many URLs have been downloaded
    num_urls = 0
    rp = get_robots(seed_url)  # fetch and parse this domain's robots.txt
    throttle = Throttle(delay)  # throttle download speed
    headers = headers or {}
    if user_agent:
        headers['User-agent'] = user_agent  # set the user agent

    while crawl_queue:
        url = crawl_queue.pop()  # remove and return the next URL from the queue
        # check url passes robots.txt restrictions
        if rp.can_fetch(user_agent, url):  # is this user agent allowed to fetch the page?
            throttle.wait(url)  # delay between requests to the same domain
            html = download(url, headers, proxy=proxy, num_retries=num_retries)
            links = []
            if scrape_callback:
                links.extend(scrape_callback(url, html) or [])

            depth = seen[url]
            if depth != max_depth:
                # can still crawl further
                if link_regex:
                    # filter for links matching our regular expression
                    links.extend(link for link in get_links(html)
                                 if re.match(link_regex, link))

                for link in links:
                    link = normalize(seed_url, link)  # convert to an absolute link
                    # check whether already crawled this link
                    if link not in seen:
                        seen[link] = depth + 1
                        # check link is within same domain
                        if same_domain(seed_url, link):
                            # success! add this new link to queue
                            crawl_queue.append(link)

            # check whether have reached downloaded maximum
            num_urls += 1
            if num_urls == max_urls:
                break
        else:
            print 'Blocked by robots.txt:', url


# throttle download speed
class Throttle:
    """Throttle downloading by sleeping between requests to the same domain"""
    def __init__(self, delay):
        # amount of delay between downloads for each domain
        self.delay = delay
        # timestamp of when a domain was last accessed
        self.domains = {}

    def wait(self, url):
        domain = urlparse.urlparse(url).netloc  # the host (server) part of the URL
        last_accessed = self.domains.get(domain)

        if self.delay > 0 and last_accessed is not None:
            sleep_secs = self.delay - (datetime.now() - last_accessed).seconds
            if sleep_secs > 0:
                time.sleep(sleep_secs)
        self.domains[domain] = datetime.now()


def download(url, headers, proxy, num_retries, data=None):
    print 'Downloading:', url
    request = urllib2.Request(url, data, headers)
    opener = urllib2.build_opener()
    if proxy:
        proxy_params = {urlparse.urlparse(url).scheme: proxy}
        opener.add_handler(urllib2.ProxyHandler(proxy_params))
    try:
        response = opener.open(request)
        html = response.read()
        code = response.code
    except urllib2.URLError as e:
        print 'Download error:', e.reason
        html = ''
        if hasattr(e, 'code'):
            code = e.code
            if num_retries > 0 and 500 <= code < 600:
                # retry 5XX HTTP errors
                return download(url, headers, proxy, num_retries - 1, data)
        else:
            code = None
    return html


def normalize(seed_url, link):
    """Normalize this URL by removing the hash and adding the domain"""
    # remove the hash to avoid duplicates; urldefrag(url) splits the URL into a
    # (url-without-fragment, fragment) pair
    link, _ = urlparse.urldefrag(link)
    return urlparse.urljoin(seed_url, link)  # absolute link


def same_domain(url1, url2):
    """Return True if both URLs belong to the same domain"""
    # urlparse() splits the URL into a 6-tuple of components; compare the hosts
    return urlparse.urlparse(url1).netloc == urlparse.urlparse(url2).netloc


def get_robots(url):
    """Initialize the robots parser for this domain"""
    rp = robotparser.RobotFileParser()
    rp.set_url(urlparse.urljoin(url, '/robots.txt'))  # absolute link to robots.txt
    rp.read()
    return rp


def get_links(html):
    """Return a list of links from html"""
    # re.compile() compiles the regular-expression string into a Pattern object;
    # findall() then returns every captured href value in the page
    webpage_regex = re.compile('<a[^>]+href=["\'](.*?)["\']', re.IGNORECASE)
    # list of all links from the webpage
    return webpage_regex.findall(html)


class ScrapeCallback:
    def __init__(self):
        self.writer = csv.writer(open('countries.csv', 'w'))
        self.fields = ('area', 'population', 'iso', 'country', 'capital',
                       'continent', 'tld', 'currency_code', 'currency_name',
                       'phone', 'postal_code_format', 'postal_code_regex',
                       'languages')
        self.writer.writerow(self.fields)

    def __call__(self, url, html):
        # only scrape country pages, whose URL contains 'view'
        if re.search('view', url):
            tree = lxml.html.fromstring(html)
            row = []
            for field in self.fields:
                row.append(tree.cssselect(
                    'table > tr#places_{}__row > td.w2p_fw'.format(field))[0].text_content())
            self.writer.writerow(row)


if __name__ == '__main__':
    link_crawler('http://example.webscraping.com', '/(index|view)',
                 max_depth=-1, delay=0, num_retries=1, user_agent='GoodCrawler',
                 scrape_callback=ScrapeCallback())
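The URL handling in normalize() and same_domain() relies entirely on the standard urlparse module: urldefrag() strips the #fragment so the same page is not queued twice, urljoin() resolves a relative link against the seed URL, and netloc is the host name used both for the same-domain check and as the key of Throttle.domains. A quick sketch of what these calls return (the URLs below are only illustrative examples):

import urlparse

# urldefrag() splits a URL into (url-without-fragment, fragment)
print urlparse.urldefrag('http://example.webscraping.com/view/Aland-Islands-2#neighbours')
# ('http://example.webscraping.com/view/Aland-Islands-2', 'neighbours')

# urljoin() resolves a relative link against the seed URL
print urlparse.urljoin('http://example.webscraping.com', '/index/1')
# http://example.webscraping.com/index/1

# netloc is the host part compared by same_domain() and used as the throttle key
print urlparse.urlparse('http://example.webscraping.com/index/1').netloc
# example.webscraping.com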
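link_crawler() only assumes that scrape_callback is a callable taking (url, html); if the call returns a list of URLs, they are added to the links scheduled for crawling, while returning None adds nothing. As a minimal sketch of an alternative callback (the class name SaveHtmlCallback and the pages output directory are illustrative, not part of the original code), every downloaded page could simply be cached to disk instead of written out as CSV rows:

import os
import re

class SaveHtmlCallback:
    """Sketch of a scrape callback that writes each downloaded page to disk."""
    def __init__(self, output_dir='pages'):
        self.output_dir = output_dir
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)

    def __call__(self, url, html):
        # build a crude filename from the URL; returning None means no
        # extra links are added to the crawl queue
        filename = re.sub('[^0-9A-Za-z]+', '_', url) + '.html'
        with open(os.path.join(self.output_dir, filename), 'wb') as f:
            f.write(html)

# hypothetical usage with the link_crawler defined above:
# link_crawler('http://example.webscraping.com', '/(index|view)',
#              scrape_callback=SaveHtmlCallback())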