The web crawler program in Core Python Programming, 2nd Edition was written for Python 2 and does not run under Python 3. I have modified the relevant code to make it work; suggestions and corrections are welcome. The main Python 2 to Python 3 changes and the ported code follow.
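As a quick reference, the port mainly comes down to the standard-library reshuffling between Python 2 and Python 3; a minimal sketch of the import mapping relied on below (Python 2 counterparts noted in the comments):

import urllib.request                         # Python 2: urllib.urlretrieve
from urllib.parse import urlparse, urljoin    # Python 2: urlparse module
from html.parser import HTMLParser            # Python 2: htmllib.HTMLParser + formatter
# In addition, raw_input() becomes input() and the print statement becomes the print() function.

The full ported listing: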
#!/usr/bin/env python

import urllib.request
from sys import argv
from os import makedirs, unlink
from os.path import isdir, exists, dirname, splitext
from html.parser import HTMLParser
from urllib.parse import urlparse, urljoin


class MyHTMLParser(HTMLParser):
    # collect the href value of every <a> tag fed to the parser
    def __init__(self):
        HTMLParser.__init__(self)
        self.anchorlist = []

    def handle_starttag(self, tag, attrs):
        if tag == 'a':
            for name, value in attrs:
                if name == 'href':
                    self.anchorlist.append(value)


class Retriever:                                  # download Web pages
    def __init__(self, url):
        self.url = url
        self.file = self.filename(url)
        self.parser = MyHTMLParser()

    def filename(self, url, deffile='index.htm'):
        parsedurl = urlparse(url, 'http:', 0)     # parse path
        path = parsedurl[1] + parsedurl[2]        # netloc + path
        print('path=' + path)
        ext = splitext(path)
        if ext[1] == '':                          # no file extension: append default file
            if path[-1] == '/':
                path = path + deffile
            else:
                path = path + '/' + deffile
        dir = dirname(path)
        if not isdir(dir):                        # create archive dir if necessary
            if exists(dir):
                unlink(dir)
            makedirs(dir)
        return path

    def download(self):                           # download Web page
        try:
            retval = urllib.request.urlretrieve(self.url, self.file)
        except IOError:
            retval = ('*** ERROR: invalid URL "%s"' % self.url,)
        return retval

    def parseAndGetLinks(self):                   # parse HTML, save links
        self.parser.feed(open(self.file).read())
        self.parser.close()
        return self.parser.anchorlist


class Crawler:                                    # manage entire crawling process
    count = 0                                     # static downloaded page counter

    def __init__(self, url):
        self.q = [url]                            # queue of URLs still to fetch
        self.seen = []                            # URLs already processed
        self.dom = urlparse(url)[1]               # restrict crawl to this domain

    def getPage(self, url):
        r = Retriever(url)
        retval = r.download()
        if retval[0].startswith('*'):             # error string, not a saved file: do not parse
            print(retval, '... skipping parse')
            return
        Crawler.count = Crawler.count + 1
        print('\n(', Crawler.count, ')')
        print('URL:', url)
        print('FILE:', retval[0])
        self.seen.append(url)

        links = r.parseAndGetLinks()              # get and process links
        for eachLink in links:
            if eachLink[:4] != 'http' and eachLink.find('://') == -1:
                eachLink = urljoin(url, eachLink) # make relative links absolute
            print('* ', eachLink)

            if eachLink.lower().find('mailto:') != -1:
                print('... discarded, mailto link')
                continue

            if eachLink not in self.seen:
                if eachLink.find(self.dom) == -1:
                    print('... discarded, not in domain')
                else:
                    if eachLink not in self.q:
                        self.q.append(eachLink)
                        print('... new, added to Q')
                    else:
                        print('... discarded, already in Q')
            else:
                print('... discarded, already processed')

    def go(self):                                 # process links in queue
        while self.q:
            url = self.q.pop()
            self.getPage(url)


def main():
    if len(argv) > 1:
        url = argv[1]
    else:
        try:
            url = input('Enter starting URL: ')
        except (KeyboardInterrupt, EOFError):
            url = ''
    if not url:
        return
    robot = Crawler(url)
    robot.go()


if __name__ == '__main__':
    main()
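To sanity-check the link extraction on its own, MyHTMLParser can be exercised on an in-memory snippet before running a full crawl; a minimal sketch (the HTML string here is made up purely for illustration):

# feed a small HTML string and inspect the collected hrefs
p = MyHTMLParser()
p.feed('<html><body>'
       '<a href="/home/index.html">home</a>'
       '<a href="mailto:someone@example.com">mail</a>'
       '</body></html>')
p.close()
print(p.anchorlist)    # expected: ['/home/index.html', 'mailto:someone@example.com']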
Run output:
Enter starting URL: http://www.null.com/home/index.html
path=www.null.com/home/index.html
( 1 )
URL: http://www.null.com/home/index.html
FILE: www.null.com/home/index.html
* http://www.null.com/rg-erdr.php?_rpo=t nxGRjzr&_rdm=iii.B3aa.JvV&p=J1c4gcvmfN9ffFGoFgXoYVa%7C%40%7CbzGzTE%2C5f95EH%7C%40%7CB3aa.JvV%7C%40%7CfB%7C%40%7C%7C%40%7CEHZzFZHHG%7C%40%7CzHZZFTH%7C%40%7CeL%7C%40%7C19c4NW4cGFHzTEtTEGZGzTbT%7C%40%7Ct+nt07zz8%7C%40%7Ct+7zFZKFH&ga=TQ2niw9CK%2Bp%2BYPMoe5UNB%2Fvf4hdCh2g2rHMlrMH7DRNXAlMr5jNBBUX2%2BdKetQS1fmCDFQ6P46bEjN%2Fchlf8A5iLfYqN9xkNUU1bB4dA%2FdyfJPpOd6%2Fd1fSEIBhDA5mGRnn5x%2B2xpReqK%2BhKov58zfm%2FGUNsbm%2B6d2wUIX12hGVBVPfjOvt772Oa%2FXY4tk%2FL&t=gnojs
... new, added to Q
* http://www.null.com/home/index.html
... discarded, already processed
path=www.null.com/rg-erdr.php
( 2 )
URL: http://www.null.com/rg-erdr.php?_rpo=t nxGRjzr&_rdm=iii.B3aa.JvV&p=J1c4gcvmfN9ffFGoFgXoYVa%7C%40%7CbzGzTE%2C5f95EH%7C%40%7CB3aa.JvV%7C%40%7CfB%7C%40%7C%7C%40%7CEHZzFZHHG%7C%40%7CzHZZFTH%7C%40%7CeL%7C%40%7C19c4NW4cGFHzTEtTEGZGzTbT%7C%40%7Ct+nt07zz8%7C%40%7Ct+7zFZKFH&ga=TQ2niw9CK%2Bp%2BYPMoe5UNB%2Fvf4hdCh2g2rHMlrMH7DRNXAlMr5jNBBUX2%2BdKetQS1fmCDFQ6P46bEjN%2Fchlf8A5iLfYqN9xkNUU1bB4dA%2FdyfJPpOd6%2Fd1fSEIBhDA5mGRnn5x%2B2xpReqK%2BhKov58zfm%2FGUNsbm%2B6d2wUIX12hGVBVPfjOvt772Oa%2FXY4tk%2FL&t=gnojs
FILE: www.null.com/rg-erdr.php
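Besides typing the starting URL at the prompt, main() also reads sys.argv, and the crawler can be driven directly from Python; a minimal sketch equivalent to the interactive run above:

# seed the crawler with a starting URL and process the queue until it is empty
robot = Crawler('http://www.null.com/home/index.html')
robot.go()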
In summary, this post presents a Python-based web crawler that, given a starting URL, downloads pages, parses the links they contain, and performs a basic traversal of the site. The full implementation is listed above, and the sample run illustrates the crawl workflow.
