#!/usr/bin/env python
# tested against bbs.stuhome.net
from sys import argv
from os import makedirs, unlink, sep
from os.path import isdir, exists, dirname, splitext
from string import replace, find, lower
from htmllib import HTMLParser
from urllib import urlretrieve
from urlparse import urlparse, urljoin
from formatter import DumbWriter, AbstractFormatter
from cStringIO import StringIO
class Retriever(object):  # download Web pages
    def __init__(self, url):
        self.url = url
        self.file = self.filename(url)
    def filename(self, url, deffile='index.htm'):  # default file name when the URL has none
        parsedurl = urlparse(url, 'http:', 0)
        # e.g. ParseResult(scheme='http', netloc='www.yangqq.com',
        #      path='/jstt/bj/2015-01-09/740.html', params='', query='', fragment='')
        path = parsedurl[1] + parsedurl[2]  # www.yangqq.com/jstt/bj/2015-01-09/740.html
        ext = splitext(path)  # ('www.yangqq.com/jstt/bj/2015-01-09/740', '.html')
        if ext[1] == '':  # no file extension, so append the default file name
            if path[-1] == '/':
                path += deffile
            else:
                path += '/' + deffile  # e.g. entering just 'lyb' (not a URL) becomes lyb/index.htm
        ldir = dirname(path)  # local archive dir, e.g. www.yangqq.com/jstt/bj/2015-01-09
        print path  # debug output
        print ldir
        if sep != '/':  # os-independent path separator: swap '/' for the local one
            ldir = replace(ldir, '/', sep)  # so dirs can be created on any platform ('\\' on Windows, '/' on Linux)
        if not isdir(ldir):  # create archive dir if necessary
            if exists(ldir): unlink(ldir)
            makedirs(ldir)
        return path  # ldir was used to create the local directory; path names the local file
    def download(self):  # download Web page
        try:
            retval = urlretrieve(self.url, self.file)
        except IOError:
            retval = ('*** ERROR: invalid URL "%s"' % self.url,)
        print retval  # debug output
        return retval
    def parseAndGetLinks(self):  # parse HTML, save links
        # DumbWriter(StringIO()) just swallows the rendered text; see
        # http://blog.chinaunix.net/uid-25799257-id-3080103.html
        self.parser = HTMLParser(AbstractFormatter(DumbWriter(StringIO())))
        self.parser.feed(open(self.file).read())
        self.parser.close()
        return self.parser.anchorlist
class Crawler(object):  # manage entire crawling process
    count = 0  # static downloaded page counter

    def __init__(self, url):  # e.g. url = 'http://www.yangqq.com/jstt/bj/2015-01-09/740.html'
        self.q = [url]   # queue of links still to download
        self.seen = []   # links already processed
        self.dom = urlparse(url)[1]  # stay within this domain, e.g. 'www.yangqq.com'
    def getPage(self, url):
        r = Retriever(url)
        retval = r.download()
        # the book tests retval[0] == '*', which is wrong: on error retval[0]
        # is the whole message string, so check its first character instead
        if retval[0][0] == '*':  # error situation, do not parse
            print retval, '... skipping parse'
            return
        Crawler.count = Crawler.count + 1
        print '\n(', Crawler.count, ')'
        print 'URL:', url
        print 'FILE:', retval[0]
        self.seen.append(url)
        links = r.parseAndGetLinks()  # get and process links
        for eachLink in links:
            if eachLink[:4] != 'http' and find(eachLink, '://') == -1:
                eachLink = urljoin(url, eachLink)  # make relative links absolute
            print '* ', eachLink,

            if find(lower(eachLink), 'mailto:') != -1:
                print '... discarded, mailto link'
                continue

            if eachLink not in self.seen:
                if find(eachLink, self.dom) == -1:
                    print '... discarded, not in domain'
                else:
                    if eachLink not in self.q:
                        self.q.append(eachLink)
                        print '... new, added to Q'
                    else:
                        print '... discarded, already in Q'
            else:
                print '... discarded, already processed'
    def go(self):  # process links in queue
        while self.q:
            url = self.q.pop()
            self.getPage(url)
def main():
    if len(argv) > 1:
        url = argv[1]
    else:
        try:
            url = raw_input('Enter starting URL: ')
        except (KeyboardInterrupt, EOFError):
            url = ''
    if not url: return
    robot = Crawler(url)
    robot.go()

if __name__ == '__main__':
    main()
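To try it, pass a starting URL on the command line or type one at the prompt; assuming the script is saved as crawl.py:

    $ python crawl.py http://www.yangqq.com/jstt/bj/2015-01-09/740.html

Each downloaded page is mirrored into a local directory tree named after the site, and newly found in-domain links are queued until the queue is empty.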
When testing against a CSDN blog, the pages could not be downloaded: it turns out CSDN has anti-crawler measures.
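A common trip-wire is Python's default User-Agent string. A minimal sketch of a workaround, assuming the block only inspects the User-Agent header (the browser string below is just an example; the crawler's urlretrieve call would have to be swapped for something like this):

import urllib2

def fetch(url, filename):
    # many naive anti-crawler checks reject Python's default agent,
    # so present a regular browser User-Agent instead
    req = urllib2.Request(url, headers={
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36'})
    data = urllib2.urlopen(req).read()
    f = open(filename, 'wb')
    f.write(data)
    f.close()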
A walkthrough of this code: http://www.cnblogs.com/zhengyuxin/articles/1943167.html or http://www.cnblogs.com/little-white/p/3471499.html
On the AbstractFormatter used in parseAndGetLinks(): http://blog.chinaunix.net/uid-25799257-id-3080103.html
A more detailed explanation of AbstractFormatter: http://www.linuxqq.net/archives/710.html
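The short version: htmllib.HTMLParser must be given a formatter to send rendered text to; AbstractFormatter drives a writer, and DumbWriter(StringIO()) dumps that text into a throwaway buffer. As a side effect of parsing, HTMLParser collects every <a href=...> target in its anchorlist attribute, which is all parseAndGetLinks() cares about. A standalone sketch (the HTML snippet is made up for illustration):

from htmllib import HTMLParser
from formatter import AbstractFormatter, DumbWriter
from cStringIO import StringIO

html = '<a href="http://www.yangqq.com/">home</a> <a href="740.html">a post</a>'
parser = HTMLParser(AbstractFormatter(DumbWriter(StringIO())))  # rendered text is discarded
parser.feed(html)
parser.close()
print parser.anchorlist  # ['http://www.yangqq.com/', '740.html']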