My First Web Crawler

#!/usr/bin/env python
# test with bbs.stuhome.net
from sys import argv
from os import makedirs, unlink, sep
from os.path import isdir, exists, dirname, splitext
from string import replace, find, lower
from htmllib import HTMLParser
from urllib import urlretrieve
from urlparse import urlparse, urljoin
from formatter import DumbWriter, AbstractFormatter
from cStringIO import StringIO

class Retriever(object):    # download Web pages

    def __init__(self, url):
        self.url = url
        self.file = self.filename(url)

    def filename(self, url, deffile='index.htm'):
        parsedurl = urlparse(url, 'http:', 0)  # e.g. ParseResult(scheme='http', netloc='www.yangqq.com', path='/jstt/bj/2015-01-09/740.html', params='', query='', fragment='')

        path = parsedurl[1] + parsedurl[2]  # www.yangqq.com/jstt/bj/2015-01-09/740.html
        ext = splitext(path)  # ('www.yangqq.com/jstt/bj/2015-01-09/740', '.html')
        if ext[1] == '':      # no file extension, so supply the default file name
            if path[-1] == '/':
                path += deffile
            else:
                path += '/' + deffile  # e.g. a bare 'lyb' is not a full URL path, so it becomes 'lyb/index.htm'
        ldir = dirname(path)  # www.yangqq.com/jstt/bj/2015-01-09
        if sep != '/':        # OS-independent path separator: swap '/' for the local one, e.g. '\' on Windows
            ldir = replace(ldir, '/', sep)
        if not isdir(ldir):   # create archive dir if necessary
            if exists(ldir): unlink(ldir)
            makedirs(ldir)
        return path  # ldir is used to create the local directory; path is the saved file's name

    def download(self):        # download Web page
        try:
            retval = urlretrieve(self.url, self.file)
        except IOError:
            retval = ('*** ERROR: invalid URL "%s"' % \
                self.url, )
        return retval

    def parseAndGetLinks(self):    # parse HTML, save links
        self.parser = HTMLParser(AbstractFormatter( \
            DumbWriter(StringIO())))
        self.parser.feed(open(self.file).read())
        self.parser.close()
        return self.parser.anchorlist#http://blog.chinaunix.net/uid-25799257-id-3080103.html

class Crawler(object):        # manage entire crawling process

    count = 0            # static downloaded page counter

    def __init__(self, url):#url= http://www.yangqq.com/jstt/bj/2015-01-09/740.html
        self.q = [url]
        self.seen = []
        self.dom = urlparse(url)[1]#www.yangqq.com

    def getPage(self, url):
        r = Retriever(url)
        retval = r.download()
        # the book checks retval[0], which is wrong: on error retval is a
        # one-element tuple whose string starts with '*', so test retval[0][0]
        if retval[0][0] == '*':     # error situation, do not parse
            print retval, '... skipping parse'
            return
        Crawler.count = Crawler.count + 1
        print '\n(', Crawler.count, ')'
        print 'URL:', url
        print 'FILE:', retval[0]
        self.seen.append(url)

        links = r.parseAndGetLinks()  # get and process links
        for eachLink in links:
            if eachLink[:4] != 'http' and \
                    find(eachLink, '://') == -1:
                eachLink = urljoin(url, eachLink)
            print '* ', eachLink,

            if find(lower(eachLink), 'mailto:') != -1:
                print '... discarded, mailto link'
                continue

            if eachLink not in self.seen:
                if find(eachLink, self.dom) == -1:
                    print '... discarded, not in domain'
                else:
                    if eachLink not in self.q:
                        self.q.append(eachLink)
                        print '... new, added to Q'
                    else:
                        print '... discarded, already in Q'
            else:
                print '... discarded, already processed'

    def go(self):                # process links in queue
        while self.q:
            url = self.q.pop()
            self.getPage(url)

def main():
    if len(argv) > 1:
        url = argv[1]
    else:
        try:
            url = raw_input('Enter starting URL: ')
        except (KeyboardInterrupt, EOFError):
            url = ''

    if not url: return
    robot = Crawler(url)
    robot.go()

if __name__ == '__main__':
    main()
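As a quick sanity check of the saved-file path logic, here is how I would exercise Retriever by hand, assuming the listing above is saved as crawl.py (the module name is my assumption). Note the side effect: constructing a Retriever already creates the local directory tree.

# Hypothetical usage sketch; 'crawl' is an assumed module name.
# Creating a Retriever calls filename(), which makedirs() the local
# directory www.yangqq.com/jstt/bj/2015-01-09 under the current dir.
from crawl import Retriever

r = Retriever('http://www.yangqq.com/jstt/bj/2015-01-09/740.html')
print r.file   # www.yangqq.com/jstt/bj/2015-01-09/740.html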


 
When testing line 24 against a CSDN blog, I found the page would not download — it turns out CSDN has anti-crawler measures.
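If CSDN is rejecting the default urllib client, a common workaround is to send a browser-like User-Agent header. A minimal sketch with urllib2 — the target URL and header value here are illustrative assumptions, not part of the original code:

import urllib2

# Hypothetical sketch: fetch a page while masquerading as a browser.
# The User-Agent string is an assumption; any common browser UA works.
req = urllib2.Request('http://blog.csdn.net/',
                      headers={'User-Agent': 'Mozilla/5.0'})
try:
    html = urllib2.urlopen(req).read()
    print '%d bytes downloaded' % len(html)
except urllib2.HTTPError, e:
    print 'HTTP error:', e.code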
A walkthrough of this code: http://www.cnblogs.com/zhengyuxin/articles/1943167.html or http://www.cnblogs.com/little-white/p/3471499.html

On the AbstractFormatter used in parseAndGetLinks(): http://blog.chinaunix.net/uid-25799257-id-3080103.html

A more detailed look at AbstractFormatter: http://www.linuxqq.net/archives/710.html
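To make the parseAndGetLinks() plumbing concrete, here is a minimal standalone sketch (the sample HTML is made up): htmllib.HTMLParser, driven through an AbstractFormatter whose DumbWriter dumps its output into a throwaway StringIO, collects every href it sees into anchorlist.

from htmllib import HTMLParser
from formatter import AbstractFormatter, DumbWriter
from cStringIO import StringIO

# DumbWriter renders the formatted text into the StringIO buffer,
# which we never read: we only want the side effect of parsing,
# namely the anchorlist of href values.
parser = HTMLParser(AbstractFormatter(DumbWriter(StringIO())))
parser.feed('<a href="http://www.yangqq.com/">home</a>'
            '<a href="about.html">about</a>')
parser.close()
print parser.anchorlist   # ['http://www.yangqq.com/', 'about.html']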
