scrapy爬取页面并按目录结构存放页面

给大家共享一段代码,希望对大家有帮助。 该代码的作用是将指定站点的页面递归地爬取,并按照目录结构存放爬取结果:

<pre><code> from scrapy.selector import HtmlXPathSelector from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor from scrapy.contrib.spiders import CrawlSpider, Rule from scrapy.http import Request from scrapy import log import os, os.path class BookSpider(CrawlSpider): name = 'Book' #allowed_domains = ['xx.com'] #start_urls = ['http://www.xx.com/'] #rules = ( # Rule(SgmlLinkExtractor(allow=r'\.html'), callback='parse_item', follow=True), #) def __init__(self, start_url, output_dir = "./", *args, **kwargs): super(BookSpider, self).__init__(*args, **kwargs) self.start_urls = [] self.start_urls.append(start_url) self.output_dir = output_dir self.allowed_domains = map(self._get_domain, self.start_urls) def _get_domain(self, url): first_dot = url.find('.') if -1 == first_dot: return None first_slash = url.find('/', first_dot + 1) if -1 == first_slash: return url[first_dot + 1:] return url[first_dot + 1: first_slash] def parse(self, response): """first Request return to fetch start_url""" self.parse_detail(response) yield Request(response.url, callback = self.parse_item) def parse_item(self, response): page_links = SgmlLinkExtractor(allow=r'\.html').extract_links(response) """ iterate two times for BFS; one for DFS""" for link in page_links: yield Request(link.url, callback = self.parse_detail) for link in page_links: yield Request(link.url, callback = self.parse_item) def parse_detail(self, response): outputfile = self._rtouch(response.url) if not outputfile: log.msg("download %s fail" % response.url, level = log.WARNING, spider = self) return with open(outputfile, 'w') as f: f.write(response.body) log.msg("download file: %s" % outputfile, level = log.INFO, spider = self) def _rtouch(self, filepath): pos = filepath.find('://') if -1 != pos: filepath = filepath[pos + 3:] if ".html" != filepath[-5:]: filepath += "/index.html" opath = os.path.abspath(self.output_dir + "/" + filepath) basedir = os.path.dirname(opath) if not os.path.exists(basedir): try: 
os.makedirs(basedir) except Exception, msg: log.msg(msg, level = log.WARNING, spider = self) return None return opath </code></pre>

转载于:https://my.oschina.net/HappyRoad/blog/173510

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值