July Edu (七月算法) course "Python Web Crawler", Lesson 5: Several crawling patterns for Scrapy spiders

This lesson introduces the Scrapy crawling framework, with a focus on the Spider component.
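
As a point of reference before the examples, a Scrapy spider is just a class that names itself, lists its start URLs, and implements parse() to turn responses into items or further requests. A minimal sketch (the class, name and selector here are placeholders, not part of the course code):

import scrapy


class MinimalSpider(scrapy.Spider):
    # "name" is how the spider is referred to on the command line
    name = "minimal"
    # Scrapy downloads every URL in start_urls and passes each response to parse()
    start_urls = ['http://quotes.toscrape.com/']

    def parse(self, response):
        # Yield dicts as scraped items; yield scrapy.Request objects to crawl further pages
        yield {'page_title': response.xpath('//title/text()').extract_first()}

A standalone spider file like the ones below can be run with "scrapy runspider <file.py> -o items.json", which writes the yielded items to a JSON file.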

A spider can crawl in several ways:

1. Crawl a single page of content
2. Build URLs from a given list and crawl multiple pages
3. Find the "next page" link and follow it page by page
4. Enter each link and crawl the linked pages

An example of each is given below.

1. Crawling a single page
#by 寒小阳(hanxiaoyang.ml@gmail.com)

import scrapy


class JulyeduSpider(scrapy.Spider):
    name = "julyedu"
    start_urls = [
        'https://www.julyedu.com/category/index',
    ]

    def parse(self, response):
        # Each course card on the listing page is a div.course_info_box
        for julyedu_class in response.xpath('//div[@class="course_info_box"]'):
            print(julyedu_class.xpath('a/h4/text()').extract_first())
            print(julyedu_class.xpath('a/p[@class="course-info-tip"][1]/text()').extract_first())
            print(julyedu_class.xpath('a/p[@class="course-info-tip"][2]/text()').extract_first())
            print(response.urljoin(julyedu_class.xpath('a/img[1]/@src').extract_first()))
            print("\n")

            # Yield one item per course: title, description, schedule and image URL
            yield {
                'title': julyedu_class.xpath('a/h4/text()').extract_first(),
                'desc': julyedu_class.xpath('a/p[@class="course-info-tip"][1]/text()').extract_first(),
                'time': julyedu_class.xpath('a/p[@class="course-info-tip"][2]/text()').extract_first(),
                'img_url': response.urljoin(julyedu_class.xpath('a/img[1]/@src').extract_first())
            }
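
For comparison, the same kind of extraction can be written with Scrapy's CSS selectors, which some readers find easier to read than XPath. A rough equivalent of the spider above, assuming the same page structure (class names are taken from the XPath expressions, so treat them as assumptions):

import scrapy


class JulyeduCssSpider(scrapy.Spider):
    name = "julyedu_css"
    start_urls = ['https://www.julyedu.com/category/index']

    def parse(self, response):
        # Same course cards as above, selected with CSS instead of XPath
        for julyedu_class in response.css('div.course_info_box'):
            yield {
                'title': julyedu_class.css('a h4::text').extract_first(),
                'desc': julyedu_class.css('a p.course-info-tip::text').extract_first(),
                'img_url': response.urljoin(julyedu_class.css('a img::attr(src)').extract_first()),
            }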

2. Building URLs from a given list and crawling multiple pages
#by 寒小阳(hanxiaoyang.ml@gmail.com)

import scrapy


class CnBlogSpider(scrapy.Spider):
    name = "cnblogs"
    allowed_domains = ["cnblogs.com"]
    start_urls = [
        'http://www.cnblogs.com/pick/#p%s' % p for p in range(1, 11)
        ]

    def parse(self, response):
        # Each blog entry on the listing page is a div.post_item
        for article in response.xpath('//div[@class="post_item"]'):
            print(article.xpath('div[@class="post_item_body"]/h3/a/text()').extract_first().strip())
            print(response.urljoin(article.xpath('div[@class="post_item_body"]/h3/a/@href').extract_first()).strip())
            print(article.xpath('div[@class="post_item_body"]/p/text()').extract_first().strip())
            print(article.xpath('div[@class="post_item_body"]/div[@class="post_item_foot"]/a/text()').extract_first().strip())
            print(response.urljoin(article.xpath('div[@class="post_item_body"]/div/a/@href').extract_first()).strip())
            print(article.xpath('div[@class="post_item_body"]/div[@class="post_item_foot"]/span[@class="article_comment"]/a/text()').extract_first().strip())
            print(article.xpath('div[@class="post_item_body"]/div[@class="post_item_foot"]/span[@class="article_view"]/a/text()').extract_first().strip())
            print("")

            # Yield one item per post: title, link, summary, author info, comment and view counts
            yield {
                'title': article.xpath('div[@class="post_item_body"]/h3/a/text()').extract_first().strip(),
                'link': response.urljoin(article.xpath('div[@class="post_item_body"]/h3/a/@href').extract_first()).strip(),
                'summary': article.xpath('div[@class="post_item_body"]/p/text()').extract_first().strip(),
                'author': article.xpath('div[@class="post_item_body"]/div[@class="post_item_foot"]/a/text()').extract_first().strip(),
                'author_link': response.urljoin(article.xpath('div[@class="post_item_body"]/div/a/@href').extract_first()).strip(),
                'comment': article.xpath('div[@class="post_item_body"]/div[@class="post_item_foot"]/span[@class="article_comment"]/a/text()').extract_first().strip(),
                'view': article.xpath('div[@class="post_item_body"]/div[@class="post_item_foot"]/span[@class="article_view"]/a/text()').extract_first().strip(),
            }
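
Instead of pre-building all ten URLs in start_urls, the same pagination can be expressed with start_requests(), which yields one Request per page and keeps the URL template in one place. A sketch under the same assumptions as the spider above (only the title field is shown):

import scrapy


class CnBlogPagesSpider(scrapy.Spider):
    name = "cnblogs_pages"
    allowed_domains = ["cnblogs.com"]

    def start_requests(self):
        # Generate the first ten listing pages explicitly instead of via start_urls
        for p in range(1, 11):
            url = 'http://www.cnblogs.com/pick/#p%s' % p
            yield scrapy.Request(url, callback=self.parse)

    def parse(self, response):
        # Same extraction logic as the cnblogs spider above
        for article in response.xpath('//div[@class="post_item"]'):
            yield {
                'title': article.xpath('div[@class="post_item_body"]/h3/a/text()').extract_first(),
            }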

3. Finding the "next page" link and following it
import scrapy


class QuotesSpider(scrapy.Spider):
    name = "quotes"
    start_urls = [
        'http://quotes.toscrape.com/tag/humor/',
    ]

    def parse(self, response):
        for quote in response.xpath('//div[@class="quote"]'):
            yield {
                'text': quote.xpath('span[@class="text"]/text()').extract_first(),
                'author': quote.xpath('span/small[@class="author"]/text()').extract_first(),
            }

        next_page = response.xpath('//li[@class="next"]/a/@href').extract_first()
        if next_page is not None:
            next_page = response.urljoin(next_page)
            yield scrapy.Request(next_page, callback=self.parse)
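
In newer Scrapy versions (1.4 and later), response.follow() can replace the urljoin-plus-Request pair above: it resolves a relative URL against the current page and builds the Request itself. A sketch of the same quotes spider under that assumption:

import scrapy


class QuotesFollowSpider(scrapy.Spider):
    name = "quotes_follow"
    start_urls = ['http://quotes.toscrape.com/tag/humor/']

    def parse(self, response):
        for quote in response.xpath('//div[@class="quote"]'):
            yield {
                'text': quote.xpath('span[@class="text"]/text()').extract_first(),
                'author': quote.xpath('span/small[@class="author"]/text()').extract_first(),
            }

        # response.follow() resolves the relative href and builds the next Request for us
        next_page = response.xpath('//li[@class="next"]/a/@href').extract_first()
        if next_page is not None:
            yield response.follow(next_page, callback=self.parse)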

4. Entering links and crawling the linked pages
#by 寒小阳(hanxiaoyang.ml@gmail.com)

import scrapy


class QQNewsSpider(scrapy.Spider):
    name = 'qqnews'
    start_urls = ['http://news.qq.com/society_index.shtml']

    def parse(self, response):
        # Collect article links from the index page and follow each one into parse_question
        for href in response.xpath('//*[@id="news"]/div/div/div/div/em/a/@href'):
            full_url = response.urljoin(href.extract())
            yield scrapy.Request(full_url, callback=self.parse_question)

    def parse_question(self, response):
        # Called on each article page; pull title, publish time, category and body text
        print(response.xpath('//div[@class="qq_article"]/div/h1/text()').extract_first())
        print(response.xpath('//span[@class="a_time"]/text()').extract_first())
        print(response.xpath('//span[@class="a_catalog"]/a/text()').extract_first())
        print("\n".join(response.xpath('//div[@id="Cnt-Main-Article-QQ"]/p[@class="text"]/text()').extract()))
        print("")
        yield {
            'title': response.xpath('//div[@class="qq_article"]/div/h1/text()').extract_first(),
            'content': "\n".join(response.xpath('//div[@id="Cnt-Main-Article-QQ"]/p[@class="text"]/text()').extract()),
            'time': response.xpath('//span[@class="a_time"]/text()').extract_first(),
            'cate': response.xpath('//span[@class="a_catalog"]/a/text()').extract_first(),
        }
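
When the goal is simply "follow every link that matches a pattern and parse the target pages", Scrapy also provides CrawlSpider with Rule and LinkExtractor, which covers the link-following pattern of example 4 declaratively. A generic sketch (the allow pattern and the selectors in parse_item are illustrative placeholders, not taken from the course):

import scrapy
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor


class NewsCrawlSpider(CrawlSpider):
    name = "news_crawl"
    allowed_domains = ["news.qq.com"]
    start_urls = ['http://news.qq.com/society_index.shtml']

    rules = (
        # Follow every link whose URL matches the pattern and hand the page to parse_item
        Rule(LinkExtractor(allow=r'\.shtml$'), callback='parse_item', follow=False),
    )

    def parse_item(self, response):
        # Extract whatever fields the detail page exposes; selectors here are placeholders
        yield {
            'title': response.xpath('//h1/text()').extract_first(),
            'url': response.url,
        }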