Scraping NetEase News with Scrapy

This post uses Scrapy to crawl NetEase News (news.163.com). The details are explained in the code comments.
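If you are reproducing this from scratch, the usual Scrapy workflow is to generate the project skeleton first, e.g. with scrapy startproject news and then scrapy genspider -t crawl news163 news.163.com inside the project directory; the project name news and the spider name news163 used below assume that layout.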

Define the fields to scrape (items.py)
# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy


class NewsItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    news_thread = scrapy.Field()
    news_title = scrapy.Field()
    news_url = scrapy.Field()
    news_time = scrapy.Field()
    news_source = scrapy.Field()
    source_url = scrapy.Field()
    news_body = scrapy.Field()
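
Scrapy items behave like dictionaries restricted to the declared fields, which makes it easy to sanity-check the definitions above. The snippet below is a standalone sketch (not part of the original post) showing how NewsItem is filled in the spider's callbacks:

from news.items import NewsItem

item = NewsItem()
item['news_title'] = 'Example headline'  # assigning a declared field works like a dict
item['news_url'] = 'https://news.163.com/20/0723/18/FI87N607000189FH.html'

print(dict(item))        # items convert cleanly to plain dicts for exporting
# item['author'] = 'x'   # would raise KeyError: 'author' is not declared in NewsItem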

Define the crawl rules (the spider)
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from news.items import NewsItem


class News163Spider(CrawlSpider):
    name = 'news163'
    allowed_domains = ['news.163.com']
    start_urls = ['http://news.163.com/']

    rules = (
        # Rule arguments: a LinkExtractor with an allow regex, the callback that parses
        # matched pages, and follow (whether to keep extracting links from those pages).
        # A news detail page looks like: https://news.163.com/20/0723/18/FI87N607000189FH.html
        # so a regex that matches detail pages is:
        # https://news.163.com/20/0723/\d+/.*?html
        # Passing it to LinkExtractor via allow= tells the spider to hand those pages to parse_item.
        Rule(LinkExtractor(allow=r'https://news.163.com/20/0723/\d+/.*?html'), callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        item = NewsItem()
        item['news_thread'] = response.url.strip().split('/')[-1][:-5]  # article ID: last URL segment with the ".html" suffix removed
        self.get_title(response, item)
        self.get_time(response, item)
        self.get_source(response, item)
        self.get_source_url(response, item)
        self.get_text(response, item)
        self.get_url(response, item)
        # item['domain_id'] = response.xpath('//input[@id="sid"]/@value').get()
        # item['name'] = response.xpath('//div[@id="name"]').get()
        # item['description'] = response.xpath('//div[@id="description"]').get()
        return item

    def get_url(self, response, item):
        url = response.url
        if url:
            item['news_url'] = url

    def get_time(self, response, item):
        # class selector: the div with class "post_time_source" holds date and source
        time = response.css('div.post_time_source::text').extract()
        if time:
            print("time:{}".format(time[0].strip().replace("来源", "")))
            item['news_time'] = time[0].strip().replace("来源", "").replace("\u3000", "")

    def get_title(self, response, item):
        title = response.css('title::text').extract()
        if title:  # extract() returns a list; an empty list would make title[0] fail
            print("title:{}".format(title[0]))
            item['news_title'] = title[0]

    def get_source(self, response, item):
        # ID selector: the element with id "ne_article_source"
        source = response.css('#ne_article_source::text').extract()
        if source:
            print("source:{}".format(source[0]))
            item['news_source'] = source[0]

    def get_source_url(self, response, item):
        # ID selector again, this time reading the href attribute
        source_url = response.css('#ne_article_source::attr(href)').extract()
        if source_url:
            print("source_url:{}".format(source_url[0]))
            item['source_url'] = source_url[0]

    def get_text(self, response, item):
        text = response.css('.post_text p::text').extract()  # list of paragraph strings
        if text:
            item['news_body'] = text
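
The class, ID and attribute selectors used above can be tried out offline with Scrapy's Selector before running a full crawl. The HTML below is made up to mirror the page structure the spider assumes, so treat it as a sketch rather than the real 163.com markup:

from scrapy.selector import Selector

# hypothetical markup imitating the parts of an article page the spider relies on
html = '''
<div class="post_time_source">2020-07-23 18:00:00  来源:
    <a id="ne_article_source" href="https://example.com/source">Example Agency</a>
</div>
<div class="post_text"><p>First paragraph.</p><p>Second paragraph.</p></div>
'''

sel = Selector(text=html)
print(sel.css('div.post_time_source::text').extract())      # class selector -> text nodes
print(sel.css('#ne_article_source::text').extract())        # ID selector -> source name
print(sel.css('#ne_article_source::attr(href)').extract())  # ::attr() -> source URL
print(sel.css('.post_text p::text').extract())               # body paragraphs as a list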

Process the scraped results (pipelines.py)
# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.exporters import CsvItemExporter


class NewsPipeline(object):

    def __init__(self):
        self.file = open('news_data.csv', 'wb')
        # set the encoding to match the pages being scraped (UTF-8 here)
        self.exporter = CsvItemExporter(self.file, encoding='utf-8')
        # begin the export
        self.exporter.start_exporting()

    def close_spider(self, spider):
        # finish exporting and close the output file
        self.exporter.finish_exporting()
        self.file.close()

    def process_item(self, item, spider):
        self.exporter.export_item(item)
        return item
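
Because get_text stores news_body as a list of paragraph strings, each item carries a multi-valued field into the exporter. If you would rather have a single text column, one option (a sketch, not from the original post) is to join the paragraphs in process_item before exporting:

    def process_item(self, item, spider):
        # optional cleanup: collapse the list of paragraphs into one string
        if item.get('news_body'):
            item['news_body'] = '\n'.join(p.strip() for p in item['news_body'])
        self.exporter.export_item(item)
        return item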

Enable the pipeline in settings.py
# enable the item pipeline; the number (0-1000) is its order, lower runs first
ITEM_PIPELINES = {
    'news.pipelines.NewsPipeline': 300,
}
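
Depending on your environment, a couple of other settings are often needed before the site will respond properly; the values below are common choices and assumptions, not settings taken from the original post:

# identify the crawler with a browser-like User-Agent (value is just an example)
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'

# the default project template obeys robots.txt; disable only if you accept the implications
ROBOTSTXT_OBEY = False

# be polite and throttle requests a little
DOWNLOAD_DELAY = 1

Run the spider with scrapy crawl news163; the exported rows end up in news_data.csv.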

This spider does not grab every article: the allow regex pins the date to 20/0723, so it only captures the detail pages linked from the homepage. You could analyse the URL structure further to crawl the whole site; that is not covered in detail here.
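
As a starting point for such an extension, the allow pattern can be loosened so it matches any date instead of only 2020-07-23. The pattern below follows the YY/MMDD/HH/ID.html structure visible in the example URL; whether every article on the site follows it is an assumption, so verify before relying on it. It would replace the rules tuple in the spider above:

    rules = (
        # matches e.g. https://news.163.com/20/0723/18/FI87N607000189FH.html for any date and hour
        Rule(LinkExtractor(allow=r'https://news\.163\.com/\d{2}/\d{4}/\d{2}/.*?\.html'),
             callback='parse_item', follow=True),
    )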
