Scrapy Crawler in Practice: Crawling Sina News by Category (新浪网分类资讯)

Requirements:

		Fetch the titles and URLs of all top-level categories, the titles and URLs of their sub-categories, and the article (child-link) URLs under each sub-category.
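
The original post does not show how the project was scaffolded. As a minimal sketch (assuming the project is named Sina and the spider sina, which is what the settings and code below expect), the standard Scrapy commands to create that layout are:

scrapy startproject Sina
cd Sina
scrapy genspider sina sina.com.cn

The files below (items.py, pipelines.py, settings.py and the spider module) then live inside the generated Sina/ package.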

items.py

# -*- coding: utf-8 -*-

import scrapy
import sys
reload(sys)
sys.setdefaultencoding("utf-8")  # Python 2: let Chinese text be written without explicit encoding

class SinaItem(scrapy.Item):
    parentTitle = scrapy.Field()   # title of the top-level category
    parentUrls = scrapy.Field()    # URL of the top-level category
    subTitle = scrapy.Field()      # title of the sub-category
    subUrls = scrapy.Field()       # URL of the sub-category
    subFilename = scrapy.Field()   # directory the sub-category is saved to
    sonUrls = scrapy.Field()       # URL of an individual article
    head = scrapy.Field()          # article title
    content = scrapy.Field()       # article body text
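
Not part of the original post, but as a quick usage note: a scrapy.Item behaves like a dict restricted to the declared fields, so a misspelled field name fails loudly at assignment time instead of silently:

from Sina.items import SinaItem

item = SinaItem()
item['parentTitle'] = u'体育'    # OK: declared field
print item['parentTitle']
# item['partenTitle'] = u'体育'  # raises KeyError: field not declared on SinaItem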

pipelines.py

# -*- coding: utf-8 -*-

from scrapy import signals
import sys

class SinaPipeline(object):
    def process_item(self, item, spider):
        sonUrls = item['sonUrls']

        # Build the file name from the article URL: drop the leading "http://"
        # (7 characters) and the trailing ".shtml" (6 characters), then turn
        # the remaining slashes into underscores
        filename = sonUrls[7:-6].replace('/', '_')
        filename += ".txt"

        # Write the article body into the sub-category directory created by the spider
        fp = open(item['subFilename'] + '/' + filename, 'w')
        fp.write(item['content'])
        fp.close()
        return item
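
To make the slicing in process_item concrete, here is what it does to a hypothetical article URL (the URL below is made up for illustration):

sonUrls = 'http://sports.sina.com.cn/nba/doc-example.shtml'   # hypothetical URL
filename = sonUrls[7:-6].replace('/', '_') + '.txt'
# sonUrls[7:-6] drops the leading 'http://' and the trailing '.shtml'
# filename == 'sports.sina.com.cn_nba_doc-example.txt'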
        

settings.py

# -*- coding: utf-8 -*-

BOT_NAME = 'Sina'
SPIDER_MODULES = ['Sina.spiders']
NEWSPIDER_MODULE = 'Sina.spiders'

ITEM_PIPELINES = {
    'Sina.pipelines.SinaPipeline': 500,
}

LOG_LEVEL = 'DEBUG'
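
The original post stops at the settings above. Depending on the Scrapy version, a crawl like this typically also needs a couple of extra lines (shown here as an assumption, not as part of the original configuration), e.g. disabling robots.txt filtering and spacing out requests:

ROBOTSTXT_OBEY = False   # newer project templates default this to True, which can block the crawl
DOWNLOAD_DELAY = 0.5     # be polite: wait between requests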

spiders.py

# -*- coding: utf-8 -*-

from Sina.items import SinaItem
import scrapy
import os
import sys

class SinaSpider(scrapy.Spider):
    name = "sina"
    allowed_domains = ["sina.com.cn"]
    start_urls = [
        "http://news.sina.cn/guide"
    ]

    def parse(self, response):
        ''' Parse the titles and URLs of all top-level and sub-categories '''
        items = []
        # Titles and URLs of all top-level categories
        parentUrls = response.xpath('//div[@id="tab01"]/div/h3/a/@href').extract()
        parentTitle = response.xpath('//div[@id="tab01"]/div/h3/a/text()').extract()
        # Titles and URLs of all sub-categories
        subUrls = response.xpath('//div[@id="tab01"]/div/ul/li/a/@href').extract()
        subTitle = response.xpath('//div[@id="tab01"]/div/ul/li/a/text()').extract()

        # Iterate over all top-level categories
        for i in range(0, len(parentTitle)):
            # Create the top-level category directory if it does not exist yet
            parentFilename = "./Data/" + parentTitle[i]
            if not os.path.exists(parentFilename):
                os.makedirs(parentFilename)

            # Iterate over all sub-categories
            for j in range(0, len(subUrls)):
                item = SinaItem()

                # Save the top-level category's URL and title
                item['parentTitle'] = parentTitle[i]
                item['parentUrls'] = parentUrls[i]

                # True if the sub-category URL starts with the top-level category URL
                if_belong = subUrls[j].startswith(item['parentUrls'])

                # If it belongs to this top-level category, store it under that directory
                if if_belong:
                    subFilename = parentFilename + '/' + subTitle[j]
                    if not os.path.exists(subFilename):
                        os.makedirs(subFilename)

                    # Save the sub-category's url, title and filename fields
                    item['subUrls'] = subUrls[j]
                    item['subTitle'] = subTitle[j]
                    item['subFilename'] = subFilename

                    items.append(item)

        # Send a Request for each sub-category URL and pass the item along in meta,
        # handing the response to the callback second_parse
        for item in items:
            yield scrapy.Request(url=item['subUrls'], meta={'meta_1': item}, callback=self.second_parse)

    # Recursively request each sub-category URL
    def second_parse(self, response):
        ''' Extract the article links under a sub-category '''
        # Retrieve the item passed along in meta
        meta_1 = response.meta['meta_1']
        # All links on the sub-category page
        sonUrls = response.xpath('//a/@href').extract()

        items = []
        for i in range(0, len(sonUrls)):

            # True if the link starts with the top-level category URL and ends with .shtml
            if_belong = sonUrls[i].endswith('.shtml') and sonUrls[i].startswith(meta_1['parentUrls'])

            # If it belongs to this top-level category, copy the fields into one item for transport
            if if_belong:
                item = SinaItem()
                item['parentTitle'] = meta_1['parentTitle']
                item['parentUrls'] = meta_1['parentUrls']
                item['subUrls'] = meta_1['subUrls']
                item['subTitle'] = meta_1['subTitle']
                item['subFilename'] = meta_1['subFilename']
                item['sonUrls'] = sonUrls[i]
                items.append(item)

        # Send a Request for each article URL and hand the response plus meta data
        # to the callback detail_parse
        for item in items:
            yield scrapy.Request(url=item['sonUrls'], meta={'meta_2': item}, callback=self.detail_parse)

    def detail_parse(self, response):
        ''' Parse the article title and body '''
        item = response.meta['meta_2']
        content = ''
        head = response.xpath('//h1[@id="main_title"]/text()').extract_first()
        content_list = response.xpath('//div[@id="artibody"]/p/text()').extract()

        # Concatenate the text of all <p> tags into one string
        for content_one in content_list:
            content += content_one

        item['head'] = head
        item['content'] = content
        yield item
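
With the three files above in place, the crawl is started from the project root in the usual way; each matched article is written as a .txt file under ./Data/<top-level category>/<sub-category>/:

scrapy crawl sina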
            