Web Crawler Information Extraction in Practice: Scraping Stock Information with the Scrapy Framework (17)

# -*- coding: utf-8 -*-
# stocks.py
import scrapy
import re


class StocksSpider(scrapy.Spider):
    name = 'stocks'
    # Start page: should be a page whose links embed stock codes (sh/sz + 6 digits).
    start_urls = ['http://baidu.com/']

    def parse(self, response):
        # Follow every link whose href contains a stock code and request its detail page.
        for href in response.css('a::attr(href)').extract():
            try:
                stock = re.findall(r"[s][hz]\d{6}", href)[0]
                url = 'https://gupiao.baidu.com/stock/' + stock + '.html'
                yield scrapy.Request(url, callback=self.parse_stock)
            except IndexError:
                # This link does not contain a stock code; skip it.
                continue

    def parse_stock(self, response):
        # Parse one stock detail page into a dict of field name -> value.
        infoDict = {}
        stockInfo = response.css('.stock-bets')
        name = stockInfo.css('.bets-name').extract()[0]
        keyList = stockInfo.css('dt').extract()
        valueList = stockInfo.css('dd').extract()
        for i in range(len(keyList)):
            key = re.findall(r'>.*</dt>', keyList[i])[0][1:-5]
            try:
                val = re.findall(r'\d+\.?.*</dd', valueList[i])[0][0:-5]
            except IndexError:
                val = '--'
            infoDict[key] = val

        # Prepend the stock name: the visible text plus the code in parentheses.
        infoDict.update(
            {'股票名称': re.findall(r'\s.*\(', name)[0].split()[0] +
             re.findall(r'\>.*\<', name)[0][1:-1]})
        yield infoDict

# -*- coding: utf-8 -*-
# pipelines.py

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


class BaidustocksPipeline:
    # Default pipeline stub generated by Scrapy; it passes items through unchanged.
    def process_item(self, item, spider):
        return item


class BaidustocksInfoPipeline(object):
    # Writes every crawled item to a local text file, one dict per line.
    def open_spider(self, spider):
        # Called when the spider is opened: create the output file.
        self.f = open('BaiduStockInfo.txt', 'w')

    def close_spider(self, spider):
        # Called when the spider is closed: release the file handle.
        self.f.close()

    def process_item(self, item, spider):
        try:
            line = str(dict(item)) + '\n'
            self.f.write(line)
        except Exception:
            pass
        return item
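As the generated comment above reminds, the file-writing pipeline only runs if it is registered under ITEM_PIPELINES in the project's settings.py. A sketch of the relevant setting, assuming the Scrapy project package is named BaiduStocks (inferred from the class names; adjust the module path to match your own project):

# settings.py (excerpt) -- the module path 'BaiduStocks.pipelines' is an assumption
ITEM_PIPELINES = {
    'BaiduStocks.pipelines.BaidustocksInfoPipeline': 300,
}

With the pipeline registered, running scrapy crawl stocks from the project directory starts the spider named 'stocks' and, on success, leaves the scraped results in BaiduStockInfo.txt, one dict per line.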
