Code
When using Scrapy, remember to add request headers, set ROBOTSTXT_OBEY to False in settings.py, and enable the item pipeline.
import scrapy
from . import data_solve
class ShanghaispiderSpider(scrapy.Spider):
    """Spider for the Shanghai Municipal Health Commission news list.

    Walks the paginated news index (index.html, index_2.html, ...),
    keeps only articles whose titles mention newly imported confirmed
    COVID-19 cases, and parses each matching article's body text.

    NOTE: requires a User-Agent header, ROBOTSTXT_OBEY = False, and the
    item pipeline enabled in settings.py (see note at top of file).
    """
    name = 'shanghaiSpider'
    allowed_domains = ['wsjkw.sh.gov.cn']
    start_urls = ['https://wsjkw.sh.gov.cn/xwfb/index.html']
    i = 2      # index of the next listing page to request (page 1 is index.html)
    page = 25  # stop paginating once this page index is reached

    def parse(self, response):
        """Parse one listing page.

        Yields one detail-page Request per article whose title passes
        content_filter, then a Request for the next listing page while
        the page budget (self.page) has not been exhausted.
        """
        li_list = response.xpath("//ul[@class='uli16 nowrapli list-date ']/li")
        for li in li_list:
            # BUG FIX: the original called li_list.xpath(...) inside this
            # loop, re-extracting the whole column lists every iteration;
            # select relative to the current <li> instead.
            title = li.xpath("./a/@title").extract_first()
            url = li.xpath("./a/@href").extract_first()
            time = li.xpath("./span/text()").extract_first()
            if title and url and self.content_filter(title):
                yield scrapy.Request(
                    'https://wsjkw.sh.gov.cn' + url,
                    callback=self.parse_detail,
                    meta={'time': time, 'title': title},
                )
        # BUG FIX: pagination check now happens *after* yielding the detail
        # requests, so articles on the final page are no longer dropped.
        if self.i < self.page:
            next_url = 'https://wsjkw.sh.gov.cn/xwfb/index_' + str(self.i) + '.html'
            self.i += 1
            yield scrapy.Request(
                next_url,
                callback=self.parse,
            )

    def content_filter(self, i):
        """Return True if title *i* mentions newly imported confirmed
        COVID-19 cases (all three keywords must appear)."""
        keyword1 = '新冠肺炎确诊病例'
        keyword2 = '新增'
        keyword3 = '境外输入'
        return keyword1 in i and keyword2 in i and keyword3 in i

    def parse_detail(self, response):
        """Parse an article page: join its body text spans and hand the
        result to the data_solve helper for extraction."""
        time = response.meta['time']
        title = response.meta['title']
        spans = response.xpath("//div[@id='ivs_content']//span/text()").extract()
        body = "".join(spans)
        print(time, end='')
        print(data_solve.datas.solve_data(self, body))

    def local_new(self, strs):
        # Placeholder — not implemented yet.
        pass
The data_solve module is overly complicated as written. TODO: revisit it with the re module and simplify.