基本操作:python—scrapy 数据解析、存储
一、爬取不同页面但数据解析相同的数据
当前文件:D:\python_test\scrapyProject\scrapyProject\spiders\first.py
import scrapy
from ..items import ScrapyprojectItem
class FirstSpider(scrapy.Spider):
    """Crawl the 4kmeinv list pages and yield one item per image.

    Pages 1-3 share the same HTML layout, so the pagination request
    reuses ``parse`` as its own callback.
    """

    name = 'first'
    # NOTE(review): the blog text censored the host as "netb**ian";
    # restored to the real domain so start_urls, pagination and item
    # URLs all agree (the original mixed censored and real hosts).
    start_urls = ['https://pic.netbian.com/4kmeinv/']
    page_num = 2  # next page index to request; page 1 comes from start_urls

    def parse(self, response):
        """Extract (title, image URL) pairs from one list page.

        Yields ``ScrapyprojectItem`` objects, then (for pages < 4) a
        follow-up ``Request`` back into this same method.
        """
        for li in response.xpath('//*[@id="main"]/div[3]/ul/li'):
            title = li.xpath('.//b/text()').extract_first()
            # urljoin handles absolute-path @src values ("/uploads/...")
            # correctly; naive base + src concatenation produced "//".
            title_url = response.urljoin(li.xpath('.//img/@src').extract_first())
            item = ScrapyprojectItem()
            item['img_url'] = title_url
            item['img_title'] = title
            yield item
        if self.page_num < 4:
            new_url = f'https://pic.netbian.com/4kmeinv/index_{self.page_num}.html'
            self.page_num += 1
            # 如果爬取的页面的数据解析是一样的,直接调用回自己即可
            # (same layout on every page: reuse this parse as the callback)
            yield scrapy.Request(url=new_url, callback=self.parse)
二、爬取不同页面,且数据解析不同的页面
当前文件:D:\python_test\scrapyProject\scrapyProject\spiders\sec.py
# 爬取不同页面的数据
import scrapy
from ..items import politicsNewest
class FirstSpider(scrapy.Spider):
    """Crawl the politicsNewest complaint list and its detail pages.

    The list page and the detail page have different layouts, so the
    detail request uses a second callback (``parse_detail``); the item
    started on the list page is handed over via ``meta``.
    """

    name = 'sec'
    # NOTE(review): the blog text censored the host as "sun**0769";
    # restored to the real domain so list and detail URLs agree.
    start_urls = ['https://wz.sun0769.com/political/index/politicsNewest']
    page_num = 2  # next page index to request; page 1 comes from start_urls

    def parse(self, response):
        """Parse one list page: start an item per row, follow its detail link."""
        for a in response.xpath('/html/body/div[2]/div[3]/ul[2]/li'):
            title = a.xpath('./span[3]/a/text()').extract_first()
            # urljoin handles absolute-path @href values correctly;
            # naive base + href concatenation produced "//".
            title_url = response.urljoin(a.xpath('./span[3]/a/@href').extract_first())
            item = politicsNewest()
            item['content_title'] = title
            # meta 可以将item对象传给callback
            # 当页面数据解析不同时,再写一个函数进行回调解析即可
            # (meta carries the half-built item to the detail callback)
            yield scrapy.Request(url=title_url, callback=self.parse_detail,
                                 meta={'item': item})
        if self.page_num < 3:
            new_url = (f'https://wz.sun0769.com/political/index/politicsNewest'
                       f'?id=1&page={self.page_num}')
            print(new_url)
            self.page_num += 1
            yield scrapy.Request(url=new_url, callback=self.parse)

    def parse_detail(self, response):
        """Parse a detail page: fill in the body text and emit the item."""
        item = response.meta['item']  # item started by parse() on the list page
        content = response.xpath(
            '/html/body/div[3]/div[2]/div[2]/div[2]//text()').extract()
        item['content_data'] = ''.join(content).strip()
        yield item