# -*- coding: utf-8 -*-
import scrapy
from douluodalu.items import DouluodaluItem

class DlSpider(scrapy.Spider):
    name = 'dl'
    start_urls = ['https://m.lread.net/read/401/']
    def parse(self, response):
        # Chapter names and URLs share a common parent node: each chapter is a <p>
        # under //*[@id="chapterlist"]. The slice [1:-9] skips the first row and the
        # last nine rows, which are not chapter entries.
        all_list = response.xpath('//*[@id="chapterlist"]//p')[1:-9]
        for title_url in all_list:
            # Build one item per chapter and hand it to the next callback through
            # request.meta, so detail() can fill in the chapter text later
            # (an alternative using cb_kwargs is sketched at the end of this post).
            item = DouluodaluItem()
            each_title = ''.join(title_url.xpath('.//a//text()').extract())
            title_finish_url = response.urljoin(title_url.xpath('.//a/@href').extract_first(''))
            item['title'] = each_title
            item['url'] = title_finish_url
            yield scrapy.Request(url=title_finish_url, callback=self.detail,
                                 meta={'item': item})

    def detail(self, response):
        item = response.meta['item']
        # A chapter can be split across several pages; append this page's text to
        # whatever has already been collected for the chapter.
        content = response.xpath('//*[@id="novelcontent"]/p/text()').getall()
        item['content'] = item.get('content', '') + ''.join(content)

        # While the chapter has more pages, the 4th entry of the pagination bar
        # reads "下页 >" ("next page") and #pt_next holds the next page's URL.
        small_url = response.xpath('//div/div/ul/li[4]/a/text()').extract_first()
        if small_url == "下页 >":
            next_url = response.urljoin(response.xpath('//*[@id="pt_next"]/@href').extract_first())
            yield scrapy.Request(url=next_url, callback=self.detail, meta={'item': item})
        else:
            # Last page of the chapter: the item now holds the complete chapter text.
            yield item
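
The spider imports DouluodaluItem from douluodalu.items and fills three fields: title, url, and content, one item per chapter of the novel 《斗罗大陆》. A minimal sketch of what that items module presumably declares (the class name and field names come from the spider above; the rest is standard scrapy startproject boilerplate):

# douluodalu/items.py -- minimal field declarations assumed by the spider above
import scrapy


class DouluodaluItem(scrapy.Item):
    title = scrapy.Field()    # chapter title
    url = scrapy.Field()      # URL of the chapter's first page
    content = scrapy.Field()  # full chapter text, accumulated across pages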
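Passing the half-built item through request.meta works, but Scrapy 1.7+ also offers cb_kwargs, which delivers keyword arguments straight to the callback. Below is a sketch of the same chapter/page hand-off using cb_kwargs; the spider name 'dl_cb' is made up for this example, and the XPaths are copied from the spider above:

import scrapy
from douluodalu.items import DouluodaluItem


class DlCbSpider(scrapy.Spider):
    name = 'dl_cb'  # hypothetical name, used only for this sketch
    start_urls = ['https://m.lread.net/read/401/']

    def parse(self, response):
        for row in response.xpath('//*[@id="chapterlist"]//p')[1:-9]:
            item = DouluodaluItem()
            item['title'] = ''.join(row.xpath('.//a//text()').extract())
            item['url'] = response.urljoin(row.xpath('.//a/@href').extract_first(''))
            # cb_kwargs passes the item as a real keyword argument of detail()
            yield scrapy.Request(item['url'], callback=self.detail, cb_kwargs={'item': item})

    def detail(self, response, item):
        page_text = ''.join(response.xpath('//*[@id="novelcontent"]/p/text()').getall())
        item['content'] = item.get('content', '') + page_text
        if response.xpath('//div/div/ul/li[4]/a/text()').extract_first() == "下页 >":
            next_url = response.urljoin(response.xpath('//*[@id="pt_next"]/@href').extract_first())
            yield scrapy.Request(next_url, callback=self.detail, cb_kwargs={'item': item})
        else:
            yield item

Either spider can be run from the project root with Scrapy's feed export, for example scrapy crawl dl -o douluodalu.jl, which writes one JSON line per chapter (the output filename here is just an example).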