Crawling a novel with a single thread is simple and fuss-free ฅ( ̳• ◡ • ̳)ฅ. The spider I wrote before would crash after about 800 chapters; this version adds a resume index so it keeps going until the whole novel is saved ˶´⚰︎`˵ (there's also an iterative variant sketched after the code).
import requests
import parsel
# Pretend to be a browser
# Fill in your own Cookie and User-Agent ฅฅ*~
headers = {
    "Cookie": "",
    "User-Agent": ""
}
home_url = ''
# Build a parsel Selector from a page URL
def get_selector(url, headers):
    response = requests.get(url, headers=headers)
    # Guess the page encoding so non-ASCII text decodes correctly
    response.encoding = response.apparent_encoding
    html = response.text
    selector = parsel.Selector(html)
    return selector
home_selector = get_selector(home_url, headers)
# Novel title
# Fill in the xpath yourself ฅฅ*~
novel_name = home_selector.xpath('').get()
# Chapter ids
# Fill in the xpath yourself ฅฅ*~
chapters_ids = home_selector.xpath('').getall()
# Index of the chapter to resume from when an error occurs
end_index = 0
def run_spider(chapters_ids):
    global end_index  # the resume index must survive across restarts
    try:
        # Skip the chapters that are already saved
        for link in chapters_ids[end_index:]:
            link = home_url + link
            html_selector = get_selector(link, headers)
            # Chapter title
            # Fill in the xpath yourself ฅฅ*~
            title = html_selector.xpath('').get()
            # Chapter content
            # Fill in the xpath yourself ฅฅ*~
            content = html_selector.xpath('').getall()
            content = '\n'.join(content)
            print(title + ' saving...')
            with open(novel_name + '.txt', 'a', encoding='utf-8') as f:
                f.write(title)
                f.write('\n')
                f.write(content)
                f.write('\n')
            end_index = end_index + 1
            print(title + ' saved')
    except Exception:
        print('┭┮﹏┭┮ The spider hit an error, boohoo~~~~')
        print('Restarting right away! (*>.<*)')
        # Restart with the full list; end_index skips the finished chapters
        run_spider(chapters_ids)
run_spider(chapters_ids)
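One thing to watch: the restart in the except branch is recursive, so every error adds another Python stack frame, and a flaky enough site could eventually trip the interpreter's recursion limit (about 1000 frames by default). Here is a minimal iterative sketch under the same assumptions, reusing get_selector, headers, home_url, and novel_name from above; run_spider_loop is a hypothetical name and the empty xpaths are placeholders you fill in yourself:
# An iterative variant (sketch): a while loop replaces the recursive
# restart, so the stack stays flat no matter how often the site errors out.
def run_spider_loop(chapters_ids):
    end_index = 0
    while end_index < len(chapters_ids):
        try:
            for link in chapters_ids[end_index:]:
                html_selector = get_selector(home_url + link, headers)
                # Fill in the same xpaths as above ฅฅ*~
                title = html_selector.xpath('').get()
                content = '\n'.join(html_selector.xpath('').getall())
                with open(novel_name + '.txt', 'a', encoding='utf-8') as f:
                    f.write(title + '\n' + content + '\n')
                end_index = end_index + 1
        except Exception:
            print('Error at chapter index ' + str(end_index) + ', retrying...')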