"""用异步协程几分钟爬上千章的小说,让你一次看个够。"""
import requests
from lxml import etree
import asyncio
import aiohttp
import time
import aiofiles
def get_every_chapter(url):
    """Fetch the book's index page and return the list of chapter hrefs.

    :param url: URL of the book's table-of-contents page.
    :return: list of relative chapter URLs extracted from the
        ``section-list`` links (empty if the markup changed).
    :raises requests.HTTPError: if the server returns an error status.
    """
    head = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36 SLBrowser/9.0.0.10191 SLBChan/105"
    }
    resp = requests.get(url, headers=head)
    # Fail loudly on HTTP errors instead of parsing an error page.
    resp.raise_for_status()
    # The site serves GBK. Decode the raw bytes directly; the old
    # text.encode('iso-8859-1').decode('gbk') round-trip breaks as soon
    # as requests guesses a different source encoding.
    page = resp.content.decode('gbk', errors='replace')
    tree = etree.HTML(page)
    return tree.xpath("//div/ul[@class='section-list fix']/li/a/@href")
async def download_chapter(result_list):
    """Download every chapter concurrently.

    :param result_list: iterable of relative chapter hrefs as returned
        by ``get_every_chapter``.
    """
    tasks = [
        asyncio.create_task(
            download_contain("https://www.biquge635.com/book/40420/" + href)
        )
        for href in result_list
    ]
    # asyncio.wait() raises ValueError on an empty task set and is the
    # wrong tool for simple fan-out; gather() handles both cleanly.
    if tasks:
        await asyncio.gather(*tasks)
async def download_contain(url):
    """Download one chapter page and save its text under ./藏海花/.

    Retries a bounded number of times on failure, then gives up on this
    chapter so one dead link cannot hang the whole crawl.

    :param url: absolute URL of a chapter page.
    """
    for _attempt in range(5):  # bounded retries instead of `while True`
        try:
            async with aiohttp.ClientSession() as session:
                async with session.get(url) as resp:
                    page_source = await resp.text()
            tree = etree.HTML(page_source)
            title = tree.xpath("//div[@class='reader-main']/h1/text()")
            content = "".join(tree.xpath("//div[@class='content']//text()"))
            async with aiofiles.open(f'./藏海花/{title[0]}.txt', mode="w", encoding='utf-8') as wstream:
                await wstream.write(content)
            print(title[0] + " ----下载完成")
            return
        # Narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt, SystemExit and asyncio.CancelledError,
        # making the task impossible to cancel cleanly.
        except Exception:
            print("重来一遍")
def book_get():
    """Collect the chapter list, then drive the async downloads."""
    url = "https://www.biquge635.com/book/40420/"
    result_list = get_every_chapter(url)
    # asyncio.run() replaces the deprecated get_event_loop() /
    # run_until_complete() pattern and closes the loop when done.
    asyncio.run(download_chapter(result_list))
if __name__ == '__main__':
    # Time the full crawl from start to finish.
    began = time.time()
    book_get()
    elapsed = time.time() - began
    print("爬取时间:", elapsed)