Approach
1. Write the synchronous version first; where the page yields many chapter URLs, wrap each fetch in an async task.
2. Save each chapter to its own txt file.
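The core pattern looks like this. A minimal sketch: fetch_one, main, and the example URLs are illustrative placeholders, not part of the crawler below.

import asyncio
import aiohttp

async def fetch_one(session, url):
    # One task per URL: request the page and return its body
    async with session.get(url) as resp:
        return await resp.text()

async def main(urls):
    # One shared session reuses connections across all requests
    async with aiohttp.ClientSession() as session:
        tasks = [asyncio.create_task(fetch_one(session, u)) for u in urls]
        return await asyncio.gather(*tasks)

# asyncio.run(main(['http://example.com/a', 'http://example.com/b']))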
Imports
import requests
from lxml import etree
import asyncio
import aiohttp, aiofiles
import os
Get the list of chapter URLs
async def get_urls(url, headers):
    resp = requests.get(url=url, headers=headers)
    resp.encoding = 'utf-8'
    tree = etree.HTML(resp.text)
    dd_list = tree.xpath('//div[@id="list"]/dl/dd')
    tasks = []
    # The output directory must exist before get_content writes into it
    if not os.path.exists('./小说'):
        os.mkdir('./小说')
    async with aiohttp.ClientSession(headers=headers) as session:
        for dd in dd_list:
            href = 'http://www.xbiquge.la' + dd.xpath('./a/@href')[0]
            title = dd.xpath('./a/text()')[0]
            tasks.append(asyncio.create_task(get_content(href, title, session)))
        await asyncio.wait(tasks)
# Get the chapter URLs and create one async task per chapter
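A note on the design: awaiting with asyncio.wait works here because the list holds real Task objects, but await asyncio.gather(*tasks) would do the same job and also re-raise any exception from a failed chapter instead of leaving it buried in the returned set. The single requests.get for the index page is synchronous, but it runs once, before any tasks exist, so blocking the event loop there is harmless.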
Fetch the text of each chapter
async def get_content(url, title, session):
    async with session.get(url) as resp:
        html = await resp.text(encoding='utf-8')
        tree = etree.HTML(html)
        # string(...) flattens the chapter <div> to plain text; drop the padding spaces
        content = tree.xpath('string(//div[@id="content"])').replace(' ', '')
        async with aiofiles.open(f'./小说/{title}.txt', mode='w', encoding='utf-8') as file:
            await file.write(content)
# Asynchronously parse each chapter URL and write the text to its own txt file
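aiofiles is what keeps the writes non-blocking: it runs the actual file I/O in a background thread pool, so the event loop can keep scheduling chapter downloads while another chapter is being written to disk.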
Main program
if __name__ == '__main__':
    # get_urls expects a headers dict; a User-Agent header keeps the site
    # from rejecting the crawler (any common browser UA string works)
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'}
    url = 'http://www.xbiquge.la/10/10489/'
    asyncio.run(get_urls(url, headers))