# Async fetching is far more efficient: when downloading large batches of
# image/text file data, fetch them concurrently with async coroutines.
"""Concurrently crawl 10 nipic.com search-result pages and save every image."""
import asyncio
import os
from datetime import datetime

import aiofiles  # async file writes
import aiohttp
from lxml import etree

# Output directory for the downloaded images (runtime path, kept as-is).
SAVE_DIR = '美女'


async def get_page(url, session):
    """Fetch *url* with *session* and return the parsed lxml HTML tree."""
    # Context manager releases the connection back to the pool; the original
    # bare `await session.get(url)` never released it.
    async with session.get(url) as response:
        res_text = await response.text()
    return etree.HTML(res_text)


async def get_img_info(html):
    """Return ([names], [absolute urls]) for the thumbnails on one page.

    A result page holds at most 60 items (li[1]..li[60]); pages with fewer
    items yield empty xpath results, which are skipped instead of raising
    IndexError as the original code did.
    """
    img_names = []
    img_urls = []
    for i in range(1, 61):
        name = html.xpath(f'//ul[@id="img-list-outer"]/li[{i}]/a/img/@alt')
        url = html.xpath(f'//ul[@id="img-list-outer"]/li[{i}]/a/img/@data-original')
        if name and url:
            img_names.append(name[0])
            # @data-original is protocol-relative ("//..."): prefix the scheme.
            img_urls.append('https:' + url[0])
    return img_names, img_urls


async def save_img(img_info, session):
    """Download each image named in *img_info* and write it under SAVE_DIR."""
    img_names, img_urls = img_info
    for img_name, img_url in zip(img_names, img_urls):
        # Download first, then open the file, so a failed request does not
        # leave an empty .jpg behind.
        async with session.get(img_url) as img_res:
            img = await img_res.read()
        path = os.path.join(SAVE_DIR, img_name + '.jpg')  # portable separator
        async with aiofiles.open(path, 'wb') as f:
            await f.write(img)


async def main():
    """Fetch all result pages, extract image info, and download concurrently."""
    # Create the target directory up front; the original crashed with
    # FileNotFoundError when it did not already exist.
    os.makedirs(SAVE_DIR, exist_ok=True)
    urls = [
        f'https://soso.nipic.com/?q=%E7%BE%8E%E5%A5%B3&page={i}'
        for i in range(1, 11)
    ]
    async with aiohttp.ClientSession() as session:
        # asyncio.gather replaces asyncio.wait([...coroutines...]): passing
        # bare coroutines to wait() was deprecated in 3.8 and removed in
        # 3.11, and gather preserves input order, so the awkward
        # `.result()` unwrapping of the unordered `done` set goes away.
        pages = await asyncio.gather(*(get_page(url, session) for url in urls))
        infos = await asyncio.gather(*(get_img_info(page) for page in pages))
        await asyncio.gather(*(save_img(info, session) for info in infos))


if __name__ == '__main__':
    # Entry-point guard keeps the network crawl from running on import.
    print(datetime.now())
    asyncio.run(main())
    print(datetime.now())