# 多协程使用
import asyncio
import time
# async def fun1():
# print('你好,我是潘金莲')
# await asyncio.sleep(3)
# print('你好,我叫潘金莲')
#
# async def fun2():
# print('你好,我是狄仁杰')
# await asyncio.sleep(2)
# print('你好,我叫狄仁杰')
#
# async def fun3():
# print('你好,我是李元芳')
# await asyncio.sleep(4)
# print('你好,我叫李元芳')
#
#
# async def main():
# tasks = [
# fun1(),
# fun2(),
# fun3()
# ]
# await asyncio.gather(*tasks)
# pass
#
# if __name__ == '__main__':
# t1 = time.time()
# # 一次性启动多个任务(协程)
# asyncio.run(main())
# t2 = time.time()
# print(t2-t1)
# 在爬虫领域的使用
async def download(url):
    """Simulate downloading *url*: announce start, pause, announce completion.

    The 2-second sleep stands in for real (awaitable) network I/O.
    """
    simulated_latency = 2
    print('开始下载')
    await asyncio.sleep(simulated_latency)  # placeholder for the real request
    print('下载完成')
async def main():
    """Create one download coroutine per URL and run them concurrently."""
    urls = [
        'http://www.baidu.com',
        'http://www.163.com'
    ]
    # gather() schedules every coroutine at once and waits for all of them.
    pending = [download(target) for target in urls]
    await asyncio.gather(*pending)
if __name__ == '__main__':
    # Entry point: start the event loop and drive main() to completion.
    asyncio.run(main())
# 实战:图片下载 (practical example: image download)
# requests.get(),同步的代码 ->异步操作
import asyncio
import aiohttp
# Image URLs fetched concurrently by the aiohttp demo below.
urls = [
    'http://kr.shanghai-jiuxin.com/file/2021/0112/22bb24bf6176209f02439463c408fa58.jpg',
    'http://kr.shanghai-jiuxin.com/file/2021/0112/3de4facfc886317dece22a09ec4a2343.jpg',
    'http://kr.shanghai-jiuxin.com/file/2021/0112/a6a25f86d932716142aa418c9c2bf119.jpg'
]
# s = aiohttp.ClientSession() # 等价于requests
# 发送请求
# 得到图片
# 保持图片
async def aiodownload(url):
    """Fetch one image from *url* and save it under its basename.

    aiohttp.ClientSession plays the role requests plays in synchronous code.
    """
    # rsplit splits from the right, so index [1] is the part after the last '/'.
    filename = url.rsplit('/', 1)[1]
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            # Reading the response body is itself asynchronous, hence await.
            body = await resp.content.read()
            with open(filename, mode='wb') as f:
                f.write(body)
            print(filename, '搞定')
async def main():
    """Start one aiodownload task per image URL and wait for all of them."""
    jobs = [aiodownload(target) for target in urls]
    await asyncio.gather(*jobs)
if __name__ == '__main__':
    # Run the image-download demo when this file is executed as a script.
    asyncio.run(main())
# 多协程的使用 (using multiple coroutines)
# 最新推荐文章于 2024-06-18 00:22:55 发布