import asyncio
import aiohttp
# Browser-like User-Agent so the CDN does not reject the request as a bot.
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36'
}
# Image URLs to download; the last path segment is used as the local filename.
urls = [
"https://cdn.pixabay.com/photo/2021/11/16/08/13/window-6800397_960_720.jpg",
"https://cdn.pixabay.com/photo/2021/01/29/11/33/game-5960731_960_720.jpg",
"https://cdn.pixabay.com/photo/2021/11/13/12/19/cochem-castle-6791148_960_720.jpg"
]
async def get_request(url):
    """Download a single image from *url* and save it to the current directory.

    The local filename is the last path segment of the URL
    (e.g. ".../window-6800397_960_720.jpg" -> "window-6800397_960_720.jpg").

    Raises:
        aiohttp.ClientResponseError: if the server returns an HTTP error
            status (prevents writing an error page to disk as an "image").
    """
    name = url.rsplit("/", 1)[1]
    async with aiohttp.ClientSession() as session:
        async with session.get(url=url, headers=headers) as response:
            # Fail fast on 4xx/5xx instead of silently saving the error body.
            response.raise_for_status()
            data = await response.read()
    # Write after the session is closed; plain blocking write is fine for
    # files this small.
    with open(name, mode='wb') as f:
        f.write(data)
if __name__ == '__main__':
    async def _main():
        """Download every image in *urls* concurrently."""
        # gather schedules all coroutines on the running loop and waits for
        # them; any exception in a download propagates out.
        await asyncio.gather(*(get_request(url) for url in urls))

    # asyncio.run creates and closes the event loop itself; this replaces the
    # deprecated get_event_loop()/ensure_future()/wait() pattern, which fails
    # on modern Python when no loop is running.
    asyncio.run(_main())
# [Python] Crawler — download images with asynchronous HTTP requests
# (Latest recommended article published 2024-06-07 13:23:59)