# coding:utf-8
# __auth__ = "maiz"
import os
import re
import random
import asyncio
import aiofiles
import aiohttp
from datetime import datetime
from lxml import etree
class Spider(object):
    """Asynchronous scraper that downloads videos from pearvideo.com.

    Workflow: fetch the category listing page, resolve each listed video's
    real download URL via the site's ``videoStatus.jsp`` AJAX endpoint,
    then download every video concurrently into ``download_folder``.
    """

    # Browser-like User-Agent so the site serves the normal HTML page.
    headers = {
        'User-Agent':
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36'
    }
    download_folder = "./videos"

    def __init__(self):
        # BUGFIX: `urls` was a class-level list, shared (and ever-growing)
        # across all Spider instances; make it per-instance state.
        # Holds (filename, download_url) tuples.
        self.urls = []

    async def main(self):
        """Collect all video URLs, then download them concurrently."""
        await self._get_video_urls()
        tasks = [
            asyncio.create_task(self._download_video(filename, url))
            for filename, url in self.urls
        ]
        await asyncio.gather(*tasks)

    async def _get_video_urls(self):
        """Fetch the category listing and parse every video entry on it.

        Raises:
            aiohttp.ClientResponseError: if the listing page returns a
                non-2xx status.
        """
        url = 'https://www.pearvideo.com/category_5'
        async with aiohttp.ClientSession(headers=self.headers) as session:
            async with session.get(url) as response:
                # BUGFIX: the original `raise aiohttp.ClientResponseError`
                # itself raised TypeError (that exception class requires
                # request_info/history constructor args). raise_for_status()
                # builds a properly populated exception from the response.
                response.raise_for_status()
                text = await response.text()
        tree = etree.HTML(text)
        lis = tree.xpath('//ul[@id="categoryList"]/li')
        # BUGFIX: asyncio.wait() rejects bare coroutines since Python 3.11;
        # gather() schedules and awaits them directly.
        await asyncio.gather(*(self._parse_video_url(li) for li in lis))

    async def _parse_video_url(self, li):
        """Resolve one listing <li> element to (filename, real download URL).

        The AJAX endpoint returns a srcUrl whose 13-digit timestamp segment
        must be replaced with ``cont-<id>`` to obtain the real file URL.
        """
        title = li.xpath('./div/a/div[2]/text()')[0].strip('“”!?').replace("| ", "").replace(" | ", "")
        page = str(li.xpath('./div/a/@href')[0]).split('_')[1]
        ajax_url = 'https://www.pearvideo.com/videoStatus.jsp?'
        params = {'contId': page, 'mrd': random.random()}
        headers = self.headers.copy()
        # The endpoint validates the Referer against the video page URL.
        headers.update({'Referer': 'https://www.pearvideo.com/video_' + page})
        async with aiohttp.ClientSession(headers=headers) as session:
            async with session.get(ajax_url, params=params) as response:
                ajax_text = await response.json()
        download_url = ajax_text["videoInfo"]['videos']["srcUrl"]
        download_url = re.sub(r"\d{13}", f"cont-{page}", download_url)
        self.urls.append((title + ".mp4", download_url))

    async def _download_video(self, filename: str, url: str):
        """Download one video and write it into ``download_folder``."""
        async with aiohttp.ClientSession(headers=self.headers) as session:
            # BUGFIX: the original f-strings printed the literal text
            # "(unknown)" instead of interpolating the file name.
            print(f"开始下载 => {filename}")
            async with session.get(url) as response:
                content = await response.read()
        async with aiofiles.open(os.path.join(self.download_folder, filename), "wb") as fb:
            await fb.write(content)
        print(f"已下载 => {filename}")

    def run(self):
        """Ensure the download folder exists, then run the async pipeline."""
        if os.path.exists(self.download_folder):  # 检查是否存在这个文件夹
            print("文件夹已存在")
        else:
            os.mkdir(self.download_folder)  # 不存在则创建
            print("文件夹已创建")
        # BUGFIX: get_event_loop()/run_until_complete is deprecated for new
        # code; asyncio.run() creates, runs, and closes a fresh event loop.
        asyncio.run(self.main())
if __name__ == '__main__':
    # Time the full scrape-and-download run and report elapsed seconds.
    started_at = datetime.now()
    spider = Spider()
    spider.run()
    finished_at = datetime.now()
    print("=" * 40)
    print((finished_at - started_at).total_seconds(), "秒")
# Python asynchronous crawler (python异步爬虫)
# Blog-platform footer (commented out so the file parses): latest recommended article published 2023-09-13 03:09:57