I recently needed to scrape 93 tables for analysis. With that many tables I figured an asynchronous approach might be much faster, so I gave an async crawler a try. In the end it took about the same time as the single-process crawler, since each table is small and the requests barely block. Async is still very useful, and I'll keep studying it.
First, the coroutine version:
import asyncio
import time

import pandas as pd
import requests


class charge_Express_cabinet:
    def __init__(self):
        self.headers = {
            'Accept': 'application/json,text/plain,*/*',
            'Accept-Encoding': 'gzip,deflate',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Cookie': '人心都是肉长得,何时真心换来真心',
            'Host': 'newbms.sudiyi.cn',
            'Proxy-Connection': 'keep-alive',
            'Referer': 'http://newbms.sudiyi.cn/',
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'
        }

    async def cache(self, province, city, city_name):
        # Coroutine that asks the site to cache (generate) one city's rule
        # table. Note that requests.get() is a blocking call, so this
        # coroutine never actually yields control to the event loop.
        url_ = (f'http://newbms.sudiyi.cn/admin/api/v1/lattice_rules/export'
                f'?name=&fee_id=&stranded_time=&send_id=&take_id=&sms_id='
                f'&province={province}&city={city}&area=')
        try:
            requests.get(url=url_, headers=self.headers)
        except Exception as e:
            print(f'Caching failed: {e}')
        else:
            print(f'{city_name} cached')
            return city_name

    async def execute_cache(self):
        # Read the 93 province/city ID pairs and gather one caching task each.
        ids = pd.read_excel(r'C:\Users\lenovo\Desktop\收费规则爬取和分析\市ID和省ID.xlsx')
        tasks = [self.cache(ids['sheng_id'][i], ids['shi_id'][i], ids['name'][i])
                 for i in range(93)]
        await asyncio.gather(*tasks)

    async def download(self, url, rule_id):
        # Same pattern as cache(): a blocking requests call inside an
        # async def, so the downloads end up running one after another.
        try:
            data = requests.get(url, verify=False).content
        except Exception as e:
            print(f'Download failed: {e}')
        else:
            with open(r'C:\Users\lenovo\Desktop\收费规则爬取和分析\{}.csv'.format(rule_id), 'wb') as f:
                f.write(data)
            print(f'Rule {rule_id} downloaded')

    async def execute_download(self):
        # List the reports, then gather one download task per cached table.
        req = requests.get(
            'http://newbms.sudiyi.cn/api/v1/reports?page=1&per_page=100&sort_field=&sort_order=',
            headers=self.headers).json()
        reports = req['data']['data_list'][1:94]  # the 93 cached tables
        tasks = [self.download(i['download_url'], i['id']) for i in reports]
        await asyncio.gather(*tasks)


if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    first = time.time()  # set before the try block so finally can use it
    try:
        age = charge_Express_cabinet()
        loop.run_until_complete(age.execute_cache())
        loop.run_until_complete(age.execute_download())
    finally:
        loop.close()
        print(f'Total time: {time.time() - first}s')
This crawler first asks the company site to cache the 93 tables, then fetches each download link as bytes and saves it as a CSV file. There are two top-level coroutines, one for caching the tables and one for downloading them, and each gathers 93 tasks. One caveat: requests.get() blocks and these coroutines contain no await, so asyncio.gather() effectively runs the 93 tasks one after another, which is a big part of why the timing comes out so close to the single-process version.
Time taken (downloads only):
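Since requests is synchronous, the downloads above cannot actually overlap. Here is a minimal sketch of what a genuinely concurrent download phase could look like with aiohttp (my substitution, not part of the original crawler; it assumes pip install aiohttp and reuses the reports list from execute_download):

import asyncio

import aiohttp


async def download(session, url, rule_id):
    # session.get() suspends this coroutine while waiting on the network,
    # letting the event loop run the other downloads in the meantime.
    async with session.get(url, ssl=False) as resp:  # ssl=False ~ verify=False
        data = await resp.read()
    with open(f'{rule_id}.csv', 'wb') as f:
        f.write(data)
    print(f'Rule {rule_id} downloaded')


async def download_all(reports):
    # Share one session (and its connection pool) across all 93 requests.
    async with aiohttp.ClientSession() as session:
        await asyncio.gather(*(download(session, i['download_url'], i['id'])
                               for i in reports))

# reports = req['data']['data_list'][1:94] as above, then:
# asyncio.run(download_all(reports))

With all 93 requests in flight at once, the total time is dominated by the slowest single download rather than the sum of all of them.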
Next, the single-process version:
import time

import requests


def what():
    first = time.time()
    headers = {
        'Accept': 'application/json,text/plain,*/*',
        'Accept-Encoding': 'gzip,deflate',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Cookie': '虚空的虚空一切都是虚空',
        'Host': 'newbms.sudiyi.cn',
        'Proxy-Connection': 'keep-alive',
        'Referer': 'http://newbms.sudiyi.cn/',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'
    }
    req = requests.get(
        'http://newbms.sudiyi.cn/api/v1/reports?page=1&per_page=100&sort_field=&sort_order=',
        headers=headers).json()
    reports = req['data']['data_list'][24:27]  # a small slice of the cached tables
    for i in reports:
        url = i['download_url']
        rule_id = i['id']
        try:
            data = requests.get(url, verify=False).content
        except Exception as e:
            print(f'Download failed: {e}')
        else:
            with open(r'C:\Users\lenovo\Desktop\test_sudu3\{}.csv'.format(rule_id), 'wb') as f:
                f.write(data)
            print(f'Rule {rule_id} downloaded')
    print(f'Downloads took {time.time() - first}s')
Time taken (downloads only):
Ha, so for a crawler that barely blocks, the coroutine version only beat the single process by a few seconds. Once a crawler is doing frequent IO, though, coroutines should really shine; I'll try that next.
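If rewriting around aiohttp feels heavy, a smaller step (again my own sketch, not from the original post) is to keep requests and push each blocking call onto the event loop's default thread pool with run_in_executor, so the calls overlap even though each one still ties up a thread:

import asyncio

import requests


def fetch(url):
    # Unchanged blocking call; it just runs in a worker thread now.
    return requests.get(url, verify=False).content


async def fetch_all(urls):
    loop = asyncio.get_event_loop()
    # run_in_executor(None, ...) schedules each call on the default
    # ThreadPoolExecutor and returns a future that gather() can await.
    tasks = [loop.run_in_executor(None, fetch, url) for url in urls]
    return await asyncio.gather(*tasks)

# bodies = asyncio.get_event_loop().run_until_complete(fetch_all(urls))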
I learned what I know about coroutines from a WeChat public account whose author explains things carefully and in detail; if you're interested, give it a follow: Python学习开发