近日使用爬虫采集数据的时候,发现 aiohttp 无法请求一些 https 的 url,而用 requests 是可以的。用同事的隧道代理也没问题,但自己的代理就是不好使,不清楚是不是 aiohttp 的问题。最终我通过更换为 httpx 库解决了这个问题。
网上搜索到的解决方案大多类似下面这种——使用 ssl=False 关闭证书校验——但对我来说并没有用:
async def request(self, url, params, headers, allow_404=True):
    """Fetch *url* through a proxy, retrying until a usable response arrives.

    Retries indefinitely on network errors, on captcha/antibot redirect
    pages, and (when ``allow_404`` is False) on HTTP 404 responses.

    :param url: target URL.
    :param params: query parameters passed to ``session.get``.
    :param headers: request headers.
    :param allow_404: when False, a 404 status triggers a retry instead of
        being returned to the caller.
    :return: tuple ``(status_code, body_text)``.
    """
    import asyncio  # local import so this block stays self-contained

    while True:
        try:
            # self.proxy is awaitable -- each attempt gets a (possibly new)
            # proxy URL.  NOTE(review): original comment said this proxy
            # "doesn't work" with aiohttp; see the httpx variant below.
            proxy = await self.proxy
            # ssl=False disables certificate verification both at the
            # connector and per-request (workaround for HTTPS targets that
            # fail through the proxy).
            async with aiohttp.ClientSession(
                connector=aiohttp.TCPConnector(ssl=False)
            ) as session:
                # ClientTimeout replaces the deprecated bare-int timeout.
                async with session.get(
                    url,
                    params=params,
                    headers=headers,
                    proxy=proxy,
                    ssl=False,
                    timeout=aiohttp.ClientTimeout(total=5),
                ) as resp:
                    code = resp.status
                    # Landed on a captcha/antibot page: discard and retry.
                    if any(x in str(resp.url) for x in ['captcha', 'antibot']):
                        continue
                    if not allow_404 and code == 404:
                        continue
                    text = await resp.text()
                    break
        except Exception as e:
            print(e, type(e))
            # Brief pause prevents a hot spin loop when the proxy or the
            # target is persistently failing.
            await asyncio.sleep(0.5)
    return code, text
修改成 httpx 后,就可以正常使用了:
async def request(self, url, params, headers, allow_404=True):
    """Fetch *url* through a proxy using httpx, retrying until success.

    Retries indefinitely on connection/timeout errors, on captcha/antibot
    redirect pages, and (when ``allow_404`` is False) on HTTP 404.

    :param url: target URL.
    :param params: query parameters passed to ``client.get``.
    :param headers: request headers.
    :param allow_404: when False, a 404 status triggers a retry instead of
        being returned to the caller.
    :return: tuple ``(status_code, body_text)``.
    """
    import asyncio  # local import so this block stays self-contained

    while True:
        try:
            # self.proxy is awaitable -- each attempt gets a (possibly new)
            # proxy URL.
            proxy = await self.proxy
            async with httpx.AsyncClient(proxy=proxy, follow_redirects=True) as session:
                resp = await session.get(url, params=params, headers=headers, timeout=5)
                code = resp.status_code
                # Landed on a captcha/antibot page: discard and retry.
                if any(x in str(resp.url) for x in ['captcha', 'antibot']):
                    continue
                if not allow_404 and code == 404:
                    continue
                text = resp.text
                break
        except (httpx.ConnectError, httpx.ReadTimeout, httpx.ConnectTimeout):
            # Expected transient failures: retry quietly, but pause briefly
            # so a dead proxy does not produce a hot spin loop.
            await asyncio.sleep(0.5)
        except Exception as e:
            print(e, type(e))
            await asyncio.sleep(0.5)
    return code, text