- My system: macOS 10.15.6 Catalina
- Scenario: scraping site data with the Python requests package plus an IP proxy pool
- Error: HTTPSConnectionPool(host='xxxxx', port=443): Max retries exceeded with url: xxxxxxxx (Caused by Ne…
The blogger below already covers the basics of this situation (the "Max retries exceeded" message itself comes from urllib3 and just means that every connection attempt failed; the real cause is whatever follows "Caused by"):
But the error still occurred for me.
import time
import random
import requests

# Rotate through these User-Agent strings as a basic anti-anti-scraping measure
USER_AGENTS = [
    "Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X; zh-cn) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5",
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0b13pre) Gecko/20110307 Firefox/4.0b13pre",
    "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:16.0) Gecko/20100101 Firefox/16.0",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11",
    "Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10"
]
headers = {
    "User-Agent": ""  # filled with a random choice on every request
}

# Commonly suggested tweaks for "Max retries exceeded" (note: the session below is
# never actually used by the requests.get calls, so its effect here is doubtful)
s = requests.session()
s.keep_alive = False
requests.adapters.DEFAULT_RETRIES = 10

url = "https://baike.baidu.com/item/人工智能/9180?fromtitle=AI&fromid=25417&fr=aladdin"
for i in range(10):
    proxys = {
        # new_ips is the list of "ip:port" strings scraped earlier (that code is at the end of this post)
        "https": "http://" + new_ips[i],
        "http": "http://" + new_ips[i]
    }
    headers['User-Agent'] = random.choice(USER_AGENTS)
    print(proxys)
    print(headers['User-Agent'])
    req = requests.get(url, headers=headers, verify=False, proxies=proxys, timeout=20).content.decode('utf-8')
    print(req)
    time.sleep(5)
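One practical note: a dead proxy in the pool raises requests.exceptions.ProxyError (or ConnectTimeout) and kills the whole loop, and verify=False makes urllib3 print an InsecureRequestWarning on every request. Here is a minimal hardened sketch of the same loop; the try/except and the warning suppression are my additions, not part of the original fix:

import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)  # silence verify=False warnings

for i in range(10):
    proxys = {
        "https": "http://" + new_ips[i],
        "http": "http://" + new_ips[i]
    }
    headers['User-Agent'] = random.choice(USER_AGENTS)
    try:
        resp = requests.get(url, headers=headers, verify=False, proxies=proxys, timeout=20)
        print(resp.content.decode('utf-8'))
    except requests.exceptions.RequestException as e:
        # covers ProxyError, ConnectTimeout, SSLError, ... -- skip the bad proxy and move on
        print("proxy %s failed: %s" % (new_ips[i], e))
        continue
    time.sleep(5)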
First, you need to confirm that the proxy IPs themselves are usable. Using the method given in the post linked here:
link
I verified that dead IPs were not the problem.
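A quick way to self-check a proxy is to fetch a "what is my IP" endpoint through it and see whether the request succeeds. A minimal sketch (httpbin.org/ip and the proxy_works name are my own choices for illustration, not from the linked post):

import requests

def proxy_works(ip_port, timeout=10):
    """Return True if an HTTP request routed through ip_port succeeds."""
    proxies = {"http": "http://" + ip_port, "https": "http://" + ip_port}
    try:
        r = requests.get("http://httpbin.org/ip", proxies=proxies, timeout=timeout)
        return r.status_code == 200
    except requests.exceptions.RequestException:
        return False

print(proxy_works("1.2.3.4:8080"))  # a made-up address; expect False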
Later I saw someone on Zhihu mention that, when building the dict passed to proxies, they upper-cased "https" and "http" everywhere. I tried that and it did indeed work.
for i in range(10):
    proxys = {
        "HTTPS": "HTTP://" + new_ips[i],
        "HTTP": "HTTP://" + new_ips[i]
        # everything upper-cased here!
    }
    headers['User-Agent'] = random.choice(USER_AGENTS)
    print(proxys)
    print(headers['User-Agent'])
    req = requests.get(url, headers=headers, verify=False, proxies=proxys, timeout=20).content.decode('utf-8')
    print(req)
    time.sleep(5)
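One caveat worth adding (this is my reading of the requests source, so verify it against your version): requests matches the keys of the proxies dict against the lower-cased URL scheme, so upper-cased keys like "HTTPS" never match and the proxies are silently ignored; the request then goes out over your own connection, which may be the real reason it "works". requests.utils.select_proxy shows the lookup directly:

from requests.utils import select_proxy

url = "https://baike.baidu.com/item/人工智能/9180"
print(select_proxy(url, {"HTTPS": "http://1.2.3.4:8080"}))  # None -> no proxy would be used
print(select_proxy(url, {"https": "http://1.2.3.4:8080"}))  # http://1.2.3.4:8080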
A few landmines I stepped on tonight:
- In the proxy dict, every value starts with HTTP no matter whether its key is HTTP or HTTPS; only the key uses HTTPS! (Proxy URLs use the http:// scheme even when tunneling https traffic.)
- If the site you want to scrape is https://, be absolutely sure to pass verify=False to requests. (It disables TLS certificate verification, which many free proxies would otherwise fail.)
- Here is the free IP proxy pool I used; it is barely usable. The code that scrapes IPs from that site follows:
# Fetch usable proxy IPs from www.66ip.cn
import re
import requests

header = {
    # 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36"
}
response = requests.get("http://www.66ip.cn/areaindex_11/1.html", headers=header)
encode_content = response.content.decode('gb18030', 'replace')  # the page is GB-encoded
# match "<td>ip</td><td>port</td>" pairs
ips = re.findall(r"<td>(?:[0-9]{1,3}\.){3}[0-9]{1,3}</td><td>[0-9]{1,5}</td>", encode_content)

new_ips = []
for i in ips:
    # turn "<td>ip</td><td>port</td>" into "<td>ip:port</td>"
    new_ips.append(i.replace("</td><td>", ":"))
for i in range(len(new_ips)):
    # strip the remaining td tags, leaving "ip:port"
    new_ips[i] = new_ips[i].replace("<td>", "")
    new_ips[i] = new_ips[i].replace("</td>", "")
for i in new_ips:
    print(i)
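To tie the pieces together, the scraped list could be filtered down to live proxies before starting the crawl loop, e.g. with the hypothetical proxy_works helper sketched earlier:

live_ips = [ip for ip in new_ips if proxy_works(ip)]
print("%d of %d scraped proxies responded" % (len(live_ips), len(new_ips)))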