演示利用代理访问网站
1、下载站大爷IP代理软件,用软件查找可用IP。软件已上传到资源
https://www.zdaye.com/FreeIPList.html
2、提供免费代理IP的网站
[已完成,0个,22:39:48] http://www.xicidaili.com/
[已完成,0个,22:39:48] http://www.xicidaili.com/nn/
[已完成,0个,22:39:48] http://www.xicidaili.com/nt/
[已完成,0个,22:39:59] http://www.iphai.com/free/ng
[已完成,0个,22:39:59] https://www.kuaidaili.com/free/
[已完成,0个,22:39:59] https://www.kuaidaili.com/free/intr/
[已完成,0个,22:40:00] http://http.zhiliandaili.cn/
[已完成,0个,22:40:00] http://www.66ip.cn/
[已完成,15个,22:40:01] http://www.89ip.cn/
[已完成,0个,22:40:01] http://ip.yqie.com/ipproxy.htm
[已完成,0个,22:40:01] http://31f.cn/
[已完成,20个,22:40:02] https://ip.jiangxianli.com/
3、加入判断
from bs4 import BeautifulSoup
import random
import requests

# Demo: show this machine's real IP, then repeat the same lookup through a
# randomly chosen HTTP proxy to verify the proxy actually works.
url = "http://www.ip111.cn/"
# timeout keeps the script from hanging forever on a dead endpoint.
html = requests.get(url=url, timeout=10)
soup = BeautifulSoup(html.text, 'lxml')
print('本机IP:', soup.find_all('p')[0].text)

# Candidate HTTP proxies in "host:port" form.
http_ip = [
    '124.90.49.188:8888'
]
proxy_ip = {
    # random.choice() returns a random item from a list, tuple or string.
    'http': random.choice(http_ip),
}
print('使用代理的IP:', proxy_ip)
try:
    html2 = requests.get(url=url, proxies=proxy_ip, timeout=10)
    code = html2.status_code
    soup = BeautifulSoup(html2.text, 'lxml')
    print('代理IP,可以使用:', soup.find_all("p")[0].text)
except requests.exceptions.RequestException:
    # Narrow except: only network/proxy failures mean "proxy unusable";
    # the original bare `except:` also swallowed KeyboardInterrupt etc.
    print('代理IP不可以用')
from bs4 import BeautifulSoup
import random
import requests
import time

# Demo: try up to `max_count` randomly chosen proxies against an IP-echo
# site, printing the IP the site sees for each attempt.
url = "http://www.ip111.cn/"

# Candidate HTTP proxies in "host:port" form.
# BUG FIX: the original list was missing a comma between
# '163.125.65.90:9797' and '58.251.230.5:9797', so Python implicitly
# concatenated them into one invalid proxy string and one proxy was lost.
http_ip = [
    '221.122.91.64:80',
    '163.125.65.90:9797',
    '58.251.230.5:9797',
    '124.205.155.147:9090',
]

max_count = 3   # number of proxy attempts before giving up
sleep_time = 3  # seconds to pause between attempts (was defined but unused)

while max_count > 0:
    proxy_ip = {
        # random.choice() returns a random item from a list, tuple or string.
        'http': random.choice(http_ip),
    }
    print('使用代理的IP:', proxy_ip)
    try:
        # timeout prevents a dead proxy from hanging the loop; a failing
        # request no longer crashes the whole script mid-loop.
        html = requests.get(url=url, proxies=proxy_ip, timeout=10)
        soup = BeautifulSoup(html.text, 'lxml')
        print(soup.find_all("p")[0].text)
    except requests.exceptions.RequestException:
        print('代理IP不可以用')
    max_count -= 1
    if max_count > 0:
        time.sleep(sleep_time)
print("执行结束")