# 01 地址获取 — scrape free proxy addresses (bare text here was a syntax error)
import requests
import parsel
import time
def check_ip(proxies_list):
    """Probe each proxy in *proxies_list* and return the usable ones.

    A proxy is considered usable when a GET to baidu.com through it
    succeeds (HTTP 200) within the short timeout.  Proxies that time
    out or raise any request error are silently skipped — free proxies
    fail constantly and one bad entry must not abort the whole scan.

    :param proxies_list: list of dicts shaped like {'http': 'ip:port'}
                         (the format ``requests`` expects for proxies)
    :return: list of the proxies that responded successfully
    """
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Safari/537.36'}
    can_use = []
    for proxy in proxies_list:
        try:
            # timeout=0.1 is deliberately aggressive: we only want fast proxies.
            response = requests.get('https://www.baidu.com',
                                    headers=headers,
                                    proxies=proxy,
                                    timeout=0.1)
            if response.status_code == 200:
                can_use.append(proxy)
        except Exception:
            # Bad/slow proxy — skip it and keep scanning the rest.
            continue
    return can_use
# Scrape free proxies from kuaidaili.com (pages 1-4) into proxies_list.
# Each entry is a dict of the form {'http': 'ip:port'} / {'https': 'ip:port'},
# i.e. the shape requests expects for its ``proxies=`` argument.
proxies_list = []
# Loop-invariant: one UA header for every page request.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Safari/537.36'}
for page in range(1, 5):
    print('=========正在获取地{}页数据=========='.format(page))
    base_url = 'https://www.kuaidaili.com/free/inha/{}/'.format(page)
    response = requests.get(base_url, headers=headers)
    # Parse the proxy table: columns are td[1]=IP, td[2]=port, td[4]=type.
    html_data = parsel.Selector(response.text)
    parse_list = html_data.xpath('//table[@class="table table-bordered table-striped"]/tbody/tr')
    for tr in parse_list:
        http_type = tr.xpath('./td[4]/text()').extract_first()
        ip_num = tr.xpath('./td[1]/text()').extract_first()
        ip_port = tr.xpath('./td[2]/text()').extract_first()
        # extract_first() returns None on malformed/missing cells; skip such
        # rows instead of crashing on None + str concatenation.
        if not (http_type and ip_num and ip_port):
            continue
        # requests matches proxies-dict keys against the lowercase URL
        # scheme, so normalize the site's 'HTTP'/'HTTPS' label to lowercase
        # or the proxy entry would never be applied.
        dict_proxies = {http_type.lower(): ip_num + ':' + ip_port}
        print(dict_proxies)
        proxies_list.append(dict_proxies)
    # Be polite to the site between pages.
    time.sleep(0.5)
print(proxies_list)
print('获取代理ip的数据量:', len(proxies_list))