import requests
from lxml import etree

baseurl = 'http://www.xicidaili.com/nn/'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',
}
http_list = []
def get_IP():
    print('----- IP crawl progress -----')
    # Only crawl the first 4 pages of proxies
    for i in range(1, 5):
        print('------ started crawling page ' + str(i) + ' ------')
        url = baseurl + str(i)
        raw_html = requests.get(url, headers=headers).text
        # print(raw_html)
        selector = etree.HTML(raw_html)
        # td[index] is 1-based; grab the ip, port and type columns respectively
        ip = selector.xpath('//tr[@class="odd"]//td[2]/text()')
        port = selector.xpath('//tr[@class="odd"]//td[3]/text()')
        httptype = selector.xpath('//tr[@class="odd"]//td[6]/text()')
        # ip/port/httptype are lists holding the current page's data; convert each entry
        # into the {'HTTP': 'HTTP://ip:port'} format expected by the proxies argument of requests.get
        for eachip, eachport, eachtype in zip(ip, port, httptype):
            http_dict = {}
            http_dict[eachtype] = eachtype + '://' + eachip + ':' + eachport
            http_list.append(http_dict)
        print(http_list)  # all proxies collected so far
        print('------ finished crawling page ' + str(i) + ' ------')
    print('------ IP crawling finished ------')
    # Returned data format: [{}, {}, {}, ...]
    return http_list
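
# A minimal usage sketch (not part of the original code): it shows how the entries of
# http_list can be fed to the proxies argument of requests.get. httpbin.org/ip is only an
# illustrative echo service, and the 5-second timeout is an assumption; free proxies fail
# often, so unreachable ones are simply skipped. Keys are lowercased here because requests
# matches proxies by lowercase scheme ('http' / 'https').
def check_proxies(proxy_list):
    usable = []
    for entry in proxy_list:
        # Normalize {'HTTP': 'HTTP://ip:port'} to {'http': 'http://ip:port'}
        proxy = {scheme.lower(): addr.lower() for scheme, addr in entry.items()}
        try:
            resp = requests.get('http://httpbin.org/ip', headers=headers,
                                proxies=proxy, timeout=5)
            if resp.status_code == 200:
                usable.append(proxy)
        except requests.RequestException:
            # Proxy is dead or too slow; skip it
            continue
    return usable

# Example: crawl the proxy pages first, then keep only the proxies that still respond
# usable_proxies = check_proxies(get_IP())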