from urllib import request
import random
import re


def get_html(url):
    """Fetch and return the decoded HTML source of *url*.

    Sends the request with a desktop-browser User-Agent header so the
    proxy-listing site does not reject the scraper.
    """
    header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'
    }
    req = request.Request(url, headers=header)
    # Close the connection deterministically instead of leaking it.
    with request.urlopen(req) as response:
        html = response.read().decode()
    return html


def get_proxy(html):
    """Extract each proxy's IP and port from the page HTML.

    Scans every ``<tr class=...>`` table row and returns a list of
    ``'ip:port'`` strings, one per row that contains both an IPv4
    address and a digits-only ``<td>`` port cell.
    """
    # BUG FIX: proxy_list was never initialized in the original, so the
    # first append raised NameError. It is a local accumulator now.
    proxy_list = []
    firstlist = re.findall(r'<tr class=.*?>(.*?)</tr>', html, re.S)
    print(len(firstlist))
    for row in firstlist:
        ips = re.findall(r'\d+\.\d+\.\d+\.\d+', row, re.S)
        ports = re.findall(r'<td>(\d+)</td>', row, re.S)
        # Skip malformed rows instead of crashing with IndexError.
        if not ips or not ports:
            continue
        proxy_list.append('{}:{}'.format(ips[0], ports[0]))
    return proxy_list