"""Scrape proxy IPs from xicidaili and verify each one against Baidu.

A producer process scrapes candidate proxies into a queue; a pool of
worker processes tests each proxy, and the usable ones are collected
into a final list.
"""
import multiprocessing
import queue
import time

import requests
from lxml import etree

# Measured total runtime for different pool sizes:
# 耗时 84.26855897903442  -> 5  workers
# 耗时 44.181687355041504 -> 10 workers
# 耗时 29.013262033462524 -> 20 workers
# 耗时 22.825448036193848 -> 50 workers


def get_all_proxy(queue):
    """Scrape page 1 of xicidaili and put 'http://ip:port' strings on *queue*.

    :param queue: a multiprocessing.Queue the scraped proxy URLs are fed into.
    """
    url = 'http://www.xicidaili.com/nn/1'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',
    }
    response = requests.get(url, headers=headers)
    html_ele = etree.HTML(response.text)
    # td[2] holds the IP, td[3] the port in the xicidaili table layout.
    ip_eles = html_ele.xpath('//table[@id="ip_list"]/tr/td[2]/text()')
    port_ele = html_ele.xpath('//table[@id="ip_list"]/tr/td[3]/text()')
    # zip pairs each IP with its port and also guards against unequal lengths.
    for ip, port in zip(ip_eles, port_ele):
        queue.put('http://' + ip + ':' + port)


def check_one_proxy(proxy):
    """Return *proxy* if Baidu is reachable through it within 5s, else None.

    :param proxy: proxy URL string, e.g. 'http://1.2.3.4:8080'.
    :return: the proxy string when usable, None otherwise.
    """
    url = 'http://www.baidu.com/s?wd=ip'
    proxy_dict = {
        'http': proxy
    }
    try:
        response = requests.get(url, proxies=proxy_dict, timeout=5)
    except requests.RequestException:
        # Connection refused / timed out / bad proxy -> not usable.
        return None
    if response.status_code == 200:
        print('这个人头送的好' + proxy)
        return proxy
    # BUG FIX: the original returned the proxy here even though the
    # request failed, so dead proxies leaked into the valid list.
    print('这个人头没送好')
    return None


if __name__ == '__main__':
    start_time = time.time()
    # Queue shared between the scraper process and this main process.
    q = multiprocessing.Queue()
    # Producer: scrape proxies in a separate process.
    p = multiprocessing.Process(target=get_all_proxy, args=(q,))
    p.start()
    # Consumers: a pool of workers verifying each proxy concurrently.
    pool = multiprocessing.Pool(50)
    result_list = []
    while True:
        try:
            # 5s without new items means the producer is done.
            proxy_str = q.get(timeout=5)
        except queue.Empty:
            break
        proxy_res = pool.apply_async(check_one_proxy, (proxy_str,))
        result_list.append(proxy_res)
    # Gather results, keeping only proxies that actually worked.
    valid_proxy_list = [res for res in (r.get() for r in result_list)
                        if res is not None]
    print('All proxy we can get:')
    print(valid_proxy_list)
    pool.close()
    pool.join()
    p.join()
    end_time = time.time()
    print('--' * 30)
    print('耗时:' + str(end_time - start_time))
多进程爬取xici代理，查找可用代理
最新推荐文章于 2021-12-15 23:02:15 发布
这段代码演示了如何利用多进程和requests库从xicidaili网站获取代理IP,并通过访问百度验证其可用性。通过创建一个队列来分配代理IP给多个工作进程进行有效性检查,最终输出可用的代理列表。
摘要由CSDN通过智能技术生成