While writing my thesis I needed to scrape a large amount of data, so an IP proxy pool was indispensable. The code below scrapes free proxy IPs directly from Kuaidaili; change start_page and end_page to fetch the IP list for whichever pages you want.
import requests
from bs4 import BeautifulSoup
import time
import random
import csv
# Pool of User-Agent strings (source: http://useragent.kuzhazha.com/PC/ ; swap in your own as needed).
# Note: the second and third entries are truncated here; replace the "..." parts with full UA strings.
ua_list = ['Mozilla/5.0 (Windows; U; Windows NT 5.1; hu; rv:1.8.0.11) Gecko/20070312 Firefox/1.5.0.11',
           'Mozilla/5.0 (Macintosh; Int............ecko) Chrome/114.0.0.0 Safari/537.36 Edg/114.0.1823.67',
           'Mozilla/5.0 (Windows NT ............ko) Chrome/100.0.0.0 Safari/537.36']
# Pick one UA at random for this session
user_agent = random.choice(ua_list)
"""
从URL获取多页代理键值对列表
参数:
url (str): 要爬取的网页URL
user_agent (str): User-Agent字符串
start_page (int): 起始页码
end_page (int): 结束页码
返回:
all_proxies: 包含多页代理键值对的列表,每个元素是一个字典,如 [{"http": "http://14.219.193.217:9797"}, ...]
"""
def get_proxies_from_url(url, user_agent, start_page, end_page):
all_proxies = []
for page in range(start_page, end_page + 1):
try:
headers = {'User-Agent': user_agent}
page_url = f"{url}/{page}"
response = requests.get(page_url, headers=headers)
response.raise_for_status()
except requests.exceptions.RequestException as e:
print(f"请求网页 {page_url} 时发生异常:", e)
continue
soup = BeautifulSoup(response.content, 'html.parser')
td_elements = soup.find_all('td', {'data-title': ['IP', 'PORT']})
for i in range(0, len(td_elements), 2):
ip = td_elements[i].text.strip()
port = td_elements[i + 1].text.strip()
proxy = {"http": f"http://{ip}:{port}"}
all_proxies.append(proxy)
# 添加延迟,避免对服务器造成过多压力
time.sleep(2)
return all_proxies
"""
将代理键值对列表保存为CSV文件
参数:
proxies_list: 包含代理键值对的列表,每个元素是一个字典,如 [{"http": "http://14.219.193.217:9797"}, ...]
"""
def save_proxies_to_csv(proxies_list):
with open("proxy.csv", "w", newline="", encoding="utf-8") as csvfile:
fieldnames = ["序号", "proxy值"]
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for idx, proxy in enumerate(proxies_list, start=1):
writer.writerow({"序号": idx, "proxy值": list(proxy.items())[0]})
# Example usage:
base_url = "https://www.kuaidaili.com/free/intr"
start_page = 20
end_page = 30
proxies_list = get_proxies_from_url(base_url, user_agent, start_page, end_page)
save_proxies_to_csv(proxies_list)
print("Data successfully written to proxy.csv.")
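Free proxies go stale quickly, so it is worth checking each entry before relying on it in your scraper. Below is a minimal sketch of such a check; the check_proxy helper, the test URL http://httpbin.org/ip, and the 5-second timeout are my own illustrative choices, not part of the original code.

import requests

def check_proxy(proxy, test_url="http://httpbin.org/ip", timeout=5):
    """Return True if the proxy can fetch test_url within the timeout."""
    # test_url and timeout are assumptions for illustration; adjust to your target site
    try:
        response = requests.get(test_url, proxies=proxy, timeout=timeout)
        response.raise_for_status()
        return True
    except requests.exceptions.RequestException:
        return False

# Keep only the proxies that currently work
working_proxies = [p for p in proxies_list if check_proxy(p)]
print(f"{len(working_proxies)} of {len(proxies_list)} proxies are usable.")

Any surviving entry can then be passed straight to the scraping code, e.g. requests.get(target_url, proxies=random.choice(working_proxies)), rotating through the pool on each request.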