When writing a Python crawler, you will often find that the target site has deployed anti-scraping measures. Aggressive, high-frequency crawling puts heavy load on the site's servers, so if the same IP keeps requesting the same pages, it is very likely to get banned. The fix is to use proxy IPs and maintain a proxy IP pool.
The script below first grabs a random proxy IP from http://www.xicidaili.com/nn/, then uses it to refresh the view counts of my own CSDN articles (purely for learning purposes, of course).
from bs4 import BeautifulSoup
import requests
import random


def get_ip_list(url, headers):
    """Scrape the proxy table on xicidaili and return a list of 'ip:port' strings."""
    web_data = requests.get(url, headers=headers)
    soup = BeautifulSoup(web_data.text, 'lxml')
    ips = soup.find_all('tr')
    ip_list = []
    for i in range(1, len(ips)):  # start at 1 to skip the table's header row
        tds = ips[i].find_all('td')
        ip_list.append(tds[1].text + ':' + tds[2].text)  # column 1 is the IP, column 2 the port
    return ip_list
def get_random_ip(ip_list):
    """Pick one proxy at random and wrap it in the dict format that requests expects."""
    proxy_list = []
    for ip in ip_list:
        proxy_list.append('http://' + ip)
    proxy_ip = random.choice(proxy_list)
    proxies = {'http': proxy_ip}
    return proxies
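
One caveat: free proxies from lists like this are frequently dead or painfully slow, so in practice it is worth filtering the scraped list before picking from it. Below is a minimal sketch of such a check; the check_proxy helper, the httpbin.org test URL, and the 3-second timeout are my own assumptions, not part of the original script.

# Hypothetical helper (not in the original script): keep only proxies
# that can actually fetch a page within a short timeout.
def check_proxy(proxy_ip, test_url='http://httpbin.org/ip', timeout=3):
    try:
        r = requests.get(test_url, proxies={'http': proxy_ip}, timeout=timeout)
        return r.status_code == 200
    except requests.RequestException:
        return False  # connection refused, timed out, etc.

# Usage: filter the scraped list down to live proxies before choosing one.
# live_ips = [ip for ip in ip_list if check_proxy('http://' + ip)]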
if __name__ == '__main__':
    url = 'http://www.xicidaili.com/nn/'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'
    }  # request headers
    ip_list = get_ip_list(url, headers=headers)
    proxies = get_random_ip(ip_list)  # grab one random proxy
    urls = ['https://blog.csdn.net/T_I_A_N_/article/list/1?', 'https://blog.csdn.net/T_I_A_N_/article/list/2?',
            'https://blog.csdn.net/T_I_A_N_/article/list/3?', 'https://blog.csdn.net/T_I_A_N_/article/list/4?']
    for page in urls:
        response = requests.get(page, headers=headers, proxies=proxies)
        soup = BeautifulSoup(response.text, 'html.parser')
        div = soup.find(name='div', attrs={'class': 'article-list'})
        links = []
        for h4 in div.find_all(name='h4'):  # each h4 on the list page holds one article link
            links.append(h4.find(name='a').attrs.get('href'))
        for article_url in links:
            # visit each article through the proxy, so the hit comes from the proxy's IP
            response = requests.get(article_url, headers=headers, proxies=proxies)
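
Even a proxy that was alive when you picked it can die mid-run, and a single failed request will then crash the loop. A more robust version catches the failure and rotates to a fresh proxy. Here is a minimal sketch of that idea; the fetch_with_rotation wrapper and the retry count are assumptions of mine, not part of the original post.

# Hypothetical wrapper (my own addition): retry a request with a fresh
# random proxy each time the current one fails or times out.
def fetch_with_rotation(target_url, ip_list, headers, max_retries=5):
    for _ in range(max_retries):
        proxies = get_random_ip(ip_list)
        try:
            return requests.get(target_url, headers=headers,
                                proxies=proxies, timeout=5)
        except requests.RequestException:
            continue  # this proxy is dead, rotate to another one
    return None  # every attempt failed

With this in place, the inner loop of the main script could call fetch_with_rotation(article_url, ip_list, headers) instead of requests.get, so one bad proxy no longer stops the whole run.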