Why do IPs get banned?
When crawling some websites you may find that your IP has been banned. Websites deploy anti-scraping mechanisms to protect themselves: a large volume of similar requests from the same IP address gets that IP blocked, and access is only restored after some time.
How do we work around an IP ban?
There are several approaches (a minimal sketch combining them follows this list):
1. Modify the request headers to mimic a browser, rather than letting the code hit the site directly
2. Use proxy IPs and rotate them
3. Space out your requests with time intervals
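A minimal sketch combining these three techniques; the target URL and proxy addresses below are placeholders, not endpoints from this article:

```python
import time
import random
import requests

# Placeholder target URL and proxy addresses -- swap in real ones.
url = 'http://example.com/page'
proxy_pool = ['http://1.2.3.4:8080', 'http://5.6.7.8:3128']

# 1. Mimic a browser via the User-Agent header
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) '
                         'AppleWebKit/537.36 (KHTML, like Gecko) '
                         'Chrome/72.0.3626.119 Safari/537.36'}

for _ in range(5):
    proxy = random.choice(proxy_pool)  # 2. rotate the proxy IP per request
    try:
        r = requests.get(url, headers=headers,
                         proxies={'http': proxy}, timeout=10)
        print(r.status_code)
    except requests.RequestException:
        print('Request through ' + proxy + ' failed')
    time.sleep(random.uniform(1, 3))  # 3. wait between requests
```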
How to get proxy IP addresses
- Get them from this site: https://www.xicidaili.com/
- Inspect -> hover with the mouse to locate the elements
- The proxy IPs we want are the contents of the tags with class='odd':
# Save the scraped proxy IPs into the proxy_ip_list list
# Import the required libraries
from bs4 import BeautifulSoup
import requests
import time

def open_proxy_url(url):
    user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'
    headers = {'User-Agent': user_agent}
    try:
        r = requests.get(url, headers=headers, timeout=20)
        r.raise_for_status()  # raise an exception for a bad response status
        """
        apparent_encoding is inferred from the page content itself, so it is more
        accurate than encoding. When a page comes back garbled, you can assign
        apparent_encoding to encoding.
        encoding is taken from the charset field of the HTTP response header; if
        the header has no charset field it defaults to ISO-8859-1, which cannot
        decode Chinese -- that is where the garbled text comes from.
        """
        r.encoding = r.apparent_encoding  # see the note above on encoding vs. apparent_encoding
        return r.text
    except:
        print('Cannot access ' + url)
def get_proxy_ip(response):
    proxy_ip_list = []
    soup = BeautifulSoup(response, 'html.parser')
    proxy_ips = soup.select('.odd')  # select the rows with class 'odd'
    for proxy_ip in proxy_ips:
        ip = proxy_ip.select('td')[1].text
        port = proxy_ip.select('td')[2].text
        protocol = proxy_ip.select('td')[5].text
        if protocol in ('HTTP', 'HTTPS'):
            proxy_ip_list.append(f'{protocol}://{ip}:{port}')
    return proxy_ip_list
if __name__ == '__main__':
    proxy_url = 'https://www.xicidaili.com/'
    text = open_proxy_url(proxy_url)
    proxy_ip_filename = 'proxy_ip.txt'
    with open(proxy_ip_filename, 'w') as f:
        f.write(text)
    text = open(proxy_ip_filename, 'r').read()
    proxy_ip_list = get_proxy_ip(text)
    print(proxy_ip_list)
Fetching the data this way turned out to miss some entries: not every row holding an IP address has class 'odd'. So switch to BeautifulSoup's find_all('tr') to collect all the IPs instead:
proxy_url = 'https://www.xicidaili.com/'
text = open_proxy_url(proxy_url)
proxy_ip_filename = 'proxy_ip.txt'
with open(proxy_ip_filename, 'w') as f:
    f.write(text)
text = open(proxy_ip_filename, 'r').read()
proxy_ip_list = get_proxy_ip(text)
soup = BeautifulSoup(text, 'html.parser')
proxy_ips = soup.find(id='ip_list').find_all('tr')
tplt = "{0:^4}\t{1:^6}\t{2:^10}\t{3:^16}\t{4:24}"
print(tplt.format("No.", "Protocol", "IP address", "Port", "Location"))
count = 0
for proxy_ip in proxy_ips:
    if len(proxy_ip.select('td')) >= 8:
        count += 1
        ip = proxy_ip.select('td')[1].text
        port = proxy_ip.select('td')[2].text
        protocol = proxy_ip.select('td')[5].text
        address = proxy_ip.select('td')[3].text
        if protocol in ('HTTP', 'HTTPS', 'http', 'https'):
            print(tplt.format(count, protocol, ip, port, address))
Once we have proxy IPs, we need to actually use them:
- proxies is a dictionary
- {'http': 'http://IP:port', 'https': 'https://IP:port'}
- pass it straight into requests' get method
- web_data = requests.get(url, headers=headers, proxies=proxies)
def open_url_using_proxy(url, proxy):
    user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'
    headers = {'User-Agent': user_agent}
    proxies = {}
    if proxy.startswith(('HTTPS', 'https')):
        proxies['https'] = proxy
    else:
        proxies['http'] = proxy
    try:
        r = requests.get(url, headers=headers, proxies=proxies, timeout=10)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return (r.text, r.status_code)
    except:
        print('Cannot access ' + url)
        return False
url = 'http://www.baidu.com'
text = open_url_using_proxy(url, proxy_ip_list[0])
print(text)
Check whether a proxy IP actually works
- Whether the proxy site is free or paid, the proxies it provides are not necessarily valid. We should verify each one first and only add it to the proxy pool once it passes. A few ways to do this (a small sketch of approach 3 follows the list):
1. Visit a website and check that the returned status code is 200
2. Actually fetch certain pages and extract, say, the title, verifying it matches what you expect
3. Visit a service that reports the caller's IP (a 'what is my IP' style site) and check which IP it returns
4. Verify the returned status code
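As a minimal sketch of approach 3, assuming https://jsonip.com returns JSON of the form {'ip': '...'} (the complete code further below relies on the same service):

```python
import requests

def ip_seen_by_server(proxy):
    # Ask https://jsonip.com which IP it sees when we go through `proxy`.
    scheme = 'https' if proxy.lower().startswith('https') else 'http'
    r = requests.get('https://jsonip.com/', proxies={scheme: proxy}, timeout=10)
    return r.json().get('ip')

# If the reported IP matches the proxy's address rather than our own public
# IP, the proxy really is relaying the traffic.
```

The checks below start with the simplest variant: status code only.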
def check_proxy_availability(proxy):
    url = 'http://www.baidu.com'
    result = open_url_using_proxy(url, proxy)
    VALID_PROXY = False
    if result:
        text, status_code = result
        if status_code == 200:
            print('Valid proxy IP: ' + proxy)
        else:
            print('Invalid proxy IP: ' + proxy)

for proxy in proxy_ip_list:
    check_proxy_availability(proxy)
Most of them turn out to be invalid and cannot reach the page.
Check the site title
import re

def check_proxy_availability(proxy):
    url = 'http://www.baidu.com'
    result = open_url_using_proxy(url, proxy)
    VALID = False
    if result:
        text, status_code = result
        if status_code == 200:
            r_title = re.findall('<title>.*</title>', text)  # extract the page title
            if r_title:
                if r_title[0] == '<title>百度一下,你就知道</title>':
                    VALID = True
    if VALID:
        print('Valid proxy IP: ' + proxy)
    else:
        print('Invalid proxy IP: ' + proxy)
Here is the complete code:
from bs4 import BeautifulSoup
import requests
import re
import json

def open_proxy_url(url):
    user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'
    headers = {'User-Agent': user_agent}
    try:
        r = requests.get(url, headers=headers, timeout=10)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except:
        print('Cannot access ' + url)

def get_proxy_ip(response):
    proxy_ip_list = []
    soup = BeautifulSoup(response, 'html.parser')
    proxy_ips = soup.find(id='ip_list').find_all('tr')
    for proxy_ip in proxy_ips:
        if len(proxy_ip.select('td')) >= 8:
            ip = proxy_ip.select('td')[1].text
            port = proxy_ip.select('td')[2].text
            protocol = proxy_ip.select('td')[5].text
            if protocol in ('HTTP', 'HTTPS', 'http', 'https'):
                proxy_ip_list.append(f'{protocol}://{ip}:{port}')
    return proxy_ip_list

def open_url_using_proxy(url, proxy):
    user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'
    headers = {'User-Agent': user_agent}
    proxies = {}
    if proxy.startswith(('HTTPS', 'https')):
        proxies['https'] = proxy
    else:
        proxies['http'] = proxy
    try:
        r = requests.get(url, headers=headers, proxies=proxies, timeout=10)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return (r.text, r.status_code)
    except:
        print('Cannot access ' + url)
        print('Invalid proxy IP: ' + proxy)
        return False

def check_proxy_availability(proxy):
    url = 'http://www.baidu.com'
    result = open_url_using_proxy(url, proxy)
    VALID_PROXY = False
    if result:
        text, status_code = result
        if status_code == 200:
            r_title = re.findall('<title>.*</title>', text)
            if r_title:
                if r_title[0] == '<title>百度一下,你就知道</title>':
                    VALID_PROXY = True
    if VALID_PROXY:
        check_ip_url = 'https://jsonip.com/'
        try:
            text, status_code = open_url_using_proxy(check_ip_url, proxy)
        except:
            return
        print('Valid proxy IP: ' + proxy)
        with open('valid_proxy_ip.txt', 'a') as f:
            f.write(proxy + '\n')  # one proxy per line
        try:
            source_ip = json.loads(text).get('ip')
            print(f'Source IP address: {source_ip}')
            print('=' * 40)
        except:
            print('Response is not JSON and cannot be parsed')
            print(text)
    else:
        print('Invalid proxy IP: ' + proxy)

if __name__ == '__main__':
    proxy_url = 'https://www.xicidaili.com/'
    proxy_ip_filename = 'proxy_ip.txt'
    text = open(proxy_ip_filename, 'r').read()
    proxy_ip_list = get_proxy_ip(text)
    for proxy in proxy_ip_list:
        check_proxy_availability(proxy)
About HTTP and HTTPS proxies
As we saw, proxies holds two key-value pairs:
{'http': 'http://IP:port', 'https': 'https://IP:port'}
An HTTP proxy only proxies HTTP sites; it does nothing for HTTPS sites, meaning those requests go out with the local machine's IP. The reverse holds as well.
The validation site I used above, https://jsonip.com, is an HTTPS site.
So among the proxies detected as valid: with an HTTPS proxy, the address returned is the proxy's;
with an HTTP proxy, the request is made with the local IP, and what comes back is my own public IP address.
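A quick way to see this behaviour: requests picks the proxy entry by the URL scheme, so an 'http'-only proxies dict is simply ignored for an HTTPS URL (the proxy address below is a placeholder):

```python
import requests

proxies = {'http': 'http://1.2.3.4:8080'}  # hypothetical HTTP-only proxy

# https://jsonip.com is an HTTPS URL; with no 'https' key in proxies the
# request bypasses the proxy entirely and the site reports our own IP.
print(requests.get('https://jsonip.com/', proxies=proxies, timeout=10).json())
```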