一、导入相关库
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
二、设置相关参数
1、初级设置,可以满足大部分的爬
# Ask whether to show a visible browser window (1 = show, 0 = hidden/headless).
# NOTE: the name is misleading — '0' means "do not open a window", i.e. headless.
is_headless = input('是否打开浏览器(是1否0):')
# Chrome launch options
options = webdriver.ChromeOptions()
# Spoof a regular desktop user-agent (copy your own from the F12 dev-tools Network tab)
options.add_argument('user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.54 Safari/537.36')
# Run headless when the user chose NOT to open a visible browser
if is_headless == '0':
    # required flag for headless mode (FIX: was not indented under the if)
    options.add_argument('--headless')
# Optional flags — use as needed for your environment
options.add_argument('--window-size=1920,1080')  # explicit window size (headless default is small)
options.add_argument('--log-level=3')  # suppress console log noise
options.add_argument('--disable-gpu')
options.add_argument('--disable-dev-shm-usage')
options.add_argument('--disable-software-rasterizer')
options.add_argument('--enable-webgl')  # FIX: original '-enable-webgl' (single dash) is ignored by Chromium
options.add_argument('--no-sandbox')
# FIX: add_experimental_option() overwrites the previous value for the same key,
# so two separate calls left only 'enable-automation' excluded. Pass both
# switches in one list so logging noise AND the automation banner are disabled.
options.add_experimental_option('excludeSwitches', ['enable-logging', 'enable-automation'])
下面的“中级设置”和“高级设置”可以不设置,但必须记得有此操作。
2、中级设置,不加载图片、css和js,加快网页的加载速度,可以满足跟图片和渲染无关的爬
# Skip images and stylesheets so pages load faster (value 2 = block)
content_settings = {
    'images': 2,
    'permissions.default.stylesheet': 2,
    # 'javascript': 2  # blocking JS breaks dynamic pages; kept commented for future use
}
options.add_experimental_option('prefs', {'profile.default_content_setting_values': content_settings})
3、高级设置,添加代理ip,不以真实面目在网络的世界爬来爬去,小心……
# Route traffic through an HTTP proxy so the crawler's real IP stays hidden
proxy_ip = input('请输入代理ip:')
options.add_argument(f'--proxy-server=http://{proxy_ip}')
三、开爬
# Absolute path to chromedriver.exe
chromedriver_path = r'C:\Users\admin\Desktop\chromedriver.exe'
# Launch Chrome. FIX: the `executable_path` keyword was deprecated in Selenium 4
# and removed in 4.10+; the driver path must be wrapped in a Service object.
from selenium.webdriver.chrome.service import Service
driver = webdriver.Chrome(options=options, service=Service(executable_path=chromedriver_path))
# Target URL to crawl
pa_url = 'https://www.不存在的网站.com/'
driver.get(pa_url)
四、显式等待网页指定元素的加载
有时候你爬得太快,你想要的元素还没加载完时,会容易出现找不到该元素的异常。使用显式等待网页指定元素的加载可以让你稳稳地爬。
1、WebDriverWait()部分源码,看不懂跳~过即可
# NOTE(review): verbatim (truncated) excerpt of selenium.webdriver.support.wait,
# quoted here for reference only. Indentation was lost in the paste and the
# __init__ body after the docstring is omitted — do not run this as-is.
POLL_FREQUENCY = 0.5 # How long to sleep inbetween calls to the method
IGNORED_EXCEPTIONS = (NoSuchElementException,) # exceptions ignored during calls to the method
class WebDriverWait(object):
def __init__(self, driver, timeout, poll_frequency=POLL_FREQUENCY, ignored_exceptions=None):
"""Constructor, takes a WebDriver instance and timeout in seconds.
:Args:
- driver - Instance of WebDriver (Ie, Firefox, Chrome or Remote)
- timeout - Number of seconds before timing out
- poll_frequency - sleep interval between calls
By default, it is 0.5 second.
- ignored_exceptions - iterable structure of exception classes ignored during calls.
By default, it contains NoSuchElementException only.
Example:
from selenium.webdriver.support.ui import WebDriverWait \n
element = WebDriverWait(driver, 10).until(lambda x: x.find_element_by_id("someId")) \n
is_disappeared = WebDriverWait(driver, 30, 1, (ElementNotVisibleException)).\ \n
until_not(lambda x: x.find_element_by_id("someId").is_displayed())
"""
2、如何使用WebDriverWait()
# Locator strategies available on By (e.g. By.ID):
# ID = "id"
# XPATH = "xpath"
# LINK_TEXT = "link text"
# PARTIAL_LINK_TEXT = "partial link text"
# NAME = "name"
# TAG_NAME = "tag name"
# CLASS_NAME = "class name"
# CSS_SELECTOR = "css selector"
try:
    # WebDriverWait arguments:
    #   driver             - the browser instance opened above
    #   10                 - wait up to 10 seconds for the element to appear
    #   poll_frequency     - defaults to 0.5 s between element lookups
    #   ignored_exceptions - exception classes ignored *while polling*
    #                        (NoSuchElementException by default); on timeout a
    #                        TimeoutException is still raised regardless.
    # FIX: try/except bodies were not indented in the original paste (SyntaxError).
    WebDriverWait(driver, 10).until(
        EC.presence_of_element_located((By.XPATH, '//*[@id="su"]'))
    )
except Exception as e:
    # Broad catch is acceptable for a tutorial script: just report and continue
    print('error:', e)
五、至此,关于爬的技术已经差不多,把上面一二三的代码连起来就算是日常的爬操作了