有些页面依赖 JavaScript 动态加载内容,若按静态页面的方式抓取,由 JS 生成的部分代码会获取不全。Selenium 库可以模拟用户操作浏览器的行为,使程序能够获取由 JavaScript 渲染后的完整页面代码,从而解决传统抓取 HTML 静态页面时无法获得动态内容的问题。
以爬取一个网站的备案码为例(自己修改所匹配的正则表达式便可自行修改所爬取内容):
from selenium import webdriver
import re
from bs4 import BeautifulSoup
import requests
def get_page1(url):
    """Load *url* in Chrome via Selenium and return the rendered page source.

    Using a real browser lets JavaScript-generated markup appear in the
    returned HTML, which a plain HTTP fetch would miss.

    Args:
        url: Address to open in the browser.

    Returns:
        The page source as a string after the browser has loaded the URL.
    """
    options = webdriver.ChromeOptions()
    # Launch Chrome and visit the site.
    driver = webdriver.Chrome(options=options)
    try:
        driver.get(url)
        return driver.page_source
    finally:
        # Always shut the browser down, even if the navigation raises,
        # so chromedriver processes are not leaked on repeated calls.
        driver.quit()
def get_beianhao(html):
    """Extract an ICP record number (备案号) from raw HTML.

    Matches either an "ICP证<digits>" licence form or an optional leading
    CJK character followed by "ICP备<digits>" (case-insensitive).

    Args:
        html: Page source to scan.

    Returns:
        The first record number found, or None when the page has none.
    """
    pattern = re.compile(r'(ICP证[\d]+|[\u4e00-\u9fa5]?ICP备[\d]+)', re.I)
    match = pattern.search(html)
    return match.group(1) if match else None
def get_links(soup, beianhao, start_url):
    """Return the href of the first <a> tag whose markup contains *beianhao*.

    Args:
        soup: BeautifulSoup document to search.
        beianhao: Record number to look for inside each anchor's markup.
        start_url: Page the soup came from (kept for interface
            compatibility; not used by the lookup itself).

    Returns:
        The matching anchor's href, or None when no anchor matches
        (or *beianhao* is empty/None).
    """
    if not beianhao:
        # Original code skipped every link when beianhao was falsy;
        # make that outcome explicit instead of re-testing per link.
        return None
    target = str(beianhao)
    for link in soup.find_all('a'):
        href = link.get('href')
        # Only anchors that actually carry an href can be returned.
        if href and target in str(link):
            return href
    return None
def main1():
    """Fetch a page and report its ICP record number and matching link."""
    start_url = ''
    html = get_page1(start_url)
    if not html:
        print(f"获取网页 {start_url} 失败")
        return
    # Parse once up front; the soup is needed to locate the record link.
    soup = BeautifulSoup(html, 'lxml')
    beianhao = get_beianhao(html)
    if not beianhao:
        print(f"网页 {start_url} 没有备案号")
        return
    link = get_links(soup, beianhao, start_url)
    print(f"网页 {start_url} 的备案号为: {beianhao} 备案链接为: {link}")
# Script entry point: run the scraper only when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    main1()