1、lxml
pip install lxml
import requests
from lxml import etree

# Fetch the page over HTTP; a timeout keeps the request from hanging forever.
url = 'https://example.com'
response = requests.get(url, timeout=10)
response.raise_for_status()  # fail fast on 4xx/5xx instead of parsing an error page
html_content = response.text

# Parse the HTML text into an lxml Element tree (tolerant HTML parser).
tree = etree.HTML(html_content)

# Select elements with XPath (replace '//h1' with the actual expression you need).
selected_elements = tree.xpath('//h1')

# Print the direct text content of each matched element.
# NOTE: .text is None when the element has no leading text node.
for element in selected_elements:
    print(element.text)
2、bs4
pip install beautifulsoup4
from bs4 import BeautifulSoup
import requests

# Fetch the page over HTTP; a timeout keeps the request from hanging forever.
url = 'https://example.com'
response = requests.get(url, timeout=10)
response.raise_for_status()  # fail fast on 4xx/5xx instead of parsing an error page
html_content = response.text

# Parse the HTML with the stdlib-backed parser and select all <h1> elements
# using a CSS selector (BeautifulSoup uses CSS selectors, not XPath).
soup = BeautifulSoup(html_content, 'html.parser')
selected_elements = soup.select('h1')

# Print the aggregated text of each matched element.
for element in selected_elements:
    print(element.text)
3、xml.etree.ElementTree
内置库
import xml.etree.ElementTree as ET
import requests

# Fetch the document over HTTP; a timeout keeps the request from hanging forever.
url = 'https://example.com'
response = requests.get(url, timeout=10)
response.raise_for_status()  # fail fast on 4xx/5xx instead of parsing an error page
xml_content = response.text

# NOTE(review): ET.fromstring requires well-formed XML; typical real-world HTML
# will raise xml.etree.ElementTree.ParseError — use lxml or BeautifulSoup for HTML.
root = ET.fromstring(xml_content)

# findall supports a limited XPath subset; './/tag' matches all descendants.
# Replace 'element_name' with the actual tag you want to extract.
selected_elements = root.findall('.//element_name')

# Print the direct text content of each matched element.
for element in selected_elements:
    print(element.text)
4、scrapy
pip install scrapy
import scrapy
from scrapy.http import HtmlResponse

# The HTML string to be parsed.
html_content = "<html><body><h1>Hello, World!</h1></body></html>"

# Build an HtmlResponse directly from the string; a str body is accepted
# when an explicit encoding is supplied (TextResponse encodes it internally).
response = HtmlResponse(url='http://example.com', body=html_content, encoding='utf-8')

# Select elements with XPath.
selected_elements = response.xpath('//h1')

# 'string()' returns the full concatenated text content of each matched node.
for element in selected_elements:
    print(element.xpath('string()').get())
四种方法中，lxml、ElementTree 和 Scrapy 得到的对象都可以使用（类）XPath 表达式去匹配对应的 HTML/XML 元素；BeautifulSoup 得到的 soup 对象则通过 .select 使用 CSS 选择器完成同样的任务。