Python爬虫:基础操作02(Re模块 / Bs4模块 / XPath模块)
文章目录
写在前面
使用目的:模块化程序,用于新手快速上手
程序片段及功能
1. Re模块
1.Re模块:筛选切割数据(与正则表达式配合使用效果更佳)
import re

# findall: collect every non-overlapping match of the pattern into a plain list of strings.
lst = re.findall(r"\d+", "XXXX:10086, XXXX:10010")
print(lst)

# finditer: the same matches, delivered lazily as Match objects; read each one via .group().
for match in re.finditer(r"\d+", "XXXX:10086, XXXX:10010"):
    print(match.group())

# search: scan the whole string and return the first Match found (None if nothing matches).
found = re.search(r"\d+", "XXXX:10086, XXXX:10010")
print(found.group())

# match: like search, except the pattern must succeed at the very start of the string.
found = re.match(r"\d+", "10086, XXXX:10010")
print(found.group())

# (?P<name>regex) gives a capture group a name, so individual fields can be
# pulled out of every match.
s = """
<div class='jay'><span id='1'>一号嘉宾</span></div>
<div class='jj'><span id='2'>二号嘉宾</span></div>
<div class='jolin'><span id='3'>三号嘉宾</span></div>
<div class='sylar'><span id='4'>四号嘉宾</span></div>
<div class='tory'><span id='5'>五号嘉宾</span></div>
"""
# re.S lets '.' also match newline characters.
obj = re.compile(
    r"<div class='.*?'><span id='(?P<id>\d+)'>(?P<wahaha>.*?)</span></div>",
    re.S,
)
result = obj.finditer(s)
for it in result:
    print(it.group("wahaha"), "\t", it.group("id"))
2.使用方法:
import re  # standard-library regular-expression support

# Pattern for a single address at the zhijieketang.com domain.
pattern = r'\w+@zhijieketang\.com'
email = 'tony_guan588@zhijieketang.com'  # candidate string to validate

# search returns a Match object on success, None otherwise.
m = re.search(pattern, email)
print(m)
# A non-None Match means the address is valid.
print('匹配' if m else '不匹配')

# Extract every 163.com address from a longer text.
p = r'\w+@163\.com'
text = '''
Tony's email is tony_guan588@163.com."
Tom's email is tom@163.com."
张三的邮箱是:zhang@163.com。"
'''
# findall gathers all non-overlapping matches into a list.
mlist = re.findall(p, text)
print(mlist)
2. Bs4模块
1.Bs4模块:主要的功能是解析和提取 HTML/XML 数据
import re
from bs4 import BeautifulSoup

html = """
<html><head><title>The Dormouse's story</title><title>The Dormouse's story2</title></head>
<body>
<p class="title" name="dromouse"><b>The Dormouse's story</b></p>
<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1"><!-- Elsie --></a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>
"""
soup = BeautifulSoup(html, "lxml")  # parse the HTML string with the lxml backend

# --- navigating with attribute access ---------------------------------
print(soup.title)  # first tag of that name: soup.<tagname>
print(soup.title.string)  # text inside the tag
print(soup.title.name)  # the tag's own name
print(soup.p.attrs["name"])  # attributes of a tag are exposed as a dict
print(soup.head.contents)  # direct children, as a list
for child in soup.head.children:  # direct children, as a generator
    print(child)

# --- find_all ----------------------------------------------------------
for tag in soup.find_all("a"):  # by tag name; yields a result set of Tag objects
    print(tag.string)
for tag in soup.find_all(re.compile("^b")):  # by regex on the tag name
    print(tag.string)
for tag in soup.find_all(id="link2"):  # by attribute value
    print(tag)
hits_exact = soup.find_all(text="Lacie")  # by exact string content
hits_list = soup.find_all(text=["Lacie", "Tillie"])  # any of several strings
hits_regex = soup.find_all(text=re.compile("Do"))  # strings matching a regex
print(hits_regex)

# --- CSS selectors: tag, class, and id ---------------------------------
selected = soup.select("a")  # by tag name
selected = soup.select(".sister")  # by class
selected = soup.select("#link2")  # by id
selected = soup.select("p #link1")  # descendant combination
selected = soup.select('a[href="http://example.com/tillie"]')  # by any other attribute
3. XPath模块
1.XPath模块:根据层级提取信息
from lxml import etree

xml = """
<book>
    <id>1</id>
    <name>1-1</name>
    <price>1-2</price>
    <nick>1-3</nick>
    <author>
        <nick id="10086">1-4.1</nick>
        <nick id="10010">1-4.2</nick>
        <nick class="joy">1-4.3</nick>
        <nick class="jolin">1-4.4</nick>
        <div>
            <nick>1-4.5</nick>
        </div>
        <span>
            <nick>1-4.6</nick>
        </span>
    </author>
    <partner>
        <nick id="ppc">1-5.1</nick>
        <nick id="ppbc">1-5.2</nick>
    </partner>
</book>
"""
# --- method one: parse from a string -----------------------------------
tree = etree.XML(xml)  # build an element tree from the XML text
result = tree.xpath("/book/name")  # '/' separates levels; the leading '/' is the root
result = tree.xpath("/book/name/text()")  # text() extracts the node's text
result = tree.xpath("/book/author//nick/text()")  # '//' reaches any descendant
result = tree.xpath("/book/author/*/nick/text()")  # '*' is a wildcard for one level
result = tree.xpath("/book//nick/text()")
result = tree.xpath("//nick[last()-1]/text()")  # text of each second-to-last <nick> among siblings
print(result)

# --- method two: parse from a file -------------------------------------
# NOTE(review): needs a file named "b.html" next to this script — confirm it exists.
tree = etree.parse("b.html")
result = tree.xpath("/html/body/ul/li[1]/a/text()")  # XPath positions are 1-based; [] indexes
result = tree.xpath("/html/body/ul/li/a/@href")
result = tree.xpath("/html/body/ol/li/a[@href='dapao']/text()")  # [@attr='value'] filters by attribute
li_nodes = tree.xpath("/html/body/ol/li")
for li in li_nodes:  # pull the text out of each <li>
    # a leading './' makes the lookup relative to the current node
    result = li.xpath("./a/text()")
    print(result)
    result2 = li.xpath("./a/@href")  # '@attr' reads an attribute value
    print(result2)
print(tree.xpath('/html/body/div[1]/text()'))
print(tree.xpath('/html/body/ol/li/a/text()'))
代码附件
- Python爬虫:基础操作02-1:Requests+Xpath 爬取网页多组文本(基础方法)
- Python爬虫:基础操作02-2:Requests+Xpath 爬取网页多组图片(类方法、重构url、文件保存)
- Python爬虫:基础操作02-3:Requests+Re 爬取网页多组文本(基础方法、csv文件保存)
- Python爬虫:基础操作02-4:Requests+Re 爬取网页多组文本(基础方法、重构url)
- Python爬虫:基础操作02-5:Requests+Bs4 爬取网页多组文本(基础方法、csv文件保存)
- Python爬虫:基础操作02-6:Requests+Bs4 爬取网页多组图片(基础方法、重构url、文件保存)
- Python爬虫:基础操作02-7:Requests+Xpath 爬取网页多组文本(基础方法)
- Python爬虫:基础操作02-8:Requests+Bs4 爬取网页多组文本(基础方法)