BeautifulSoup安装
1、以管理员运行cmd
2、输入 pip install beautifulsoup4
BeautifulSoup库的基本元素
BeautifulSoup库的理解:BeautifulSoup库是解析、遍历、维护“标签树”的功能库
基本元素 | 说明 |
---|---|
Tag | 标签,最基本的信息组织单元,分别用<>和</>标明开头和结尾 |
Name | 标签的名字,< p >…< /p>的名字是‘p’,格式:< tag>.name |
Attributes | 标签的属性,字典形式组织,格式:< tag>.attrs |
NavigableString | 标签内非属性字符串,<>…</>中字符串,格式:< tag>.string |
Comment | 标签内字符串的注释部分,一种特殊的NavigableString子类型(Comment) |
- 1、实例
以https://python123.io/ws/demo.html为练习对象
以下图为网页源码
1、打印title
from bs4 import BeautifulSoup
import requests
r = requests.get(r'https://python123.io/ws/demo.html')
demo = r.text
soup = BeautifulSoup(demo,"html.parser")
print(soup.title)
结果:
<title>This is a python demo page</title>
2、获取标签信息
from bs4 import BeautifulSoup
import requests
r = requests.get(r'https://python123.io/ws/demo.html')
demo = r.text
soup = BeautifulSoup(demo,"html.parser")
print(soup.a) #获取a标签,当有多个a标签的时候,soup.a只会得到第一个a标签
print(soup.a.name) #获取a标签的名字
print(soup.a.parent.name) #获取a标签父级名字
print(soup.a.parent.parent.name) #获取a标签父级的父级的名字
结果:
<a class="py1" href="http://www.icourse163.org/course/BIT-268001" id="link1">Basic Python</a>
a
p
body
3、获取标签的属性
from bs4 import BeautifulSoup
import requests
r = requests.get(r'https://python123.io/ws/demo.html')
demo = r.text
soup = BeautifulSoup(demo,"html.parser")
tag = soup.a
print(tag.attrs)
结果:
{'href': 'http://www.icourse163.org/course/BIT-268001', 'class': ['py1'], 'id': 'link1'}
4、获取标签的NavigableString
from bs4 import BeautifulSoup
import requests
r = requests.get(r'https://python123.io/ws/demo.html')
demo = r.text
soup = BeautifulSoup(demo,"html.parser")
tag = soup.a
print(tag.string)
结果:
Basic Python
5、Comment(注释类型的判别)
from bs4 import BeautifulSoup
soup = BeautifulSoup("<b><!--this is a comment--></b><p>this is not a comment</p>","html.parser")
tag = soup.b
print(tag.string)
print(type(tag.string))
tag1 = soup.p
print(tag1.string)
print(type(tag1.string))
结果:
this is a comment
<class 'bs4.element.Comment'>
this is not a comment
<class 'bs4.element.NavigableString'>
由结果可见,.string 取到的内容要用 type() 判断:注释是 Comment 类型,普通文本是 NavigableString 类型
BeautifulSoup库的解析器
解析器 | 使用方法 | 条件 |
---|---|---|
bs4的HTML解析器 | BeautifulSoup(mk,“html.parser”) | 安装bs4库 |
lxml的HTML解析器 | BeautifulSoup(mk,“lxml”) | pip install lxml |
lxml的xml解析器 | BeautifulSoup(mk,“xml”) | pip install lxml |
html5lib解析器 | BeautifulSoup(mk,“html5lib”) | pip install html5lib |
基于bs4库的HTML内容遍历方法
- 1、标签树的下行遍历
属性 | 说明 |
---|---|
.contents | 子节点的列表,将< tag>所有的儿子节点存入列表 |
.children | 子节点的迭代类型,与 .contents类似,用于循环遍历儿子节点 |
.descendants | 子孙节点的迭代类型,包含所有子孙节点(即一个节点后面所有的节点信息),用于循环遍历 |
练习:
1、获取子节点信息
from bs4 import BeautifulSoup
import requests
url = "https://python123.io/ws/demo.html"
r = requests.get(url)
demo = r.text
soup = BeautifulSoup(demo,"html.parser")
print(soup.body.contents) #获取子节点信息
print(len(soup.body.contents)) #获取子节点的个数
print(soup.body.contents[1]) #通过坐标获取单个子节点
结果:
['\n', <p class="title"><b>The demo python introduces several python courses.</b></p>, '\n', <p class="course">Python is a wonderful general-purpose programming language. You can learn Python from novice to professional by tracking the following courses:
<a class="py1" href="http://www.icourse163.org/course/BIT-268001" id="link1">Basic Python</a> and <a class="py2" href="http://www.icourse163.org/course/BIT-1001870001" id="link2">Advanced Python</a>.</p>, '\n']
5
<p class="title"><b>The demo python introduces several python courses.</b></p>
2、遍历
from bs4 import BeautifulSoup
import requests
url = "https://python123.io/ws/demo.html"
r = requests.get(url)
demo = r.text
soup = BeautifulSoup(demo,"html.parser")
# 遍历儿子节点
print(type(soup.body.children))
for chil in soup.body.children:
print(chil)
#遍历子孙节点
for chil in soup.body.descendants:
print(chil)
- 2、标签树的上行遍历
属性 | 说明 |
---|---|
.parent | 子节点的父标签 |
.parents | 节点先辈标签的迭代类型,用于循环遍历先辈节点 |
练习:
1、获取父节点
from bs4 import BeautifulSoup
import requests
url = "https://python123.io/ws/demo.html"
r = requests.get(url)
demo = r.text
soup = BeautifulSoup(demo,"html.parser")
print(soup.title.parent)
结果:
<head><title>This is a python demo page</title></head>
2、遍历父节点
from bs4 import BeautifulSoup
import requests
url = "https://python123.io/ws/demo.html"
r = requests.get(url)
demo = r.text
soup = BeautifulSoup(demo,"html.parser")
# 遍历先辈节点
for parent in soup.a.parents:
if parent is None:
print(parent)
else:
print(parent.name)
结果:
p
body
html
[document]
- 3、标签树的平行遍历
平行遍历发生在同一个父节点下的各节点间
属性 | 说明 |
---|---|
.next_sibling | 返回按照HTML文本顺序的下一个平行节点标签 |
.previous_sibling | 返回按照HTML文本顺序的上一个平行节点标签 |
.next_siblings | 迭代类型,返回按照HTML文本顺序的后续所有平行节点标签 |
.previous_siblings | 迭代类型,返回按照HTML文本顺序的前续所有平行节点标签 |
练习:
1、获取平行节点
from bs4 import BeautifulSoup
import requests
url = "https://python123.io/ws/demo.html"
r = requests.get(url)
demo = r.text
soup = BeautifulSoup(demo,"html.parser")
print(soup.a.next_sibling) #由此可见,标签的平行标签不一定是一个标签
print(soup.a.next_sibling.next_sibling)
print(soup.a.previous_sibling)
结果:
and
<a class="py2" href="http://www.icourse163.org/course/BIT-1001870001" id="link2">Advanced Python</a>
Python is a wonderful general-purpose programming language. You can learn Python from novice to professional by tracking the following courses:
2、遍历
from bs4 import BeautifulSoup
import requests
url = "https://python123.io/ws/demo.html"
r = requests.get(url)
demo = r.text
soup = BeautifulSoup(demo,"html.parser")
# 遍历后续节点
for sibling in soup.a.next_siblings:
print(sibling)
# 遍历前续节点
for sibling in soup.a.previous_siblings:
print(sibling)
基于bs4库的HTML格式化和编码
格式化:bs4库的prettify()方法
from bs4 import BeautifulSoup
import requests
url = "https://python123.io/ws/demo.html"
r = requests.get(url)
demo = r.text
soup = BeautifulSoup(demo,"html.parser")
print(soup)
print(soup.prettify())
结果:
<html><head><title>This is a python demo page</title></head>
<body>
<p class="title"><b>The demo python introduces several python courses.</b></p>
<p class="course">Python is a wonderful general-purpose programming language. You can learn Python from novice to professional by tracking the following courses:
<a class="py1" href="http://www.icourse163.org/course/BIT-268001" id="link1">Basic Python</a> and <a class="py2" href="http://www.icourse163.org/course/BIT-1001870001" id="link2">Advanced Python</a>.</p>
</body></html>
<html>
<head>
<title>
This is a python demo page
</title>
</head>
<body>
<p class="title">
<b>
The demo python introduces several python courses.
</b>
</p>
<p class="course">
Python is a wonderful general-purpose programming language. You can learn Python from novice to professional by tracking the following courses:
<a class="py1" href="http://www.icourse163.org/course/BIT-268001" id="link1">
Basic Python
</a>
and
<a class="py2" href="http://www.icourse163.org/course/BIT-1001870001" id="link2">
Advanced Python
</a>
.
</p>
</body>
</html>
由结果可以看得出来,使用prettify()方法之后,它在每个标签的后面都加了换行符\n,使结果更加美观地展示出来
信息标记
信息标记的三种方式:
- 1、xml
- 2、json
- 3、yaml
三种信息标记形式的比较:
XML:
1、最早的通用信息标记语言,可扩展性好,但繁琐
2、Internet上的信息交互与传递
json:
1、信息有类型,适合程序处理(js),较xml简洁
2、移动应用云端和节点的信息通信,无注释
YAML:
1、信息无类型,文本信息比例最高,可读性好
2、各类系统的配置文件,有注释易读
基于bs4库的HTML内容查找方法
<>.find_all(name,attrs,recursive,string,**kwargs)
返回一个列表类型,存储查找的结果
name:对标签名称的检索字符串
例:
from bs4 import BeautifulSoup
import requests
url = "https://python123.io/ws/demo.html"
r = requests.get(url)
demo = r.text
soup = BeautifulSoup(demo,"html.parser")
print(soup.find_all("a")) #也可以使用正则获取包含a的
attrs:对标签属性值的检索字符串,可标注属性检索
from bs4 import BeautifulSoup
import requests
url = "https://python123.io/ws/demo.html"
r = requests.get(url)
demo = r.text
soup = BeautifulSoup(demo,"html.parser")
print(soup.find_all("p","course")) #查询p标签中包含course的值
recursive:是否对子孙全部检索,默认为True
from bs4 import BeautifulSoup
import requests
url = "https://python123.io/ws/demo.html"
r = requests.get(url)
demo = r.text
soup = BeautifulSoup(demo,"html.parser")
print(soup.find_all("p",recursive=False))
string:<>…</>中字符串区域的检索字符串
from bs4 import BeautifulSoup
import requests
url = "https://python123.io/ws/demo.html"
r = requests.get(url)
demo = r.text
soup = BeautifulSoup(demo,"html.parser")
print(soup.find_all(string="Basic Python"))
扩展方法
方法 | 说明 |
---|---|
<>.find() | 搜索且只返回一个结果,Tag类型,同.find_all()参数 |
<>.find_parents() | 在先辈节点中搜索,返回列表类型,同.find_all()参数 |
<>.find_parent() | 在先辈节点中返回一个结果,Tag类型,同.find_all()参数 |
<>.find_next_siblings() | 在后续平行节点中搜索,返回列表类型,同.find_all()参数 |
<>.find_next_sibling() | 在后续平行节点中返回一个结果,Tag类型,同.find_all()参数 |
<>.find_previous_siblings() | 在前续平行节点中搜索,返回列表类型,同.find_all()参数 |
<>.find_previous_sibling() | 在前续平行节点中返回一个结果,Tag类型,同.find_all()参数 |
- 1、实例1,在最好大学网获取前20的大学排名
from bs4 import BeautifulSoup
import requests
import bs4
def get_url(url):
    """Fetch *url* and return the decoded response body, or "" on any request error.

    Uses apparent_encoding so pages whose HTTP headers lie about the charset
    (common on older Chinese sites) still decode correctly.
    """
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()  # raise HTTPError for 4xx/5xx status codes
        r.encoding = r.apparent_encoding  # guess encoding from content, not headers
        return r.text
    except requests.RequestException:
        # Narrowed from a bare `except:` which also swallowed KeyboardInterrupt
        # and SystemExit; the unreachable trailing `return ""` was removed.
        return ""
def get_txt(ulist, html):
    """Parse the ranking page *html* and append [rank, name, score] rows to *ulist*."""
    soup = BeautifulSoup(html, "html.parser")
    for row in soup.find("tbody").children:
        # .children also yields NavigableString whitespace nodes; keep real tags only
        if not isinstance(row, bs4.element.Tag):
            continue
        cells = row("td")  # tag(name) is shorthand for tag.find_all(name)
        ulist.append([cells[0].string, cells[1].string, cells[3].string])
def print_txt(ulist, num):
    """Pretty-print the first *num* rows of *ulist* as an aligned three-column table.

    chr(12288) is the full-width (CJK) space; using it as the fill character for
    the school-name column keeps Chinese text aligned in fixed-width output.
    Format spec {1:{3}^10}: field 1, filled with argument 3, centered, width 10.
    """
    fmt = "{0:^10}\t{1:{3}^10}\t{2:^10}"
    cjk_space = chr(12288)
    print(fmt.format("排名", "学校名称", "总分", cjk_space))
    for idx in range(num):
        row = ulist[idx]
        print(fmt.format(row[0], row[1], row[2], cjk_space))
def main():
    """Fetch the 2018 Best Chinese Universities ranking page and print the top 20."""
    uinfo = []
    url = "http://www.zuihaodaxue.com/zuihaodaxuepaiming2018.html"
    html = get_url(url)
    get_txt(uinfo, html)
    print_txt(uinfo, 20)  # show only the top-20 rows


if __name__ == "__main__":
    # Guard so importing this module doesn't immediately hit the network.
    main()