利用它可以不用编写正则表达式即可方便的实现网页信息的提取。
pip3 install beautifulsoup4
用法讲解:
常用解析库:
lxml HTML解析器
lxml XML解析器
# Basic usage: parse an HTML document and inspect the result.
from bs4 import BeautifulSoup
soup = BeautifulSoup(html,'lxml')
print(soup.prettify()) # pretty-print the parsed document with indentation
print(soup.title.string)  # text content of the <title> tag
------------------------------------------------------------------
#标签选择器:
##选择元素:
# Tag selectors: access a tag directly as an attribute of the soup.
from bs4 import BeautifulSoup
soup = BeautifulSoup(html,'lxml')
print(soup.title)        # the <title> tag
print(type(soup.title))  # bs4 Tag object
print(soup.head)
print(soup.p)            # NOTE: only the first matching tag is returned
如果有很多满足条件,只会输出第一个。
##获取名称:
# Get the name of a tag.
from bs4 import BeautifulSoup
soup = BeautifulSoup(html,'lxml')
print(soup.title.name)  # prints the tag name, i.e. 'title'
##获取属性:
# Get a tag's attributes.
from bs4 import BeautifulSoup
soup = BeautifulSoup(html,'lxml')
print(soup.p.attrs['name'])# value of the first <p>'s 'name' attribute
print(soup.p['name'])  # shorthand subscript form -- same result as above
##获取内容:
# Get a tag's text content.
from bs4 import BeautifulSoup
soup = BeautifulSoup(html,'lxml')
print(soup.p.string)  # direct string child of the first <p>
##嵌套选择:
# Nested selection: tag attribute access can be chained.
from bs4 import BeautifulSoup
soup = BeautifulSoup(html,'lxml')
print(soup.head.title.string)
##1、子节点和子孙节点:
# Direct children of a tag.
from bs4 import BeautifulSoup

soup = BeautifulSoup(html, 'lxml')
print(soup.p.children)  # a generator/iterator, not a list
# Fix: the loop body must be indented (original was a syntax error).
for i, child in enumerate(soup.p.children):
    print(i, child)
##2
# .contents returns the direct children as a list.
from bs4 import BeautifulSoup
soup = BeautifulSoup(html,'lxml')
print(soup.p.contents)# all direct children of <p>, as a list
##3
# Descendants: every node below the tag, recursively.
from bs4 import BeautifulSoup

soup = BeautifulSoup(html, 'lxml')
print(soup.p.descendants)  # a generator/iterator
# Fix: the loop body must be indented (original was a syntax error).
for i, child in enumerate(soup.p.descendants):
    print(i, child)
##1、父节点和祖先节点:
# Parent node.
from bs4 import BeautifulSoup
soup = BeautifulSoup(html,'lxml')
print(soup.p.parent) # a single Tag (the direct parent), not an iterator
##2
# Ancestors: .parents yields every ancestor up to the document root.
from bs4 import BeautifulSoup

soup = BeautifulSoup(html, 'lxml')
# Fix: wrap enumerate() in list() -- printing the bare enumerate object
# only shows its repr, not the ancestor tags.
print(list(enumerate(soup.p.parents)))
##兄弟节点
# Sibling nodes.
from bs4 import BeautifulSoup  # fix: the module is 'bs4', not 'bst'

soup = BeautifulSoup(html, 'lxml')
print(list(enumerate(soup.a.next_siblings)))      # siblings after the first <a>
print(list(enumerate(soup.a.previous_siblings)))  # fix: 'privious' typo
------------------------------------------------------------------
#标准选择器
find_all(name,attrs,recursive,text,**kwargs)
可根据标签名、属性、内容查找文档
##name
# find_all by tag name.
from bs4 import BeautifulSoup

soup = BeautifulSoup(html, 'lxml')  # fix: parser name is 'lxml', not 'html'
print(soup.find_all('ul'))  # fix: stray quote in the original ''ul
print(type(soup.find_all('ul')[0]))  # each element of the result list is a Tag
# find_all works on tags too, so searches can be nested.
from bs4 import BeautifulSoup

soup = BeautifulSoup(html, 'lxml')
# Fix: the loop body must be indented (original was a syntax error).
for ul in soup.find_all('ul'):
    print(ul.find_all('li'))
##attrs
# find_all by attributes.
from bs4 import BeautifulSoup
soup = BeautifulSoup(html,'lxml')
print(soup.find_all(attrs={'id':'list-1'}))
print(soup.find_all(attrs = {'name':'element'}))# matches the 'name' attribute
print(soup.find_all(id = 'list-1'))# keyword shorthand for the id lookup above
print(soup.find_all(class_='element')) # class_ has a trailing underscore: 'class' is a Python keyword.
# NOTE(review): the original claimed all four lines print the same thing;
# that holds only if the sample HTML's id/name/class attributes coincide -- verify.
##text
# find_all by text content.
from bs4 import BeautifulSoup
soup = BeautifulSoup(html,'lxml')
print(soup.find_all(text='Foo'))# returns the matching strings, not the tags
#find(name,attrs,recursive,text,**kwargs)
返回单个元素.
# find returns only the first match.
from bs4 import BeautifulSoup
soup = BeautifulSoup(html,'lxml')
print(soup.find('ul'))
print(type(soup.find('ul')))
print(soup.find('page'))# presumably no <page> tag exists -> prints None
#find_parents(),find_parent()
find_next_siblings() find_next_sibling()
find_all_next() find_next()
find_all_previous() find_previous()
#CSS选择器
通过select()直接传入CSS选择器即可完成选择
# CSS selectors via select().
from bs4 import BeautifulSoup

soup = BeautifulSoup(html, 'lxml')
# Fix: 'pirnt' typo. Also added the descendant-combinator space:
# '.panel .panel-heading' selects .panel-heading INSIDE .panel, whereas the
# original '.panel.panel-heading' requires one element with both classes.
print(soup.select('.panel .panel-heading'))  # classes use '.'
print(soup.select('ul li'))                  # <li> elements inside <ul>
print(soup.select('#list-2 .element'))       # ids use '#'
print(type(soup.select('ul')[0]))            # results are Tag objects
# select() works on tags too, so selections can be nested.
from bs4 import BeautifulSoup

soup = BeautifulSoup(html, 'lxml')
# Fix: the loop body must be indented (original was a syntax error).
for ul in soup.select('ul'):
    print(ul.select('li'))
##获取属性
# Getting attributes from selected tags.
from bs4 import BeautifulSoup

soup = BeautifulSoup(html, 'lxml')
# Fix: original fused the first print onto the 'for' line and used an
# undefined name 'u'; both prints belong inside the loop over 'ul'.
for ul in soup.select('ul'):
    print(ul['id'])        # subscript shorthand
    print(ul.attrs['id'])  # equivalent explicit form
##获取内容
# Getting text from selected tags.
from bs4 import BeautifulSoup

soup = BeautifulSoup(html, 'lxml')
# Fix: loop variable was 'ul' while the body used an undefined 'li';
# the body was also unindented (syntax error).
for li in soup.select('li'):
    print(li.get_text())
#标签选择器
#标准选择器
#CSS选择器
总结:
* 推荐使用lxml解析库,必要时使用html.parser
* 标签选择筛选功能弱但是速度快。
* 建议使用find(),find_all()查询匹配单个结果或者多个结果。
* 如果对CSS选择器熟悉可以使用select()
* 记住常用的获取属性和文本值的方法。