- 如果文件中出现中文注释,Run 时会报编码错误,需要在文件最开始声明编码:
# coding=utf-8
# 要写在文件最开始!
安装部分
pip install lxml
# 安装库
# Requirement already satisfied (use --upgrade to upgrade): lxml in c:\python27\lib\site-packages
pip freeze
# 查看已安装的库
在Windows下面如何安装Beautiful Soup:
1.到http://www.crummy.com/software/BeautifulSoup/
2.下载完成之后需要解压缩,假设放到D:/python下。
3.运行cmd,切换到D:/python/beautifulsoup4-4.1.3/目录下(根据自己解压缩后的目录和下载的版本号修改),
cd /d D:/python/beautifulsoup4-4.1.3
4.运行命令:
setup.py build
setup.py install
5.在IDE下from bs4 import BeautifulSoup,没有报错说明安装成功。
Beautiful soup
# coding=utf-8
import re

import bs4
from bs4 import BeautifulSoup
# Sample HTML document (the "three sisters" example from the official
# BeautifulSoup documentation) used by every demo below.  Note that the
# <a id="link1"> element contains only an HTML comment, which bs4 exposes
# as a Comment node rather than a plain NavigableString.
html = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title" name="dromouse"><b>The Dormouse's story</b></p>
<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1"><!-- Elsie --></a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>
"""
soup = BeautifulSoup(html)
# soup = BeautifulSoup(open('index.htm'))
# print soup.prettify()
# 格式化输出
# --------------tag
# print soup.p.attrs
# print soup.p.get('class')
# print soup.p['class']
# 打印p标签的属性
# ------------- NavigableString
# print soup.p.string
# print type(soup.p.string)
# print type(soup.name)
# print soup.name
# print soup.attrs
# ------------- Comment
# 有注释的内容会被认为是Comment-> 去除了注释符号、但内容还在、所以在使用前最好判断Comment
print soup.a
print soup.a.string
if type(soup.a.string) == bs4.element.Comment:
print soup.a.string
# 子节点_列表输出
print soup.head.contents
print soup.head.children
# 子节点_列表生成器输出
for child in soup.body.children:
print child
# 遍历所有子孙节点
for child in soup.descendants:
print child.string
# 获取多个内容
for string in soup.strings:
print eval(repr(string))
# 同上、去除了空格
for string in soup.stripped_strings:
print repr(string)
# .parent -> 父节点
p = soup.p
print p.parent.name
content = soup.head.title.string
print content.parent.name
# .parents -> 全部父节点
for parent in content.parents:
print parent.name
# .next_sibling .previous_sibling -> 兄弟节点
print soup.p.next_sibling
print soup.p.prev_sibling
print soup.p.next_sibling.next_sibling
for sibling in soup.a.next_siblings:
print(repr(sibling))
# .next_element .previous_element -> 前后节点
print soup.head.next_element
for element in soup.next_elements:
print(repr(element))
# 搜索 文档树 find_all(name, attrs, recursive, text, **kwargs)
print soup.find_all('b')
print soup.find_all('a')
# 传正则 -> 匹配所有以b开头的标签
for tag in soup.find_all(re.compile('^b')):
print(tag.name)
# 传列表 -> 找到文档中所有<a>标签和<b>标签
print soup.find_all(['a', 'b'])
# 传True —> 所有的tag,不返回字符串节点
for tag in soup.find_all(True):
print(tag.name)
# 传方法 -> 过滤器 如果没有合适过滤器,
# 那么还可以定义一个方法,方法只接受一个元素参数 [4] ,
# 如果这个方法返回 True
# 表示当前元素匹配并且被找到,
# 如果不是则反回 False
def has_class_but_no_id(tag):
    """Custom find_all() filter.

    Return True for tags that carry a 'class' attribute but no 'id'
    attribute, False otherwise.
    """
    # BUGFIX: the return statement had lost its indentation in the
    # original, which is a syntax error in Python.
    return tag.has_attr('class') and not tag.has_attr('id')
print soup.find_all(has_class_but_no_id)
# keyword 参数 -> 搜索时会把该参数当作指定名字tag的属性来搜索
print soup.find_all(id='link2')
print soup.find_all(href=re.compile('elsie'))
print soup.find_all(href=re.compile('elsie'), id='link1')
print soup.find_all("a", class_="sister")
print soup.find_all(attrs={"data-foo": "value"})
print soup.find_all(text="Tillie")
print soup.find_all(text=re.compile('Dormouse'))
# limit -> 限制返回的数量
print soup.find_all("a", limit=2)
# recursive 只搜索tag的直接子节点
print soup.html.find_all("title", recursive=True)
# find 直接返回结果
print soup.find("b")
soup.findParents()
soup.findParent
soup.find_next_siblings()
soup.find_next_sibling()
soup.find_previous_siblings()
soup.find_previous_sibling()
soup.find_all_next()
soup.find_next()
soup.find_all_previous()
soup.find_previous()
# css选择器
print soup.select('title')
print soup.select('.sister')
print soup.select('#link1')
print soup.select('p #link1')
print soup.select('head > title')
print soup.select('p a[href="http://example.com/elsie"]')