# 1. BeautifulSoup basics (part 2)
from bs4 import BeautifulSoup
import re
# Use a context manager so the file handle is always closed
# (the original opened baidu.html and never closed it).
with open("./baidu.html", 'rb') as file:
    html = file.read()
bs = BeautifulSoup(html, "html.parser")  # content to parse, parser backend
# 1. Tag: attribute access returns the FIRST matching tag.
print(bs.title)  # <title>Title</title>
print(bs.title.string)  # Title
print(bs.a)  # first <a> tag with everything inside it
print(type(bs.a))  # Tag — an HTML element
# 2. NavigableString: the text content inside a tag.
print(bs.a.attrs)
print(bs.a.string)
# 3. BeautifulSoup: represents the whole document.
print(type(bs))
print(bs.name)
print(bs.attrs)
print(bs)
# 4. Comment: a special NavigableString; printing it omits the comment markers.
print(type(bs.a.string))
# ------------------------------ usage ----------------------------------
# Traversing the document.
print(bs.head.contents)  # all children of the tag, as a list
print(bs.head.contents[3])  # pick one child by index
# Searching the document.
# (1) find_all()
# String filter: matches tag names exactly.
t_list = bs.find_all('a')
print(t_list)
# Regex filter: bs4 matches tag names via pattern.search().
t_list = bs.find_all(re.compile("a"))  # every tag whose name contains "a"
print(t_list)
# Function filter: find_all accepts a callable and keeps tags it returns True for.
def name_is_exists(tag):
    # True when the tag carries a "name" attribute.
    has_name = tag.has_attr("name")
    return has_name
t_list = bs.find_all(name_is_exists)
print(t_list)
# (2) kwargs filters: keyword arguments match tag attributes.
t_list = bs.find_all(id="head")
for i in t_list:
    print(i)
t_list = bs.find_all(class_=True)  # any tag that has a class attribute
print(t_list)
t_list = bs.find_all(href="http://news.baidu.com")
print(t_list)
# (3) text parameter (renamed to string= in bs4 >= 4.4; text= still works).
t_list = bs.find_all(text='hao123')
print(t_list)
t_list = bs.find_all(text=['hao123','地图','贴吧'])
print(t_list)
# Raw string for the regex: "\d" without the r-prefix is an invalid
# escape sequence (SyntaxWarning on Python 3.12+).
t_list = bs.find_all(text=re.compile(r"\d"))  # text nodes containing a digit
print(t_list)
# (4) limit caps the number of results.
t_list = bs.find_all('a', limit=3)
print(t_list)
# CSS selectors
print(bs.select('title'))           # by tag name
print(bs.select('.mnav'))           # by class name
print(bs.select('#ul'))             # by id
print(bs.select("a[class='bri']"))  # by attribute
t_list = bs.select("head > title")  # by hierarchy (direct child)
print(t_list[0])             # <title>Title</title>
print(t_list[0].get_text())  # Title
# 2. Application: scraping the Douban movie Top 250
import re
import urllib
import urllib.error
import urllib.request

from bs4 import BeautifulSoup
# Pre-compiled patterns for extracting fields from one movie's
# <div class="item"> HTML fragment. re.S lets '.' match newlines.
findLink = re.compile(r'<a href=(.*?)>')  # detail-page link: everything between 'href=' and the first '>'
findimg = re.compile(r'<img.*src="(.*?)"', re.S)  # poster image URL
findtitle = re.compile(r'<span class="title">(.*?)</span>', re.S)  # title(s): Chinese, plus optional foreign title
findscore= re.compile(r'<span class="rating_num" property="v:average">(.*?)</span>', re.S)  # rating score
findjudge = re.compile(r'<span>(\d+)人评价</span>', re.S)  # number of ratings
findInq = re.compile(r'<span class="inq">(.*)</span>', re.S)  # one-line summary (may be absent)
findBd = re.compile(r'<p class="">.*?</p>',re.S)  # director / cast / year block
def main():
    """Crawl the Douban Top250 listing, parse it, and save the results.

    Orchestrates the three steps: fetch+parse (getData) and save
    (saveData, currently an unimplemented stub — calling it is a no-op).
    """
    baseurl = "http://movie.douban.com/top250?start="
    # 1-2. Crawl the pages and parse each movie entry.
    datalist = getData(baseurl)
    # 3. Save the data. Raw string, so a single backslash is literal
    # (the original r'.\\...' produced a doubled backslash in the path).
    savepath = r'.\豆瓣电影top250.xls'
    saveData(savepath)
# Crawl and parse the listing pages.
def getData(baseurl, pages=1):
    """Fetch *pages* 25-item pages of the Top250 list and parse every movie.

    Args:
        baseurl: URL prefix; the item offset (page_index * 25) is appended.
        pages: number of pages to fetch (default 1, matching the original
            hard-coded range(0, 1)).

    Returns:
        A list with one entry per movie:
        [link, img_src, chinese_title, other_title_or_blank,
         score, judge_count, summary_or_blank, crew_info].
    """
    datalist = []
    for page in range(pages):
        url = baseurl + str(page * 25)
        html = askURL(url)  # page source for this offset
        # 2. Parse each movie card.
        soup = BeautifulSoup(html, "html.parser")
        for item in soup.find_all('div', class_="item"):  # one <div> per movie
            data = []  # all fields of a single movie
            item = str(item)
            # Detail-page link (first regex match).
            link = re.findall(findLink, item)[0]
            data.append(link)
            # Poster image URL. Take the first match: the original appended
            # the entire findall() list, making this column a list while
            # every other column is a string.
            imgsrc = re.findall(findimg, item)
            data.append(imgsrc[0] if imgsrc else " ")
            titles = re.findall(findtitle, item)
            if len(titles) == 2:
                ctitle = titles[0]  # Chinese title
                data.append(ctitle)
                otitle = titles[1].replace("/", "")  # foreign title, separator stripped
                data.append(otitle)
            else:
                data.append(titles[0])
                data.append(' ')  # keep the column, leave it blank
            score = re.findall(findscore, item)[0]  # rating score
            data.append(score)
            judgenum = re.findall(findjudge, item)[0]  # number of ratings
            data.append(judgenum)
            inq = re.findall(findInq, item)  # one-line summary, may be missing
            if len(inq) != 0:
                inq = inq[0].replace(". ", "")
                data.append(inq)
            else:
                data.append(" ")
            bd = re.findall(findBd, item)[0]  # director / cast block
            # Raw strings: '\s' in a plain string is an invalid escape
            # sequence (SyntaxWarning on Python 3.12+).
            bd = re.sub(r'<br(\s+)?/>(\s+)?', " ", bd)
            bd = re.sub('/', "", bd)
            data.append(bd.strip())  # trim surrounding whitespace
            print(data)
            datalist.append(data)
    return datalist
def askURL(url):
    """Fetch *url* and return the page source decoded as UTF-8.

    Sends a desktop-browser User-Agent header so the site serves the
    normal page. On URLError/HTTPError the code/reason is printed and an
    empty string is returned (best-effort behavior kept from the original).
    """
    # User-Agent: tells the server what kind of client we are
    # (i.e. what kind of response we can accept).
    head = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)"
                          " Chrome/89.0.4389.128 Safari/537.36"}
    request = urllib.request.Request(url, headers=head)
    html = ""
    try:
        # Context manager closes the HTTP response even on decode errors
        # (the original never closed it).
        with urllib.request.urlopen(request) as response:
            html = response.read().decode('utf-8')
    except urllib.error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
    return html
# 3. Save the data.
def saveData(savepath):
    """Persist the scraped movie data to *savepath* (.xls).

    Not implemented yet — placeholder referenced from main().
    """
    pass
# Script entry point: run the crawl only when executed directly.
if __name__=="__main__":
    main()