将html文档转换成BeautifulSoup对象,然后调用属性和方法进行定位解析
主要语法:
-
创建BeautifulSoup对象:
本地:BeautifulSoup(open('本地文件'), 'lxml')
网络:BeautifulSoup('网络请求到的页面数据','lxml')
-
获取a标签属性:
soup.a['href']
-
获取文本内容:
soup.a.string 相当于xpath中的 /text()
soup.a.text 相当于xpath中的 //text()
-
soup.find('a', title="xxx")
-
soup.find('div',class_='song')
# Use bs4 to scrape every chapter of "Romance of the Three Kingdoms" from
# shicimingju.com and store the text on the local disk.
# Table of contents: http://www.shicimingju.com/book/sanguoyanyi.html
import requests
from bs4 import BeautifulSoup as BS

# Target table-of-contents URL.
url = 'http://www.shicimingju.com/book/sanguoyanyi.html'
headers = {
"User-Agent":"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36"
}
# Fetch the table-of-contents page; fail fast on HTTP errors instead of
# silently parsing an error page.
response = requests.get(url=url, headers=headers)
response.raise_for_status()
page_text = response.text
# Parse the page and collect all chapter links.
toc_soup = BS(page_text, 'lxml')
a_list = toc_soup.select('.book-mulu > ul > li > a')
# Use a context manager so the file is always closed, even on errors.
with open('./sanguo.txt', 'w', encoding='utf-8') as f:
    for a in a_list:
        # Chapter title, e.g. "第一回·宴桃园豪杰三结义 ..."
        title = a.string
        # href is relative, e.g. /book/sanguoyanyi/1.html;
        # full URL: http://www.shicimingju.com/book/sanguoyanyi/1.html
        content_url = 'http://www.shicimingju.com' + a['href']
        content_page = requests.get(url=content_url, headers=headers).text
        # Separate soup per chapter page; do not shadow the ToC soup.
        chapter_soup = BS(content_page, 'lxml')
        content_div = chapter_soup.find('div', class_='chapter_content')
        # Guard against layout changes / failed requests: .text on None
        # would raise AttributeError and abort the whole run.
        if content_div is None:
            continue
        f.write(title + '\n' + content_div.text + '\n\n')
print('已完成')