一、学习目标:

**1.1 学习beautifulsoup**
- 学习beautifulsoup,并使用beautifulsoup提取内容。
- 使用beautifulsoup提取丁香园论坛的回复内容。

**1.2 学习xpath**
- 学习xpath,使用lxml+xpath提取内容。
- 使用xpath提取丁香园论坛的回复内容。
- 参考资料:https://blog.csdn.net/naonao77/article/details/88129994
二、代码展示
2.1 学习beautifulsoup
from bs4 import BeautifulSoup
import requests


def get_url():
    """Fetch a DXY (丁香园) forum thread and print each reply with its author.

    Downloads the thread page with ``requests``, parses it with
    BeautifulSoup using the lxml backend, and for every ``<tbody>``
    prints the post body (``td.postbody``) followed by a separator and
    the author name (``div.auth``) when present.

    Returns:
        None. Output goes to stdout; network errors from ``requests``
        propagate to the caller.
    """
    url = 'http://www.dxy.cn/bbs/thread/626626#626626'
    # Send a desktop-browser User-Agent so the site serves the normal page.
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36'}
    result = requests.get(url=url, headers=headers)
    # Build the parse tree; result.text is the HTML, 'lxml' is the parser.
    soup = BeautifulSoup(result.text, 'lxml')
    # find_all(name=...) matches every tag with that name; each forum post
    # sits in its own <tbody>.
    for tbody in soup.find_all(name="tbody"):
        # find(name=..., class_=...) returns the first match or None.
        # Checking for None explicitly replaces the original bare
        # `except: pass`, which silently swallowed *all* errors.
        body_cell = tbody.find(name="td", class_="postbody")
        if body_cell is None:
            # Not a post row (e.g. a layout table) — skip it.
            continue
        # strip=True trims whitespace around the extracted text.
        content = body_cell.get_text(strip=True)
        print(content)
        print('-' * 50)
        author_div = tbody.find(name="div", class_="auth")
        if author_div is not None:
            user = author_div.get_text(strip=True)
            print(user)


if __name__ == "__main__":
    get_url()
2.2 学习xpath
import requests
from lxml import etree

# Scrape the same DXY forum thread as in 2.1, this time with lxml + XPath,
# printing "author:post body" pairs to stdout.
url = 'http://www.dxy.cn/bbs/thread/626626#626626'
# Desktop-browser User-Agent so the site serves the normal page.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.131 Safari/537.36'}
response = requests.get(url=url, headers=headers)
# etree.HTML builds the tree and auto-repairs malformed markup.
html = etree.HTML(response.text)
# etree.tostring would show the repaired HTML as bytes; decode for a str:
# print(etree.tostring(html).decode('utf-8'))
contents = html.xpath('//td[@class="postbody"]')  # post bodies (Element list)
users = html.xpath('//div[@class="auth"]')        # author names (Element list)
# zip pairs each author with its post body and stops at the shorter list,
# so a mismatch between the two XPath result sets cannot raise IndexError
# (the original indexed users[i] by contents' length).
for user, content in zip(users, contents):
    # string(.) flattens an Element's text nodes into one string;
    # strip() removes surrounding whitespace from the post body.
    content1 = content.xpath('string(.)').strip()
    user1 = user.xpath('string(.)')
    print(user1 + ':' + content1)
    print('=' * 100)