# -*- coding: utf-8 -*-
"""Scrape the "Python 100 Examples" exercises from runoob.com.

Steps:
1. Fetch the index page and collect the relative URL of each exercise.
2. Fetch every exercise page, extract the title, problem statement,
   program analysis and source code, and append them to ``100-py.csv``.
"""
from bs4 import BeautifulSoup
import requests

url = 'https://www.runoob.com/python/python-100-examples.html'

# Request headers: pose as a regular browser, since some sites reject
# the default python-requests User-Agent.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                  'AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/70.0.3538.102 Safari/537.36'
}

# Fetch the index page and decode it explicitly as UTF-8.
r = requests.get(url, headers=headers).content.decode('utf-8')
# print(r)  # debug

# Parse the raw HTML into a searchable document tree.
soup_r = BeautifulSoup(r, 'lxml')
# print(type(soup_r))  # debug: sanity-check the parse

# Each exercise is an <a> inside the <ul> of the #content div; collect
# every href (relative links such as /python/python-exercise-example1.html).
re_a = soup_r.find(id='content').ul.find_all('a')
lis = [a.attrs['href'] for a in re_a]
print(lis)

# Fetch each exercise page and append its fields to the output file.
# Open the file once, outside the loop, rather than reopening it in
# append mode for every exercise.
with open('100-py.csv', 'a+', encoding='utf-8') as file:
    for x in lis:
        dic = {}
        # Request the detail page (hrefs are relative to the site root).
        ar = requests.get('https://www.runoob.com' + x,
                          headers=headers).content.decode('utf-8')
        soup_ar = BeautifulSoup(ar, 'lxml')

        content = soup_ar.find(id='content')
        # 1. Title: the page's <h1>.
        dic['title'] = content.h1.text
        # 2. Problem statement: the second <p> of the content div.
        dic['tm'] = content.find_all('p')[1].text
        # 3. Program analysis: the third <p>.
        dic['cxfx'] = content.find_all('p')[2].text
        # 4. Source code: normally inside a .hl-main highlight element.
        #    Exercise 41's markup lacks it, in which case `find` returns
        #    None and .text raises AttributeError — fall back to <pre>.
        try:
            dic['ydm'] = soup_ar.find(class_='hl-main').text
        except AttributeError:
            dic['ydm'] = soup_ar.find('pre').text

        # Persist the four fields, separated by a marker line.
        file.write(dic['title'] + '\n')
        file.write(dic['tm'] + '\n')
        file.write(dic['cxfx'] + '\n')
        file.write(dic['ydm'] + '\n')
        file.write('*' * 50 + '\n')