# Libraries used:
# - BeautifulSoup: HTML parsing / locating page content
# - openpyxl: reading and writing Excel .xlsx files
# - xlwt: can only create and write new legacy .xls workbooks
import requests
from bs4 import BeautifulSoup
from openpyxl import load_workbook
import xlwt
# HTTP headers sent with every request; a mobile UA keeps the pages lightweight.
headers = {
    "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version /13.0.3 Mobile/15E148 Safari/604.1",
    "Connection": "keep-alive"
}
# Step 1: collect the title and link of every article from the site's
# listing pages (3 pages in total).
data = []
for page in range(1, 4):
    page_url = 'https://cn.ecbos.com/insights_' + str(page) + '.html'
    # BUG FIX: requests.get()'s second positional argument is `params`,
    # not headers — the original call never actually sent the headers.
    response = requests.get(page_url, headers=headers)
    soup = BeautifulSoup(response.content, "lxml")
    data += soup.findAll('a', class_='New-list-session-header')
# Step 2: create a new workbook/worksheet and write the three column headers.
# BUG FIX: the original code used xlwt, which writes the legacy .xls (BIFF)
# format — saving that under a .xlsx name produces a file that phase 2's
# load_workbook() cannot open.  Use openpyxl (already a dependency of this
# script) so the saved file is a genuine .xlsx.
from openpyxl import Workbook

work_book = Workbook()
sheet = work_book.active
sheet.title = '环保法规专家观点'
# Header row (openpyxl rows/columns are 1-indexed).
# BUG FIX: the third header was originally written to column 2,
# overwriting the link header — it belongs in column 3.
sheet.cell(row=1, column=1, value='文章标题')
sheet.cell(row=1, column=2, value='地址链接')
sheet.cell(row=1, column=3, value='文章内容')
# Step 3: write each collected <a> tag's text and href into the sheet.
row_num = 2  # data starts directly below the header row
for item in data:
    sheet.cell(row=row_num, column=1, value=item.getText())
    sheet.cell(row=row_num, column=2, value=item.get('href'))
    row_num += 1
# Step 4: save the workbook to the local path.
file_name = r'D:\阿里云天池\爬虫\环保法规爬虫\data.xlsx'
work_book.save(file_name)
# Phase 2: crawl the body text of each article from the collected links.
import requests
from bs4 import BeautifulSoup
from openpyxl import load_workbook
# Step 1 (phase 2): open the local Excel file written in phase 1.
file_name = r'D:\阿里云天池\爬虫\环保法规爬虫\data.xlsx'
workbook = load_workbook(file_name)
# FIX: Workbook.get_sheet_by_name() is deprecated (removed in modern
# openpyxl) — use subscript access instead.
sheet_1 = workbook['环保法规专家观点']
headers = {
    "User-Agent": "Mozilla/5.0 (iPad; CPU OS 11_0 like Mac OS X) AppleWebKit/604.1.34 (KHTML, like Gecko) Version/11.0 Mobile/15A5341f Safari/604.1",
    'Connection': 'close'
}
# Step 2 (phase 2): define the crawler function (fetches the article text
# and writes it into column 3 of the sheet).
def get_text():
    """Fetch the page at the module-global ``url``, concatenate the text of
    all of its <p> tags, and store the result in column 3 of row ``i``.

    NOTE(review): relies on the globals ``url``, ``i``, ``headers`` and
    ``sheet_1`` being set by the driver loop below.
    """
    # BUG FIX: requests.get()'s second positional argument is `params`,
    # not headers — the headers were silently never sent.
    response = requests.get(url, headers=headers)
    soup = BeautifulSoup(response.content, 'html.parser')
    # Join the text of every paragraph into one string.
    text = ''.join(p.getText() for p in soup.findAll('p'))
    sheet_1.cell(i, 3, text)
# Step 3 (phase 2): main driver — walk every data row, rebuild the absolute
# article URL from the relative link in column 2, and crawl its text.
row_num = sheet_1.max_row
for i in range(2, row_num + 1):
    label = sheet_1.cell(i, 2).value  # relative link stored in column 2
    # Robustness fix: an empty cell yields None, and concatenating None
    # would raise TypeError and abort the whole run — skip such rows.
    if not label:
        continue
    url = 'https://cn.ecbos.com' + label  # build the absolute URL
    get_text()  # fetch the article and write its text into column 3
workbook.save(file_name)