import requests


class TiebaSpider:
    """A spider class for crawling a Baidu Tieba forum."""

    def __init__(self, tieba_name):
        """Initialize with the name of the tieba to crawl, e.g. 六人行 (Friends)."""
        self.tieba_name = tieba_name
        # Looking at a tieba URL such as
        # https://tieba.baidu.com/f?kw=六人行&ie=utf-8&pn=50
        # we can see that kw controls the forum name and pn controls the page
        # offset, so we parameterize both in the URL template.
        self.url_temp = "https://tieba.baidu.com/f?kw=" + tieba_name + "&ie=utf-8&pn={}"
        self.headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.122 Safari/537.36"}
    def get_url_list(self):
        """Build the list of page URLs to request."""
        url_list = list()
        for i in range(1000):
            # Each page holds 50 posts, so page i starts at offset i * 50;
            # fill that offset into the pn placeholder of url_temp.
            url_list.append(self.url_temp.format(i * 50))
        return url_list
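    # The same list can be built with a comprehension (illustrative alternative,
    # identical behavior):
    #   return [self.url_temp.format(i * 50) for i in range(1000)]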
    def run_url(self, url):
        """Send a request and return the decoded response body."""
        print(url)
        response = requests.get(url, headers=self.headers)
        return response.content.decode()
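    # Note: response.content.decode() decodes the raw bytes as UTF-8. A more
    # defensive variant (a sketch, not part of the original code) would add a
    # timeout and fail fast on HTTP errors:
    #   response = requests.get(url, headers=self.headers, timeout=10)
    #   response.raise_for_status()
    #   return response.content.decode()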
    def save_html(self, html_str, page_num):
        """Save a crawled page to disk."""
        file_path = "{}-page{}.html".format(self.tieba_name, page_num)
        with open(file_path, "w", encoding="utf-8") as f:
            f.write(html_str)
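    # Note: files land in the current working directory; to collect them in a
    # subdirectory instead (illustrative variant), create it first with
    # os.makedirs("pages", exist_ok=True) and prefix file_path with "pages/".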
    def run(self):
        # Build the URL list.
        url_list = self.get_url_list()
        # Iterate over the URLs, sending each request and fetching the response.
        for page_num, url in enumerate(url_list, start=1):
            html_str = self.run_url(url)
            # Save the page. Because the URLs were built in page order, the
            # position of each url in url_list is its page number; enumerate()
            # yields it directly, avoiding the O(n) list.index() lookup (which
            # also mis-numbers pages if the list ever contains duplicates).
            self.save_html(html_str, page_num)

if __name__ == '__main__':
    tieba_spider = TiebaSpider("六人行")
    tieba_spider.run()
Result:
Open the saved pages and compare them with the live pages in the browser; you will see that the page content has been fetched correctly.
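To spot-check this without eyeballing the browser, a minimal sketch like the one below (assuming the first saved file is 六人行-page1.html, following the save_html() naming scheme above) confirms the forum name appears in the saved HTML:

# Quick sanity check on the first saved page (illustrative, not part of the spider).
with open("六人行-page1.html", encoding="utf-8") as f:
    html = f.read()
print("六人行" in html)  # True if the forum name shows up in the saved page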