# 整体思路 (Overall approach):
# 通过request的get方法下载贴吧信息,然后保存到本地
# (Download the Tieba pages with a requests GET call, then save them locally.)
# ProjectName:apiAutoTest1
# FileName:spider_tieba
# Author:wangchao
# CreateTime:2021/6/2 17:10
# Description: 从贴吧下载数据
# 贴吧请求是https://tieba.baidu.com/f?kw=nico&ie=utf-8&pn=350,其中kw参数是关键字,ie=utf-8是固定的,pn是多少条数据,贴吧是50条数据进行分页
import requests
class spider_teiba:
    """Download Baidu Tieba listing pages for a keyword and save them locally.

    The listing URL is ``https://tieba.baidu.com/f?kw=<kw>&ie=utf-8&pn=<n>``
    where ``kw`` is the forum keyword, ``ie=utf-8`` is fixed, and ``pn`` is the
    record offset — Tieba paginates 50 records per page.
    """

    def __init__(self, headers, key_word):
        # headers: HTTP request headers (should carry a User-Agent to avoid blocking)
        # key_word: the forum keyword whose pages will be downloaded
        self.headers = headers
        self.key_word = key_word
        # The {} placeholder receives the ``pn`` offset (page_number * 50).
        self.url = "https://tieba.baidu.com/f?kw=" + key_word + "&ie=utf-8&pn={}"

    def url_list(self, page_count=999):
        """Return the listing URLs for pages 1..page_count.

        The default of 999 preserves the original behavior
        (offsets 50, 100, ..., 49950); callers may now request fewer pages.
        """
        return [self.url.format(page * 50) for page in range(1, page_count + 1)]

    def sent_url(self, url):
        """Send a GET request for *url* and return the Response.

        A timeout is set so a stalled connection cannot hang the crawl forever.
        """
        return requests.get(url, headers=self.headers, timeout=10)

    def save_file(self, page_num, file):
        """Write one downloaded page (*file*, a str) to the local data volume."""
        path = "/Volumes/文件盘/Spider/{}吧_第{}页数据".format(self.key_word, page_num)
        with open(path, "w", encoding="utf-8") as f:
            f.write(file)

    def run_main(self):
        """Download every listing page and persist it, numbered from 1.

        Uses enumerate instead of the original ``list.index`` lookup, which was
        an O(n) scan per URL (O(n^2) over the whole crawl).
        """
        for page_num, url in enumerate(self.url_list(), start=1):
            self.save_file(page_num, self.sent_url(url).content.decode())
if __name__ == "__main__":
    # Script entry point: ask for the forum keyword, then run the full crawl.
    keyword = input("请输入你想要下载的贴吧信息")
    request_headers = {
        "User-Agent": (
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
            "(KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36"
        ),
    }
    spider_teiba(request_headers, keyword).run_main()