import re
import requests
class NeiHanDuanZi(object):
    """Scrape joke ("duanzi") listing pages from haoduanzi.com, clean the
    HTML out of each joke, and append the result to a local file.

    Usage: NeiHanDuanZi(pages).run()
    """

    def __init__(self, page=1):
        # `assert` is stripped under `python -O`, so validate explicitly.
        if page <= 0:
            raise ValueError("page must be a positive integer, got {!r}".format(page))
        self.__page = page
        # {pages} is filled in run() with the 1-based page index.
        self.__url = "http://www.haoduanzi.com/category/?1-{pages}.html"
        # Output stream, opened once here and closed in close()/__del__.
        self.__duanzi_file = open("NeiHanDuanZi.json", "a+", encoding="utf-8")
        # Extracts the raw joke text from each content <div> on the page.
        self.__first_filter = re.compile(r"<div class=\"content\"><a.*?>(.*?)</a></div>", re.S)
        # Strips leftover HTML tags, newlines and full-width spaces.
        self.__last_filter = re.compile(r"<.+?>|\n|\u3000", re.S)

    def close(self):
        """Close the output file stream; safe to call more than once."""
        if not self.__duanzi_file.closed:
            self.__duanzi_file.close()

    def __del__(self):
        # Release the file stream when the object is garbage-collected.
        # __init__ may have raised before the attribute existed, so guard
        # against an AttributeError inside the finalizer.
        print("对象被释放了 关闭文件流 ................")
        if hasattr(self, "_NeiHanDuanZi__duanzi_file"):
            self.close()

    def __send_request(self, url):
        """GET `url` with a browser user-agent and return the body as UTF-8 text."""
        headers = {
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36"}
        print("请求的地址为->>> {}".format(url))
        # A timeout keeps a stalled server from hanging the crawl forever.
        return requests.get(url, headers=headers, timeout=10).content.decode("utf-8")

    def __filter_content(self, content):
        """Extract all joke texts from raw HTML, join them with '&@&,' and
        strip residual markup.

        Returns "" when `content` is None.
        """
        if content is None:
            return ""
        pieces = self.__first_filter.findall(content)
        return self.__last_filter.sub("", "&@&,".join(pieces))

    def __save_file(self, result_content):
        """Append one page's cleaned text (plus a trailing newline) to the file."""
        print("存储文件->>>> {}".format(result_content))
        # write(), not writelines(): writelines iterates a lone string
        # character-by-character — same output, needless per-char calls.
        self.__duanzi_file.write(result_content + "\n")

    def run(self):
        """Fetch, clean and store every page from 1 to self.__page."""
        for i in range(self.__page):
            # 1. Request the listing page.
            temp_url = self.__url.format(pages=i + 1)
            temp_content = self.__send_request(temp_url)
            # 2. Clean the returned HTML.
            result_content = self.__filter_content(temp_content)
            # 3. Append the result to the output file.
            self.__save_file(result_content)
if __name__ == '__main__':
    # Crawl the first 50 listing pages and persist the cleaned jokes.
    spider = NeiHanDuanZi(50)
    spider.run()
# Python — batch-scrape duanzi (joke) data and save it to a file
# (blog footer residue: latest recommended article published 2024-06-03 09:25:06)