import re
from html import unescape  # replaces HTMLParser.unescape, removed in Python 3.9

import requests
from retrying import retry  # retry decorator for transient network failures

DEBUG = False
class NeihanpaSpider:

    def __init__(self):
        self.base_url = "http://www.neihanpa.com/"
    # stop_max_attempt_number caps the number of retry attempts
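    # A minimal sketch of how the retrying decorator behaves (assumes the
    # retrying package; wait_fixed is in milliseconds and is not used below):
    #
    #   @retry(stop_max_attempt_number=3, wait_fixed=2000)
    #   def flaky():
    #       ...  # any exception raised here triggers up to 2 more attempts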
    @retry(stop_max_attempt_number=5)
    def __parse_url(self, url, headers=None, method="get", params=None):
        """
        Wraps the raw low-level network request in one place, so the
        transport can be swapped out later without touching the callers.
        :param url:
        :return:
        """
        print("attempt")  # printed once per try, so retries are visible
        headers = headers or {}
        params = params or {}
        # timeout=5 makes slow servers raise, which triggers a retry
        if method == 'get':
            response = requests.get(url, headers=headers, timeout=5, params=params)
            print(url)
            print(params)
        elif method == 'post':
            response = requests.post(url, headers=headers, timeout=5, data=params)
        html = response.content.decode('utf-8')
        return html
    def parse_url(self, url, method="get", headers=None, params=None):
        """
        The request entry point used by the rest of the spider.
        :param url:
        :return:
        """
        headers = headers or {}
        params = params or {}
        # default request headers
        req_headers = {
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36",
        }
        # merge caller-supplied headers over the defaults
        req_headers = dict(req_headers, **headers)
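        # dict(a, **b) builds a copy of a updated with b, so a header passed
        # by the caller overrides the default one, e.g.:
        #   dict({"User-Agent": "ua"}, **{"Referer": "r"})
        #   -> {"User-Agent": "ua", "Referer": "r"}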
        html = None
        try:
            # forward the caller's method instead of hard-coding "get"
            html = self.__parse_url(url, headers=req_headers, method=method, params=params)
        except Exception:
            # every retry failed; callers check for None
            html = None
        return html
    def save(self, content):
        print(content)
    def run(self):
        # 1. Fetch the data
        # 1.1 Analysis
        '''
        Analysis:
        1. Crawl flow
            first page:  http://www.neihanpa.com/article/index.html
            page n:      http://www.neihanpa.com/article/index_{page}.html
            list API:    http://www.neihanpa.com/e/action/ListInfo/?classid=11&page={}
            -> list page -> extract detail links -> fetch detail pages -> extract data
        '''
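        # With the params dict built below, requests assembles the list-page
        # URL as e.g. http://www.neihanpa.com/e/action/ListInfo?classid=11&page=2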
        # pattern for the detail-page links on a list page (non-greedy, so a
        # match stops at the first closing quote)
        detail_link_pattern = re.compile(r'<a href="(.*?)" class="title" title=')
        # first-pass extraction (needs DOTALL so '.' can span newlines)
        detail_div_pattern = re.compile(r'<div class="detail">(.*)<div class="art_newding"', re.S)
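        # A quick illustration of the re.S (DOTALL) flag this pattern relies on:
        #   re.findall(r'<p>(.*)</p>', '<p>a\nb</p>')        -> []
        #   re.findall(r'<p>(.*)</p>', '<p>a\nb</p>', re.S)  -> ['a\nb']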
        # second-pass extraction: one match per paragraph
        detail_part_pattern = re.compile(r'<p>(.*)</p>')
        for i in range(1094):
            list_url = "http://www.neihanpa.com/e/action/ListInfo"
            params = {
                "classid": 11,
                "page": i
            }
            # fetch the list page html
            list_html = self.parse_url(url=list_url, method='get', params=params)
            if list_html is None:  # request failed even after retries
                continue
            # pull the detail-page urls out of the list page
            links = detail_link_pattern.findall(list_html)
            for link in links:
                # absolute detail-page url
                detail_link = self.base_url + link
                # fetch the detail page html
                detail_html = self.parse_url(url=detail_link)
                if detail_html is None:
                    continue
                # 2. Extract the data, in two passes
                # first pass: grab the whole detail <div>
                detail_div_htmls = detail_div_pattern.findall(detail_html)
                if len(detail_div_htmls) == 0:
                    break
                detail_div_html = detail_div_htmls[0]
content = ""
# 第二步精准化提取内容
parts = detail_part_pattern.findall(detail_div_html)
for part in parts:
# 对内容进行 html 转义处理 给大家介绍一个库 HTMLParser
part = html_parser.unescape(part)
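                    # e.g. (stdlib html module):
                    #   unescape("&lt;b&gt; &amp; &quot;") -> '<b> & "'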
                    # strip surrounding whitespace
                    part = part.strip()
                    # stitch the paragraphs together
                    content = content + part + "\n"
                # 3. Save the data
                self.save(content)
                if DEBUG:
                    break
            if DEBUG:
                break
if __name__ == '__main__':
    spider = NeihanpaSpider()
    spider.run()