# -*- coding:utf-8 -*-
"""Scraper for neihanshequ.com jokes: follows max_time pagination and
writes each joke as a row of an .xls workbook via xlwt."""
import re
from urllib import request, parse
from random import choice

import xlwt

from tools import Tools


class NeiHanSpider(object):
    def __init__(self):
        # Pagination cursor (max_time) is appended to this base URL.
        self.url = 'http://neihanshequ.com/joke/?is_json=0&app_name=neihanshequ_web&max_time='
        # Rotate User-Agent per request to look less like a bot.
        self.UserAgents = [
            'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0',
            'Mozilla/5.0 (Windows NT 10; WOW64; rv:53.0) Gecko/20100101 Firefox/53.0',
            'Mozilla/5.0 (Windows; U; Windows NT 5.2) AppleWebKit/525.13 (KHTML, like Gecko) Version/3.1 Safari/525.13',
        ]
        self.html = ''  # raw HTML of the page currently being parsed
        # Workbook with a header row; scraped rows start at index 1.
        self.textbook = xlwt.Workbook(encoding='utf-8')
        self.sheet = self.textbook.add_sheet('内涵段子')
        headers = ('头像图片', '网名', '发布时间', '发布内容',
                   '点赞数', '被踩数', '收藏数', '转发数', '评论数')
        for col, title in enumerate(headers):
            self.sheet.write(0, col, title)
        self.count = 1  # next row index to write / running item counter

    def get_html(self, max_time='1520296373'):
        """Fetch one feed page into self.html.

        :param max_time: pagination cursor extracted from the previous page.
        """
        url = self.url + max_time
        req = request.Request(
            url,
            headers={'User-Agent': choice(self.UserAgents)},
        )
        # Context manager ensures the connection is closed (the original
        # leaked the response object).
        with request.urlopen(req) as response:
            self.html = response.read().decode('utf-8')

    def parse_html(self):
        """Parse self.html, write rows to the sheet, and follow pagination.

        Keeps fetching pages while a max_time cursor is present, then saves
        the workbook. Uses a loop instead of the original self-recursion,
        which would hit RecursionError on long feeds.
        """
        # One capture group per output column, in sheet-column order.
        pattern = re.compile('''<div class="detail-wrapper.*?<img.*?src="(.*?)".*?class="name">(.*?)</span>.*?title=.*?>(.*?)</span> .*?<p>(.*?)</p>.*?class="digg">(.*?)</span>.*?<span class="bury">(.*?)</span>.*?<span class="repin">(.*?)</span>.*?class="share">(.*?)</span> .*?class="comment J-comment-count">(.*?)</span>.*?''', re.S)
        # Cursor for the next page, embedded in the page's inline JS.
        max_pat = re.compile("max_time:'(.*?)'", re.S)
        while True:
            for rs in pattern.findall(self.html):
                print('正在爬取第%s条段子' % self.count)
                date = Tools.strip_char(rs[2])
                content = Tools.strip_char(rs[3])
                row = (rs[0], rs[1], date, content,
                       rs[4], rs[5], rs[6], rs[7], rs[8])
                for col, value in enumerate(row):
                    self.sheet.write(self.count, col, value)
                self.count += 1
            res = max_pat.search(self.html)
            if not res:
                break
            # Fetch the next page and parse it on the next loop iteration.
            self.get_html(res.group(1))
        self.textbook.save('内涵段子.xls')

    def start_spider(self):
        """Fetch the first page and crawl until pagination ends."""
        self.get_html()
        try:
            self.parse_html()
        except Exception:
            # Persist whatever was scraped so far before propagating —
            # the original lost all rows on any mid-crawl error.
            self.textbook.save('内涵段子.xls')
            raise


if __name__ == '__main__':
    NH = NeiHanSpider()
    NH.start_spider()
基于Python的爬虫——爬取内涵段子
最新推荐文章于 2019-08-29 17:30:56 发布