The output of a run is a formatted text file of jokes. The 13 pages of jokes crawled here are laid out for readability using Zstring, a formatting module borrowed from a friend.
The tricky part was that at first I kept targeting the wrong tags for the joke text, so the crawl came back empty. A quick way to sanity-check the XPath expressions is sketched below.
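A minimal sketch (not part of the original script) for verifying the tags: fetch a single listing page with the same headers and URL pattern used in the code below, and print how many nodes each XPath matches. Zero matches means the tag is still wrong.

# sketch: check that the XPath expressions actually match something on one page
import requests
from lxml import etree

headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.93 Safari/537.36'}
html = requests.get('https://www.qiushibaike.com/text/page/1/', headers=headers).text
e = etree.HTML(html)
print(len(e.xpath('//div[@class="content"]/span[1]')))         # joke text nodes; 0 means the tag is wrong
print(len(e.xpath('//div[@class="author clearfix"]/a[2]')))    # author name nodes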
The source code follows:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2020/11/11 18:33
# @Author  : huni
# @File    : 糗事百科.py
# @Software: PyCharm
from threading import Thread            # threading support
from queue import Queue                 # thread-safe queue
from fake_useragent import UserAgent    # fakes request headers; optional, unused here since the headers are written by hand
import requests
from lxml import etree
import Zstring                          # a friend's module, used to format the scraped text into paragraphs
class CrawlInfo(Thread):
    # override the constructor
    def __init__(self, url_queue, html_queue):
        Thread.__init__(self)
        # keep references to the URL queue and the HTML queue
        self.url_queue = url_queue
        self.html_queue = html_queue

    # override the run method
    def run(self):
        # crawler code
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.93 Safari/537.36'
        }
        while not self.url_queue.empty():           # keep crawling while the URL queue is not empty
            url = self.url_queue.get()              # take one URL from the queue
            response = requests.get(url=url, headers=headers)
            if response.status_code == 200:
                self.html_queue.put(response.text)  # on success, put the HTML into the HTML queue
class ParseInfo(Thread):
    def __init__(self, html_queue):
        Thread.__init__(self)
        self.html_queue = html_queue

    # override the run method
    def run(self):
        while not self.html_queue.empty():
            e = etree.HTML(self.html_queue.get())                        # take one HTML page from the queue and parse it with etree
            span_contents = e.xpath('//div[@class="content"]/span[1]')   # parent tags of the joke text, returned as a list
            author_list = e.xpath('//div[@class="author clearfix"]/a[2]')
            with open('段子.txt', 'a', encoding='utf-8') as fp:
                for span, au in zip(span_contents, author_list):
                    info = span.xpath('string(.)')                       # collect all text under the tag
                    author = au.xpath('string(.)')
                    # strip spaces and newlines from the joke text and the author name
                    info = info.replace(' ', '').replace('\n', '')
                    author = author.replace(' ', '').replace('\n', '')
                    content = Zstring.String(info)
                    # author_name = Zstring.String(author)
                    mudul = content.paragraph(num=50, first_line=0, para=True)
                    fp.write(author + ':' + '\n' + mudul)
if __name__ == '__main__':
    # create the queues that hold the URLs and the downloaded HTML
    url_queue = Queue()
    html_queue = Queue()
    base_url = 'https://www.qiushibaike.com/text/page/{}/'   # URL template with a placeholder
    for i in range(1, 14):
        new_url = base_url.format(i)                          # fill the placeholder with the page number
        url_queue.put(new_url)

    # create three crawler threads and keep them in a list
    crawl_list = []
    for i in range(3):
        Crawl = CrawlInfo(url_queue, html_queue)
        crawl_list.append(Crawl)
        Crawl.start()
    for crawl in crawl_list:
        # wait for every crawler thread to finish
        crawl.join()

    # create three parser threads and keep them in a list
    parse_list = []
    for i in range(3):
        parse = ParseInfo(html_queue)
        parse_list.append(parse)
        parse.start()
    for parse in parse_list:
        parse.join()
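Note that Zstring is not a published package, so the script only runs if you have that friend's file locally. If all you need is the line wrapping that content.paragraph(num=50, first_line=0, para=True) appears to provide, a rough stand-in (an assumption about Zstring's behaviour, inferred only from how it is called above) can be built on the standard-library textwrap module:

# hypothetical stand-in for Zstring.String(info).paragraph(num=50, first_line=0, para=True):
# wrap the joke text to 50 characters per line and separate entries with a blank line
import textwrap

def paragraph(text, num=50):
    return '\n'.join(textwrap.wrap(text, width=num)) + '\n\n'

# usage inside ParseInfo.run, replacing the Zstring calls:
#     mudul = paragraph(info, num=50)
#     fp.write(author + ':' + '\n' + mudul)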