import json
import time
import requests
from os import path
from queue import Queue, Empty
from lxml import etree
from threading import Thread
from fake_useragent import UserAgent
from retrying import retry


class XiaoSshuoSpider(object):
    def __init__(self):
        self.url = 'http://www.55shuba.com/top/allvisit_{}.htm'
        self.headers = {"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
                        "Accept-Encoding": "gzip, deflate",
                        "Accept-Language": "zh-CN,zh;q=0.9",
                        "Connection": "keep-alive",
                        "Cookie": "bdshare_firstime=1590149393888; UM_distinctid=1724548cc5b35-0819686bf4beda-58143718-144000-1724548cc5c2a7; CNZZDATA1254488625=180465324-1590297470-http%253A%252F%252Fwww.55shuba.com%252F%7C1590302884",
                        "Host": "www.55shuba.com",
                        "Referer": "http://www.55shuba.com/top/allvisit_1.htm",
                        "Upgrade-Insecure-Requests": "1",
                        "User-Agent": UserAgent().random  # .random returns a random user-agent string
                        }
        self.queue = Queue()  # FIFO queue of index-page URLs
        self.info = {}        # holds the scraped data
        self.count = 0        # page counter
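
    # Note: the workers below update ``self.count`` and ``self.info``
    # concurrently. Plain dict writes are safe enough under the GIL, but
    # ``self.count += 1`` is not atomic, so the page counter may drift
    # slightly. A minimal sketch of an exact counter (``self.lock`` is a
    # hypothetical extra attribute, not part of the original script):
    #
    #     from threading import Lock
    #     self.lock = Lock()      # in __init__
    #     with self.lock:         # in the worker, around the increment
    #         self.count += 1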

    @retry(stop_max_attempt_number=3)  # retry a timed-out request up to 3 times before the error propagates
    def fetch(self, url):
        # the site serves its pages GBK-encoded
        return requests.get(url, headers=self.headers, timeout=5).content.decode('gbk', errors='ignore')

    def get_url_title(self):
        while True:
            try:
                # non-blocking get: checking empty() and then calling get()
                # can strand a thread once several workers race for the last URL
                url = self.queue.get_nowait()
            except Empty:
                break
            res = self.fetch(url)
            self.count += 1
            print('Crawling page {}'.format(self.count))
            time.sleep(1)  # sleep 1 second after each page to stay polite
            # extract each novel's name and unique ID with XPath
            html = etree.HTML(res)
            # base XPath: one dl per novel in the ranking list
            dls = html.xpath('//div[@class="listtab"]//dl')
            for dl in dls:
                novel_name = dl.xpath('.//a/@title')[0]  # novel title
                url_index = dl.xpath('.//a/@href')[0]  # novel URL
                url_index = path.split(url_index)[-1][:-4]  # novel ID parsed from the URL
                '''e.g. with url = 'http://www.55shuba.com/txt/20394.htm',
                path.split(url) splits on the last "/" and returns the tuple
                ('http://www.55shuba.com/txt', '20394.htm'); [-1] takes
                '20394.htm' and [:-4] strips the '.htm' extension.
                '''
                self.info[novel_name] = url_index
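
    # ``@retry`` as used above retries on any exception. To retry only on
    # network errors, the ``retrying`` package accepts a filter callable; a
    # minimal sketch, assuming requests raises ``requests.RequestException``:
    #
    #     @retry(stop_max_attempt_number=3,
    #            retry_on_exception=lambda e: isinstance(e, requests.RequestException))
    #     def fetch(self, url):
    #         ...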

    # save the scraped data as JSON
    def save_data(self):
        with open('F:/文本/novel.json', 'w', encoding='utf-8') as f:
            json.dump(self.info, f, ensure_ascii=False)
        print('Save complete')
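
    # The resulting novel.json maps each title to its numeric ID; roughly
    # (illustrative title, ID format taken from the URL example above):
    #
    #     {"SomeNovel": "20394"}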

    def main(self):
        thread_list = []
        for i in range(1, 1863):  # queue up the URL of every index page
            self.queue.put(self.url.format(i))
        for i in range(10):  # spawn 10 worker threads
            t = Thread(target=self.get_url_title)
            thread_list.append(t)
            t.start()
        for thread in thread_list:  # wait for every worker to finish
            thread.join()


if __name__ == '__main__':
    start_time = time.time()
    try:
        spider = XiaoSshuoSpider()
        spider.main()
        spider.save_data()
        print('Took {}s'.format(time.time() - start_time))
    except Exception as e:
        print('Download failed: {}'.format(e))
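
# A roughly equivalent pool-based variant (sketch): concurrent.futures can
# replace the manual Thread bookkeeping in main(), reusing the same worker:
#
#     from concurrent.futures import ThreadPoolExecutor
#     with ThreadPoolExecutor(max_workers=10) as pool:
#         for _ in range(10):
#             pool.submit(spider.get_url_title)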