from gevent import monkey
monkey.patch_all()#把程序变成协作式运行模式,这样就能实现程序异步
import requests,time,gevent,csv,random
from gevent.queue import Queue
from bs4 import BeautifulSoup
#以上引入需要的库和模块,gevent帮我们实现多协程;time帮我们记录程序运行时间;requests帮我们实现爬取多个网站;BeautifulSoup帮我们分析和提取数据;csv帮我们保存数据;
start = time.time()  # wall-clock start; task1() prints elapsed time against this

# Pool of User-Agent strings; one is picked at random per run so requests
# don't all carry the same default requests/urllib fingerprint.
agent_list = [
    'Mozilla/5.0(WindowsNT10.0;WOW64)AppleWebKit/537.36(K HTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36',
    'Mozilla/5.0(compatible;MSIE9.0;WindowsNT6.1;Trident/5.0',
    'Mozilla/4.0(compatible;MSIE6.0;WindowsNT5.1)',
]
headers = {
    'User-Agent': random.choice(agent_list)
}

# Build the chapter URLs (host hidden in the original — fill in your own).
# Chapters are numbered 1..3; other sites may need the id scraped from HTML.
url_list = ['https://xxxxxxxx/8/8856/%s.html' % (i + 1) for i in range(3)]

work = Queue()  # gevent queue shared by all worker greenlets
for url in url_list:
    # put_nowait returns None; the original pointlessly assigned it to `i`
    work.put_nowait(url)
def book():
    """Worker: drain the shared queue, scraping each chapter URL.

    For every URL pulled off `work`, fetch the page, extract the chapter
    title and body, append both to the output file, and print the title
    as a progress indicator. Returns when the queue is empty.
    """
    while not work.empty():
        url1 = work.get_nowait()  # non-blocking: queue was fully pre-filled
        res = requests.get(url1, headers=headers)
        # Fix: original said 'gutf-8', which is not a valid codec and would
        # raise LookupError on res.text. The page is presumably UTF-8 —
        # TODO confirm against the actual site's charset.
        res.encoding = 'utf-8'
        soup = BeautifulSoup(res.text, 'html.parser')
        # Chapter title and body; selectors are site-specific.
        name = soup.find('div', class_='size18 w100 text-center lh100 pt30 pb15')
        content = soup.find('div', id='pt-pop')
        # Append mode so successive chapters accumulate in one file;
        # `with` guarantees the handle is closed even if a write fails
        # (the original leaked the handle on exceptions).
        with open('/user/wudixianzun1.txt', 'a', encoding='utf-8-sig') as file:
            file.write(name.text)
            file.write(content.text)
        print(name.text)  # progress: one line per finished chapter
def task1(num_workers=1):
    """Spawn worker greenlets running book() and wait for them to finish.

    Args:
        num_workers: how many concurrent greenlets to spawn (default 1,
            matching the original hard-coded ``range(1)``; raise it to
            actually overlap the network waits).

    Prints the total elapsed time since the module-level `start`.
    """
    tasks_list = [gevent.spawn(book) for _ in range(num_workers)]
    gevent.joinall(tasks_list)  # block until every worker drained the queue
    end = time.time()
    print(end - start)
def main():
    """Entry point: run the scraper via the gevent worker pool."""
    # Bug fix: the original called book() directly *before* task1(), which
    # drained the whole queue synchronously — the greenlets task1() spawned
    # then found an empty queue and did nothing. Let task1() do all the work.
    task1()

# Bug fix: the original read `if name == 'main':`, which raises NameError;
# the standard entry guard uses the __name__/'__main__' dunders.
if __name__ == '__main__':
    main()
# 注:获取免费离线小说利器 (Note: a handy tool for grabbing free novels for offline reading)