仅供学习,请勿用于商业行为!未经允许请勿转载。
相关驱动(ChromeDriver)的获取方式请查看原文章。
废话不啰嗦直接上代码
import requests
import re
import time
from bs4 import BeautifulSoup
import os
import threading
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
import math
import json
# 漫画爬取
# Comic crawler
class manhua(threading.Thread):
    """Worker thread that downloads a comic book chapter by chapter.

    Searches the target site for an exact title match, collects the full
    chapter list (AJAX endpoint + newest chapters scraped from the page),
    then renders each chapter in headless Chrome and saves every page
    image to disk under ``<book title>/<chapter name>/<page>.jpg``.
    """

    def __init__(self, bookname):
        threading.Thread.__init__(self)
        # Site base URL — intentionally left as a placeholder by the author.
        self.domain = "私聊获取网址"
        # Comic title to search for (also used as the output directory name).
        self.bookname = bookname

    def search(self):
        """Search the site for ``self.bookname`` and crawl the exact match.

        Terminates the thread (via SystemExit) when no result matches.
        """
        url = self.domain + "/spotlight/?keyword=" + self.bookname
        search_html = requests.get(url, timeout=30)
        soup = BeautifulSoup(search_html.text, 'html.parser')
        results = soup.select("body > main > div > div")
        found = False
        for content in results:
            link = re.findall(r'<p class="comic-name"><a href="(.*?)">.*?</a></p>', str(content))
            title = re.findall(r'<p class="comic-name"><a href=".*?">(.*?)</a></p>', str(content))
            # Guard against result blocks without the expected markup
            # (the original indexed title[0] unconditionally → IndexError).
            if title and link and title[0] == self.bookname:
                found = True
                # Book title
                self.book_title = title[0]
                self.cratedir(self.book_title)
                self.getlist(link[0])
        if not found:
            print("没有搜索到当前漫画")
            exit()

    def cratedir(self, file_dir):
        """Create ``file_dir`` (including parents) if it does not exist yet."""
        if not os.path.exists(file_dir):
            os.makedirs(file_dir)
            print("创建目录 " + file_dir + " 成功")
        else:
            print("目录已存在 " + file_dir)

    def getlist(self, booklink):
        """Build the full chapter list for ``booklink`` and crawl every chapter."""
        chapter_api = self.domain + "/bookchapter/"
        new_link = self.domain + booklink
        # Book id is the link path with slashes stripped.
        book_id = booklink.replace('/', '')
        self.book_id = book_id
        # The AJAX endpoint returns the backlist chapters as JSON
        # [{'id': ..., 'name': ...}, ...].
        response = requests.post(chapter_api, {'id': book_id, 'id2': 1}, timeout=30)
        chapters = json.loads(response.content)
        # The newest chapters only appear in the page markup, not in the API.
        new_response = requests.get(new_link, timeout=30)
        soup = BeautifulSoup(new_response.text, "html.parser")
        anchors = soup.select("body > main > div.detail-content > div.catalog-box > div.list-wrap > div > ul > li > a")
        newest = []
        for anchor in anchors:
            # NOTE: '\.' — the original pattern used an unescaped dot.
            chap_id = re.findall(r'<a href="' + booklink + r'(.*?)\.html"', str(anchor))[0]
            chap_name = re.findall(r'<a .*?>(.*?)</a>', str(anchor))[0]
            newest.append({'id': chap_id, 'name': chap_name})
        # Newest chapters first, then the API-provided backlist.
        for chapter in newest + chapters:
            self.getDetails(chapter['id'], chapter['name'])
            time.sleep(4)  # throttle so we don't hammer the server

    def getDetails(self, details_id, details_name):
        """Render one chapter in headless Chrome and save all of its pages.

        Scrolls the page step by step so lazily-loaded images are fetched
        before the DOM is scraped.
        """
        details_link = self.domain + "/" + str(self.book_id) + "/" + str(details_id) + ".html"
        # Headless mode: comment out the two options below to watch the browser.
        chrome_options = Options()
        chrome_options.add_argument('--headless')
        chrome_options.add_argument('--disable-gpu')
        # Raw string: the original "D:\p..." relied on invalid escape sequences.
        service = Service(r"D:\pythonVendor\chrome\chromedriver.exe")
        driver = webdriver.Chrome(service=service, options=chrome_options)
        try:
            driver.get(details_link)
            time.sleep(8)  # wait for the initial page load
            # Total scrollable height, used to size the scroll loop below.
            scrollbar_len = driver.execute_script(
                "window.scrollTo(0,document.body.clientHeight);ScrollBarlen=document.body.clientHeight-document.body.scrollTop;return ScrollBarlen"
            )
            steps = math.ceil(scrollbar_len / 1000)
            for step in range(steps):
                # Scroll down in increments so lazy images start loading.
                driver.execute_script("window.scrollTo(0," + str(1100 * (step + 1)) + ")")
                time.sleep(1)
            # Finally jump straight to the bottom of the page.
            driver.execute_script("var q=document.documentElement.scrollTop=100000000")
            soup = BeautifulSoup(driver.page_source, "lxml")
            images = soup.select("#mainView > ul > li > img")
            self.cratedir(self.bookname + "/" + str(details_name))
            for page_no, img in enumerate(images, start=1):
                image_link = re.findall(r'<img .*? src="(.*?)"', str(img))
                image_dir = self.bookname + "/" + str(details_name) + "/" + str(page_no) + ".jpg"
                if not os.path.exists(image_dir):
                    self.download_image(image_link[0], image_dir)
                else:
                    print("文件:" + image_dir + " 已存在")
                time.sleep(2)
        finally:
            # Always release the browser, even when scraping a chapter fails
            # (the original leaked the Chrome process on any exception).
            driver.quit()

    def download_image(self, image_link, image_dir):
        """Download ``image_link`` to ``image_dir`` unless the file exists."""
        image = requests.get(image_link, timeout=30)
        if not os.path.exists(image_dir):
            # Context manager closes the handle (the original never closed it).
            with open(image_dir, 'wb') as fh:
                fh.write(image.content)
            print("创建成功" + image_dir + '\n')

    def run(self):
        """Thread entry point: start the search-and-crawl pipeline."""
        print("开始")
        self.search()
if __name__ == "__main__":
    # Ask the user which comic to download, then run the crawler
    # on a worker thread and wait for it to finish.
    title = input("请输入下载的漫画 ")
    worker = manhua(title)
    worker.start()
    worker.join()
效果图: