from urllib.request import urlretrieve
import requests
import re
import execjs
from bs4 import BeautifulSoup
class Anime(object):
    """Scraper for comic chapter images from www.dmzj.com.

    Flow: fetch the comic info page, collect the chapter URLs, unpack the
    eval()'d packed JavaScript on each chapter page with execjs to recover
    the image paths, then download every image.
    """

    def __init__(self, save_dir='/Users/jinnan/PycharmProjects/boss_mes/img'):
        # Desktop browser UA so the site serves the normal HTML page.
        self.headers = {
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) '
                          'Chrome/101.0.4951.54 Safari/537.36'}
        self.base_url = 'https://www.dmzj.com/info/tuyashouji.html'
        # Download target directory; was hard-coded inside download_img,
        # now configurable (default preserves old behavior).
        self.save_dir = save_dir

    def ask_base_url(self):
        """Fetch the comic info page and return chapter URLs, oldest first.

        Returns an empty list if the chapter <ul> cannot be found
        (layout change or blocked request).
        """
        resp = requests.get(self.base_url, headers=self.headers)
        soup = BeautifulSoup(resp.text, 'html.parser')
        lis = soup.find('ul', class_='list_con_li autoHeight')
        list_img = []
        if lis is None:
            return list_img
        for li in lis:
            try:
                list_img.append(li.find('a')['href'])
            except (AttributeError, TypeError, KeyError):
                # NavigableString children or an <li> without a link — skip.
                continue
        # The site lists the newest chapter first; reverse once at the end
        # (original used insert(0, ...) per item — O(n^2)).
        list_img.reverse()
        return list_img

    def ask_img_url(self, img_list):
        """Visit each chapter page and return the full image URLs.

        The image list is hidden in an eval()'d packed JS blob; the packer
        function is renamed so it can be wrapped and executed with execjs.
        Bug fix: images from ALL chapters are accumulated — previously
        `pages_img` was reset per chapter so only one chapter's images
        reached the caller.
        """
        pages_img = []
        for img_url in img_list:
            html = requests.get(img_url, headers=self.headers)
            soup = BeautifulSoup(html.text, 'html.parser')
            script_info = soup.script
            # Grab the argument of eval(...) — the packed JS payload.
            js_str = re.findall(r'eval\((.*?)\)\n', str(script_info))
            if not js_str:
                print('章节为空')
                continue
            body = js_str[0].replace('function(p,a,c,k,e,d)',
                                     'function fun(p, a, c, k, e, d)')
            fun = """
            function run(){
                var result = %s;
                return result;
            }
            """ % body
            pages = execjs.compile(fun).call('run')
            # Paths appear as chapterpic\/<name>.jpg inside the JS string;
            # the dot is escaped here (original `.jpg` matched any char).
            for img in re.findall(r'chapterpic\\/(.*?)\.jpg', pages):
                img = img.replace('\\', '')  # drop JS escape backslashes
                pages_img.append(
                    'https://images.dmzj.com/img/chapterpic/' + img + '.jpg')
        return pages_img

    def download_img(self, new_list):
        """Download each image URL in new_list to self.save_dir, numbered.

        Uses the list position as the file number so a failed download no
        longer causes the next image to overwrite an earlier slot.
        """
        for page_img, url_img in enumerate(new_list):
            try:
                urlretrieve(url_img, f'{self.save_dir}/{page_img}.jpg')
            except OSError:
                # Best-effort: report and keep going, as before.
                print('第 ' + str(page_img) + ' 页图片下载失败!对应url为: ' + url_img)

    def main(self):
        """Run the full pipeline: chapter list -> image URLs -> download."""
        img_list = self.ask_base_url()
        new_list = self.ask_img_url(img_list)
        self.download_img(new_list)
def start():
    """Entry point: kick off the scrape via the module-level `op` instance."""
    op.main()
if __name__ == '__main__':
    # Bind the scraper to a module-level name because start() looks up
    # the global `op`, then run it.
    op = Anime()
    start()
# dmzj (dmzj.com) comic content scraper.
# First published 2022-05-09 23:57:07.