# Downloads every chapter of the audiobook 仙逆 from lrts.me as numbered MP3 files.
import html
import os
from concurrent.futures import ThreadPoolExecutor

import requests
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36'
}
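
# The script relies on two lrts.me ajax endpoints, as used in the code below:
#   /ajax/book/<book_id>/<page>/100   -> paged chapter listing (items carry resId and resName)
#   /ajax/path/4/<book_id>/<res_id>   -> JSON whose 'data' field is the chapter's MP3 URL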
def down(index, book_id, chapter_id, chapter_name):
    """Fetch the audio URL for one chapter and save it as a numbered MP3."""
    url = f'http://www.lrts.me/ajax/path/4/{book_id}/{chapter_id}'
    resp = requests.get(url, headers=headers).json()
    # Files are written to ./仙逆/0001. <chapter name>.mp3, 0002. ..., and so on.
    out_dir = os.path.join(os.getcwd(), '仙逆')
    os.makedirs(out_dir, exist_ok=True)
    song_file = os.path.join(out_dir, '%04d. %s.mp3' % (index, chapter_name))
    if os.path.exists(song_file):
        # Already saved on a previous run; skip the download.
        print(chapter_name, 'download finished!')
    elif resp['status'] == 'success':
        with open(song_file, 'wb') as f:
            f.write(requests.get(resp['data']).content)
        print(chapter_name, 'download finished!')
    else:
        print(chapter_name, 'download failed!')
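
# Chapter downloads are independent and I/O-bound, so a small thread pool
# (10 workers below) keeps several transfers in flight at once without
# hammering the site.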
def each_all(url):
    """Page through the book's chapter list, 100 entries per page, and queue downloads."""
    book_id = url.split('/')[-1]
    count = 0
    with ThreadPoolExecutor(max_workers=10) as pool:
        # The chapter list is paged; stop at the first failed or empty page.
        for page in range(5000):
            resp = requests.post(f'http://www.lrts.me/ajax/book/{book_id}/{page}/100').json()
            if resp['status'] != 'success' or not resp['data']['data']:
                break
            for item in resp['data']['data']:
                count += 1
                # resName may contain HTML entities, hence the unescape.
                pool.submit(down, count, book_id, item['resId'], html.unescape(item['resName']))
    # Leaving the "with" block waits for every queued download to finish.
    print('All downloads finished:', count)
if __name__ == '__main__':
    each_all('http://www.lrts.me/book/41087')
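    # To grab a different title, pass its book page URL instead, e.g. the
    # hypothetical each_all('http://www.lrts.me/book/12345'), and change the
    # hard-coded '仙逆' folder name in down() to match the new book.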