# 今天的作业很难,我真的不会!!!! (Today's homework is very hard — I really can't do it!)
import json
import requests
import multiprocessing
def text(path):
    """Parse a dump file of concatenated JSON song records.

    The file's first line is expected to hold back-to-back JSON objects,
    e.g. ``{...}{...}{...}``, each carrying 'song_play_url' and
    'song_name' keys.

    Args:
        path: Path of the text file to parse.

    Returns:
        A ``(song_play_url_list, song_name_list)`` pair. Records whose
        'song_play_url' is null are skipped entirely, so the two lists
        stay parallel (url[i] belongs to name[i]).
    """
    song_play_url_list = []
    song_name_list = []
    decoder = json.JSONDecoder()
    with open(path, mode='r', encoding='utf-8') as f:
        # Only the first line holds data; '' for an empty file means
        # the loop below simply never runs (no IndexError as before).
        line = f.readline().strip('\n')
    pos = 0
    end = len(line)
    while pos < end:
        # Skip any whitespace between adjacent JSON objects.
        while pos < end and line[pos].isspace():
            pos += 1
        if pos >= end:
            break
        # raw_decode consumes exactly one JSON object and returns where
        # the next one starts; unlike splitting on '}', it copes with
        # nested braces and '}' inside string values.
        record, pos = decoder.raw_decode(line, pos)
        song_play_url = record['song_play_url']
        if song_play_url is not None:
            song_play_url_list.append(song_play_url)
            song_name_list.append(record['song_name'])
    return song_play_url_list, song_name_list
# Parse the top-500 song dump once at import time, producing the two
# parallel global lists consumed by the __main__ block below.
# NOTE(review): because this runs at module level (outside the
# __main__ guard), Windows "spawn" multiprocessing re-executes it in
# every child process — redundant file I/O; consider moving it under
# the guard. TODO confirm target platform.
song_url,song_name = text('C:/Users/admin/Desktop/yinyue/top_500.txt')
def download(song_url, song_name, dest_dir='D:/music/'):
    """Download each URL and save its payload as ``<name>.mp3``.

    Args:
        song_url: Iterable of direct audio URLs (None entries were
            already filtered out by ``text``).
        song_name: Parallel iterable of song titles used as file names.
        dest_dir: Directory prefix the files are written under; defaults
            to the original hard-coded 'D:/music/' so existing callers
            keep their behavior.
    """
    # zip pairs each URL with its title — no manual index counter.
    for url, name in zip(song_url, song_name):
        response = requests.get(url)
        # response.content is the raw bytes of the audio payload;
        # write in binary mode so nothing is re-encoded.
        with open(dest_dir + name + '.mp3', mode='wb') as f:
            f.write(response.content)
if __name__ == "__main__":
    # Split the playlist in half and download the two halves in
    # parallel. Floor division keeps the midpoint an int for slicing.
    x = len(song_url) // 2
    # Create one worker process per half.
    p1 = multiprocessing.Process(target=download, args=(song_url[:x], song_name[:x]))
    p2 = multiprocessing.Process(target=download, args=(song_url[x:], song_name[x:]))
    # Start both workers, then wait for them to finish.
    p1.start()
    p2.start()
    p1.join()
    p2.join()
    print("Over")
# 结果显示: (Results shown above.)
# Assignment part 2: obtain the chapter-detail content URLs from the
# page https://www.17k.com/list/3015690.html
#   2.1 Split the work across processes; crawl each chapter's detail
#       page and store it locally, one .html file per chapter.
#   2.2 html = response.text
#   2.3 If the response comes back garbled, set
#       response.encoding = 'utf-8' / 'gbk' / ...
#   2.4 Extract the article body and save it to a local file.