Homework_05

# Download all the songs and save them locally

import json
import requests
import multiprocessing

def parse_songs(path):
    """Parse the crawled record file and return playable song URLs and names."""
    song_play_url_list = []
    song_name_list = []
    with open(path, mode='r', encoding='utf-8') as f:
        # The file is a single line of concatenated JSON objects, so split on
        # '}' and re-append the brace before decoding each record.
        res = f.readline().strip('\n').split('}')
        for json_ in res[:-1]:
            record = json.loads(json_ + '}')
            song_play_url = record['song_play_url']
            # Skip songs whose play URL is missing.
            if song_play_url is not None:
                song_play_url_list.append(song_play_url)
                song_name_list.append(record['song_name'])
    return song_play_url_list, song_name_list

# Parsed at import time; on Windows this also runs once in each spawned
# child process, which is wasteful but harmless since the halves are
# passed to the workers explicitly via args.
song_url, song_name = parse_songs('E:/python/Homework05/top_500.txt')
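
Splitting on '}' only works while every record is a flat JSON object with no nested braces. A more robust sketch (parse_records is a hypothetical helper, not part of the assignment) walks the line with json.JSONDecoder.raw_decode, which consumes exactly one object per call:

import json

def parse_records(line):
    # raw_decode returns (object, end_index), so nested braces inside a
    # record cannot break the scan.
    decoder = json.JSONDecoder()
    pos, records = 0, []
    while pos < len(line):
        if line[pos].isspace():
            pos += 1            # skip whitespace between records
            continue
        obj, pos = decoder.raw_decode(line, pos)
        records.append(obj)
    return records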

def download(song_url, song_name):
    """Fetch every MP3 and save it under its song name."""
    for url, name in zip(song_url, song_name):
        response = requests.get(url)
        # NOTE: scraped names may contain characters that are illegal in
        # Windows file names; see the sanitizing sketch below.
        with open('E:/python/Homework05/songs/' + name + '.mp3', mode='wb') as f:
            f.write(response.content)

#download(song_url,song_name)
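
Song names scraped from the web can contain characters that Windows forbids in file names (\ / : * ? " < > |). A minimal sanitizing helper (hypothetical, not in the original code) that download() could apply before building the path:

import re

def safe_filename(name):
    # Replace every character that is illegal in a Windows file name.
    return re.sub(r'[\\/:*?"<>|]', '_', name)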
if __name__ == "__main__":
    x = len(song_url) // 2
    # Create the worker processes, each handling half of the songs.
    p1 = multiprocessing.Process(target=download, args=(song_url[:x], song_name[:x]))
    p2 = multiprocessing.Process(target=download, args=(song_url[x:], song_name[x:]))
    # Start both workers, then wait for them to finish.
    p1.start()
    p2.start()
    p1.join()
    p2.join()
    print("Over")

Execution result: (screenshot)
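
response.content buffers each whole MP3 in memory before writing. For large files, requests' streaming mode writes the body in chunks; a hedged sketch (download_streaming, url and dest are placeholders):

import requests

def download_streaming(url, dest):
    # stream=True defers fetching the body; iter_content then yields it
    # chunk by chunk, so the whole file never sits in memory at once.
    with requests.get(url, stream=True) as r:
        r.raise_for_status()
        with open(dest, mode='wb') as f:
            for chunk in r.iter_content(chunk_size=8192):
                f.write(chunk)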
# 2. Extract the chapter-detail URLs from the page https://www.17k.com/list/3015690.html
# 2.1 Split the work across processes and crawl each chapter page into local storage, one file per chapter.
# 2.2 html = response.text
# 2.3 If the response comes back garbled, set response.encoding = 'utf-8' / 'gbk' / ... (see the sketch below)
# 2.4 Extract the article text and save it locally.
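
For step 2.3, requests can also guess the charset from the page content; a small sketch (the list URL is reused purely for illustration):

import requests

res = requests.get('https://www.17k.com/list/3015690.html')
# requests decodes using the charset from the HTTP header; when the text
# looks garbled, apparent_encoding (a content-based guess) is a common
# fallback to hard-coding 'utf-8' or 'gbk'.
res.encoding = res.apparent_encoding
html = res.text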

import multiprocessing
from lxml import etree
import requests
def novel():
    # Request the chapter list page.  -> <Response [200]>
    response = requests.get('https://www.17k.com/list/3015690.html')
    response.encoding = 'utf8'
    # Build an XPath-capable tree from the HTML.
    tree = etree.HTML(response.text)
    # Grab every chapter <a> element, e.g.
    # <a target="_blank" href="/chapter/3015690/38259921.html" title="第一章 和龙王谈交易
    w_list = tree.xpath('//html/body/div[@class="Main List"]/dl[@class="Volume"]/dd/a')
    url = []
    for i in w_list:
        href = i.xpath('./@href')[0]    # root-relative link such as /chapter/...
        # href already starts with '/', so join without a trailing slash
        # (the original 'http://www.17k.com/' + href produced a double slash).
        url.append('https://www.17k.com' + href)
    return url
# Collected at import time; on Windows this also re-runs in each spawned child.
html_list = novel()
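
Plain concatenation works here because every href on the list page is root-relative. urllib.parse.urljoin is the more general tool, since it resolves relative and absolute hrefs alike; a small sketch:

from urllib.parse import urljoin

base = 'https://www.17k.com/list/3015690.html'
# urljoin resolves '/chapter/...' (and also purely relative paths)
# against the page the link was found on.
full = urljoin(base, '/chapter/3015690/38259921.html')
# -> 'https://www.17k.com/chapter/3015690/38259921.html'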
def write_(html_list, start=0):
    """Fetch each chapter page and save its paragraphs as <number>.txt.

    start offsets the file numbering so parallel workers writing to the
    same folder do not overwrite each other (the original code numbered
    both halves from 1).
    """
    for b, html in enumerate(html_list, start=start + 1):
        res = requests.get(html)
        res.encoding = 'utf8'
        tree1 = etree.HTML(res.text)
        # Pull the paragraph text of the chapter body.
        b_list = tree1.xpath('//html/body/div[@class="area"]/div[2]/div[2]/div[1]/div[2]/p/text()')
        txt = '\n'.join(b_list)    # join the paragraphs; str(b_list) wrote a Python list literal
        with open('E:/python/Homework05/novel/' + str(b) + '.txt', mode='w', encoding='utf-8') as f:
            f.write(txt)

if __name__ == "__main__":
    x = len(html_list) // 2
    # Each worker gets half of the chapters plus a numbering offset so the
    # two halves write to distinct files.
    p1 = multiprocessing.Process(target=write_, args=(html_list[:x], 0))
    p2 = multiprocessing.Process(target=write_, args=(html_list[x:], x))
    p1.start()
    p2.start()
    p1.join()
    p2.join()
    print("Over")

Execution result: (screenshot)
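
Hand-slicing the list into two halves does not scale past two workers. A multiprocessing.Pool sketch (the worker count of 2 is just an assumption) that keeps the same offset-based numbering:

from multiprocessing import Pool

if __name__ == "__main__":
    x = len(html_list) // 2
    # starmap passes each (chunk, offset) pair to write_, mirroring the
    # manual Process setup above but generalizing to any chunk count.
    with Pool(processes=2) as pool:
        pool.starmap(write_, [(html_list[:x], 0), (html_list[x:], x)])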
