22 - Scraping the Songs from Every NetEase Cloud Music Playlist

Goal: download the songs from every playlist on NetEase Cloud Music (there were 38 pages of playlists when I ran the crawl)

Result: the songs are downloaded and saved into the corresponding playlist folders

Note: NetEase Cloud Music's encryption... it made me dig through the site's JavaScript functions for far too long!!! My overall approach follows the excellent Zhihu answer here: https://www.zhihu.com/question/36081767

Once again I have to say: the experts really are experts!!!

Note: I wrote this directly in an object-oriented style, so at least it looks like I know what I'm doing. I won't include my whole thought process here; the code itself is simple. The hard part is the encryption!!!

One more complaint: NetEase Cloud's encryption really is a pain!!!

Below is the crawler code for this example. If you run into problems, leave me a comment; if you have a better solution, feel free to message me.
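In short (as reverse-engineered in the Zhihu answer above): the weapi endpoint expects the JSON payload to be AES-CBC encrypted twice, first with the fixed nonce '0CoJUm6Qyw8W8jud' and then with a random 16-character key, plus that random key encrypted with NetEase's RSA public key as encSecKey. The Encrypt_id class below reproduces exactly this pipeline.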

# Note: each song's id must be encrypted to obtain the params and encSecKey values,
# which are combined into data and POSTed to the url together
import base64
import json
from binascii import hexlify
import os
from bs4 import BeautifulSoup
import requests
from Crypto.Cipher import AES
from scrapy.selector import Selector

class Encrypt_id():  # takes a song-id payload and returns data = {'params': ..., 'encSecKey': ...}
    def __init__(self):
        self.pub_key = '010001'
        self.modulus = '00e0b509f6259df8642dbc35662901477df22677ec152b5ff68ace615bb7b725152b3ab17a876aea8a5aa76d2e417629ec4ee341f56135fccf695280104e0312ecbda92557c93870114af6c9d05c4f7f0c3685b7a46bee255932575cce10b424d813cfe4875d3e82047b97ddef52741d546b8e289dc6935b3ece0462db0a22b8e7'
        self.nonce = '0CoJUm6Qyw8W8jud'

    def create_secret_key(self, size):  # random 16-character hex string used as the second AES key
        return hexlify(os.urandom(size))[:16].decode('utf-8')

    def aes_encrypt(self, text, key):
        iv = '0102030405060708'
        pad = 16 - len(text) % 16
        text = text + pad * chr(pad)  # PKCS#7-style padding to a 16-byte boundary
        encryptor = AES.new(key.encode('utf-8'), AES.MODE_CBC, iv.encode('utf-8'))  # key/iv must be bytes in Python 3
        result = encryptor.encrypt(text.encode('utf-8'))
        result_str = base64.b64encode(result).decode('utf-8')
        return result_str

    def rsa_encrypt(self, text, pubKey, modulus):
        text = text[::-1]  # NetEase's JS reverses the key string before encrypting
        rs = pow(int(hexlify(text.encode('utf-8')), 16), int(pubKey, 16), int(modulus, 16))
        return format(rs, 'x').zfill(256)
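    # Note: this is "textbook" RSA (plain modular exponentiation, no padding) applied to
    # the reversed random key, mirroring what NetEase's own JavaScript does; it is not a
    # general-purpose RSA implementation.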

    def main(self, text):
        # text = {'ids': [64006], 'br': br, 'csrf_token': ''}
        # ids is the song's id, br (bit rate) defaults to 128000, and csrf_token may be empty
        text = json.dumps(text)
        i = self.create_secret_key(16)
        encText = self.aes_encrypt(text, self.nonce)
        encText = self.aes_encrypt(encText, i)
        encSecKey = self.rsa_encrypt(i, self.pub_key, self.modulus)
        data = {'params': encText, 'encSecKey': encSecKey}
        return data
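
# A minimal usage sketch for Encrypt_id on its own (64006 is just the example id
# from the comment above, not a special value):
#   enc = Encrypt_id()
#   data = enc.main({'ids': [64006], 'br': 128000, 'csrf_token': ''})
#   # data['params'] is a base64 string; data['encSecKey'] is a 256-digit hex string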

class WYY():
    def __init__(self):
        self.start_url = 'https://music.163.com/discover/playlist/'
        self.base_url = 'https://music.163.com'
        self.headers = {'user-agent': 'Mozilla/5.0'}
        self.EI = Encrypt_id()  # instantiate the encryptor

    def get_page(self,url):
        try:
            r = requests.get(url, headers=self.headers)
            r.raise_for_status()
            r.encoding = r.apparent_encoding
            return r.text
        except Exception as e:
            print(e)

    def get_playlist_num(self,url):  # find out how many pages of playlists there are
        html = self.get_page(url)
        soup = BeautifulSoup(html, 'html.parser')
        pages = soup.find('div', {'class': {'u-page'}})
        page = pages('a')[-2].text.strip()  # 38 pages in total (as a string)
        return int(page)

    def get_playlist(self,url):  # collect every playlist across all pages
        num = self.get_playlist_num(url)  # 38 pages in total
        playlist = []
        catelist = []
        for i in range(num):
            page_url = url + '?order=hot&offset=' + str(35 * i)  # each /discover/playlist page holds 35 playlists
            html = self.get_page(page_url)
            soup = BeautifulSoup(html, 'html.parser')
            all_a = soup.find_all('a', {'class': {'msk'}})
            for a in all_a:
                href = a['href']
                playlist_url = self.base_url + href
                playlist.append(playlist_url)
                title = a['title'].strip()
                title = title.replace('/', '').replace(' ', '').replace('|', '')  # strip characters that are awkward in folder names
                catelist.append(title)
        # print(len(playlist),playlist)
        return playlist, catelist

    def get_songsurl(self,item):  # get the url of every song in one playlist
        html = self.get_page(item)
        sel = Selector(text=html)  # use scrapy's Selector here
        songurls = sel.xpath('//ul[@class="f-hide"]/li/a/@href').extract()
        # print(songurls)  # ['/song?id=1301574905', '/song?id=99181', ..., '/song?id=5280097']
        return songurls
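    # The same hrefs could be pulled without scrapy, e.g. an equivalent
    # BeautifulSoup sketch:
    #   soup = BeautifulSoup(html, 'html.parser')
    #   songurls = [a['href'] for a in soup.select('ul.f-hide li a')]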

    def get_info(self,song):
        song_url = self.base_url + song
        html = self.get_page(song_url)
        soup = BeautifulSoup(html, 'html.parser')
        name = soup.find('em', {'class': {'f-ff2'}}).text.strip()
        singer = soup.find('p', {'class': {'s-fc4'}})('span')[0]['title']
        singer = singer.replace('/', '_').replace(' ', '')  # handle songs credited to multiple singers
        return singer, name

    def get_truth_songurl(self,song):  # encrypt the song id via EI.main to get data = {'params': ..., 'encSecKey': ...}, then POST it to obtain the song's real url
        ids = song.split('=')[-1]
        br = 128000  # bit rate, 128000 by default
        text = {'ids': [ids], 'br': br, 'csrf_token': ''}
        data = self.EI.main(text)
        url = 'http://music.163.com/weapi/song/enhance/player/url?csrf_token='
        r = requests.post(url, data=data, headers=self.headers)
        info = json.loads(r.text)
        truth_url = info['data'][0]['url']  # the song's real download address
        return truth_url
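    # Note: for VIP-only or region-restricted tracks the returned 'url' field can be
    # null; download() will then raise, and main() catches the error and skips the song.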

    def download(self,song, cate):
        singer, name = self.get_info(song)  # get each song's singer and name
        truth_url = self.get_truth_songurl(song)
        path1 = './网易云歌单/' + cate + '/'
        if not os.path.exists(path1):
            os.makedirs(path1)
        path = path1 + singer + '_' + name + '.mp3'
        r = requests.get(truth_url)
        with open(path, 'wb') as f:
            f.write(r.content)
        print('{}---{}--done!'.format(singer,name))

    def main(self):
        playlist, catelist = self.get_playlist(self.start_url)
        for i in range(len(playlist)):
            print('======= downloading playlist {} of {} ======='.format(i + 1, len(playlist)))
            songsurl = self.get_songsurl(playlist[i])
            print('current playlist ===> {}'.format(catelist[i]))
            for song in songsurl:  # e.g. '/song?id=1301574905'
                try:
                    self.download(song, catelist[i])
                except Exception:  # skip songs that fail to download
                    continue

if __name__ == '__main__':
    wyy_music = WYY()
    wyy_music.main()
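
If you only want a single playlist instead of all of them, the same class can be reused directly. A minimal sketch (the playlist id below is a made-up example):

wyy = WYY()
songs = wyy.get_songsurl('https://music.163.com/playlist?id=123456')  # hypothetical playlist id
for song in songs:
    try:
        wyy.download(song, 'my_playlist')  # saved under ./网易云歌单/my_playlist/
    except Exception:
        continue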

Console output: (screenshot, partial; there is far too much to show in full)

Saved files: (screenshot, partial; there is far too much to show in full)

Object-oriented crawl complete!

---------(。・ω・。)(。・ω・。)(。・ω・。)(。・ω・。)(。・ω・。)(。・ω・。)(。・ω・。)(。・ω・。)(。・ω・。)(。・ω・。)(。・ω・。)(。・ω・。)----------

Note: from now on I should write all my code in this object-oriented style! (Feel free to hold me to it!!!)

Today's crawler: done!

Today's motivational quote: stick to what you believe in, cherish the good things you have, never be careless or sloppy about life, and let every day be full of sunshine. Only then will life be clear and bright!

Keep it up ヾ(◍°∇°◍)ノ゙

Reposted from: https://my.oschina.net/pansy0425/blog/3027542
