Python爬取网易云各类音乐的详细信息

python爬虫:通过selenium+requests爬取各类音乐的详细信息
完整代码如下:

from selenium import webdriver
from lxml import etree
import requests
import time

class MusicSpider(object):
    """Scrape playlist details for every category on NetEase Cloud Music.

    Selenium drives the category / playlist listing pages (which are rendered
    inside the ``contentFrame`` iframe), while plain ``requests`` fetches each
    playlist's detail page — the server-rendered response contains the data
    the in-browser DOM does not expose.  Each parsed playlist dict is appended
    to ``music.txt``.
    """

    def __init__(self):
        # Raw string so the backslashes in the Windows path are literal.
        # NOTE(review): passing the driver path positionally is deprecated in
        # Selenium 4 — migrate to Service(...) if the project upgrades.
        self.driver = webdriver.Chrome(r"D:\pythonSpider\soft\chromedriver.exe")
        self.start_url = "https://music.163.com/#/discover/playlist/"
        self.headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36"}

    def get_sort_list(self):
        """Return the category <a> elements from the opened category box."""
        sort_list = self.driver.find_elements_by_xpath(
            "//div[@id='cateListBox']/div/dl/dd/a")
        print(sort_list)
        return sort_list

    def get_music_list(self):
        """Return the playlist cover-link elements on the current page."""
        return self.driver.find_elements_by_css_selector(".msk")

    @staticmethod
    def _first(nodes, default=""):
        """Return the first xpath match, or *default* when there is none.

        Guards the previously-unchecked ``[0]`` indexing, which raised
        IndexError whenever a selector matched nothing.
        """
        return nodes[0] if nodes else default

    def get_content_dic(self, href, cookies):
        """Fetch one playlist detail page and parse it into a dict.

        :param href: absolute URL of the playlist page
        :param cookies: cookie dict forwarded from the Selenium session
        :return: dict with name, fav/comment counts, intro text and song list
        """
        response = requests.get(href, headers=self.headers, cookies=cookies)
        html = etree.HTML(response.content.decode())
        item = {}
        item["music_name"] = self._first(
            html.xpath("//h2[@class='f-ff2 f-brk']/text()"))
        item["music_data_count"] = self._first(
            html.xpath("//a[@class='u-btni u-btni-fav ']/@data-count"))
        item["music_comment_count"] = self._first(
            html.xpath("//span[@id='cnt_comment_count']/text()"))
        item["music_intr"] = self._first(
            html.xpath("//p[@class='intr f-brk']/text()"))
        # The intro is split by <br/> tags; the text after each break lives in
        # the element's ``tail``, which may be None for consecutive breaks.
        for intr in html.xpath("//p[@class='intr f-brk']/br"):
            item["music_intr"] += intr.tail or ""
        song_list = []
        for li in html.xpath("//ul[@class='f-hide']/li"):
            song_list.append({
                "song_title": self._first(li.xpath("./a/text()")),
                "song_href": self._first(li.xpath("./a/@href")),
            })
        item["item_song_list"] = song_list
        print(item)
        return item

    # Persist one record per line (str(dict) format).
    # BUG FIX: the original used a tab-indented comment and a misaligned
    # ``def`` line here, which is a SyntaxError under Python 3.
    def save_content(self, content):
        """Append *content* to music.txt, one record per line."""
        with open("music.txt", "a", encoding="utf-8") as f:
            f.write(str(content))
            f.write("\n")

    def run(self):
        """Main crawl loop: iterate categories, then each playlist in them."""
        self.driver.get(self.start_url)
        # Everything of interest is rendered inside the "contentFrame" iframe.
        self.driver.switch_to.frame("contentFrame")
        self.driver.find_element_by_xpath(
            "//div[@class='u-title f-cb']//a[@id='cateToggleLink']").click()
        sort_list = self.get_sort_list()
        # Indexed loops are deliberate: the element lists go stale after each
        # navigation and are re-fetched inside the loop body, while the index
        # keeps our position.
        for j in range(len(sort_list)):
            sort_list[j].click()
            time.sleep(3)
            music_list = self.get_music_list()
            print(music_list)
            for i in range(len(music_list)):
                href = music_list[i].get_attribute("href")
                print(href)
                music_list[i].click()
                time.sleep(3)
                # Forward one browser cookie to requests so the detail page
                # is served the same session state as the browser.
                last_cookie = self.driver.get_cookies()[-1]
                cookies = {
                    "domain": last_cookie["domain"],
                    "name": last_cookie["name"],
                    "value": last_cookie["value"],
                    "path": last_cookie["path"],
                }
                time.sleep(3)
                # BUG FIX: the parsed item was discarded and an undefined
                # name ``content`` was passed to save_content (NameError).
                item = self.get_content_dic(href, cookies)
                self.save_content(item)
                print('*' * 20)
                self.driver.back()
                time.sleep(3)
                # Must re-enter the iframe after navigating back, then
                # re-query the now-stale playlist elements.
                self.driver.switch_to.frame("contentFrame")
                music_list = self.get_music_list()
                print(music_list)
                time.sleep(3)
            self.driver.back()
            time.sleep(3)
            self.driver.switch_to.frame("contentFrame")
            sort_list = self.get_sort_list()
            time.sleep(3)
        self.driver.quit()


if __name__ == "__main__":
    # Entry point: build the spider and kick off the crawl.
    spider = MusicSpider()
    spider.run()

所遇问题:

  • 批量操作循环点击的时候,返回上一页会报错:selenium.common.exceptions.StaleElementReferenceException: Message: stale element reference: element is not attached to the page document,因为再次返回上一页时,原先列表元素已经过期了,因此需要再次重新刷新列表元素
    参考链接
    改正后:
		for i in range(music_list_len):
                href = music_list[i].get_attribute("href")
                print(href)
                music_list[i].click()
                # 获取音乐内容
                time.sleep(3)
                # 获取cookies值
                cookies = self.driver.get_cookies()
                cookies = cookies[-1]
                cookies ={
                    "domain": cookies["domain"],
                    "name": cookies["name"],
                    "value": cookies["value"],
                    "path": cookies["path"]
                }
                time.sleep(3)
                item = self.get_content_dic(href,cookies)
                # 保存
                self.save_content(item)
                print('*'*20)
                #回到上一页
                self.driver.back()
                time.sleep(3)
                #必须再次定位到frame
                #self.driver.switch_to.frame("contentFrame")
                #重新加载循环列表
                music_list = self.get_music_list()
                print(music_list)
                time.sleep(3)
           

运行后,却发现重新定位列表为空,后来发现重新定位循环列表时,因为是在iframe中,所以必须再次进入iframe中

  				self.driver.back()
                time.sleep(3)
                #必须再次定位到frame
                self.driver.switch_to.frame("contentFrame")
                #重新加载循环列表
                music_list = self.get_music_list()
  • 在获取音乐详情时,用了
def get_content_dic(self,href,cookies):
        response = requests.get(href, headers=self.headers, cookies=cookies)
        html_str = response.content.decode()
        html = etree.HTML(html_str)
        item = {}
        item["music_name"] = html.xpath("//h2[@class='f-ff2 f-brk']/text()")[0]
        item["music_data_count"] = html.xpath("//a[@class='u-btni u-btni-fav ']/@data-count")[0]
        item["music_comment_count"] = html.xpath("//span[@id='cnt_comment_count']/text()")[0]
        item["music_intr"] = html.xpath("//p[@class='intr f-brk']/text()")[0]
        intr_list = html.xpath("//p[@class='intr f-brk']/br")
        for intr in intr_list:
            item["music_intr"] += intr.tail
        song_list = []
        li_list = html.xpath("//ul[@class='f-hide']/li")
        for li in li_list:
            song = {}
            song["song_title"]= li.xpath("./a/text()")[0]
            song["song_href"]=li.xpath("./a/@href")[0]
            # song_list["song_time"] = tr.xpath(".//span[@class='u-dur']/text()")
            # song_list["song_singer"] = tr.xpath(".//div[@class='text']/@title")
            # song_list["song_name"] = tr.xpath(".//div[@class='text']/a/@title")
            song_list.append(song)
        item["item_song_list"] =song_list
        print(item)
        return item

在用xpath定位元素时,总是出错,后来发现Element和Network中response返回的页面是不同的,故应该以response中为准,定位元素

  • 0
    点赞
  • 3
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值