import csv
import time
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
def crawl_kuwo_top60(url):
    """Scrape the first three pages of a Kuwo playlist page.

    Appends one row per song — [rank, title, artist, duration] — to
    'kuwoTop100.csv'. Pagination is driven with a JavaScript click because
    the page URL does not change when moving to the next page.

    Parameters
    ----------
    url : str
        The Kuwo playlist detail page to crawl.
    """
    driver = webdriver.Chrome()
    try:
        driver.get(url)
        # Open the CSV once for the whole run instead of re-opening per page.
        with open('kuwoTop100.csv', mode='a', newline='', encoding='utf-8') as file:
            writer = csv.writer(file)
            for page_count in range(3):  # only crawl the first three pages
                # Wait until the pagination widget exists, i.e. the page has rendered.
                WebDriverWait(driver, 10).until(
                    EC.presence_of_element_located(
                        (By.CSS_SELECTOR, ".page-wrap.pagination")))
                # Parse the rendered page with BeautifulSoup.
                soup = BeautifulSoup(driver.page_source, 'html.parser')
                current_page_span = soup.find(
                    'span', class_='notCursor currentPage').text.strip()
                print("正在爬取第 " + current_page_span + " 页---")
                # One <li class="song_item flex_c"> per song row.
                for song in soup.find_all('li', class_='song_item flex_c'):
                    rank = song.select_one(
                        "li > div.song_rank.flex_c > div ").text.strip()
                    title = song.select_one('.song_name > a').text.strip()
                    artist = song.select_one('.song_artist > span').text.strip()
                    times = song.select_one('.song_time > span').text.strip()
                    writer.writerow([rank, title, artist, times])
                if page_count < 2:  # click "next" except after the last page
                    next_button = driver.find_element(
                        By.CSS_SELECTOR,
                        ".page-wrap.pagination .li-page.iconfont.icon-icon_pagedown")
                    # JS click: works even when the element is not clickable
                    # via a normal click (URL does not change between pages).
                    driver.execute_script("arguments[0].click();", next_button)
                    time.sleep(3)
    finally:
        # Always release the browser, even if scraping raised.
        driver.quit()
if __name__ == "__main__":
    # Guarded entry point: importing this module must not launch a browser.
    # The playlist id in the path selects which chart to crawl.
    url = 'https://www.kuwo.cn/playlist_detail/1082685104'
    crawl_kuwo_top60(url)
# NOTE: Clicking "next page" does not change the URL, so the click is driven
# with JavaScript — this also works when the element is not directly clickable:
#     element = driver.find_element(By.CSS_SELECTOR, "i.li-page.iconfont.icon-icon_pagedown")
#     driver.execute_script("arguments[0].click();", element)
# (driver.find_element_by_css_selector was removed in Selenium 4; use
# driver.find_element(By.CSS_SELECTOR, ...) instead.)