def getinfo(url):
    """Scrape one Kugou chart page and print rank/title/duration per song.

    Fetches *url* with the module-level ``headers`` dict, parses the HTML,
    and prints one ``{'rank', 'title', 'time'}`` dict per track.  Also
    returns the list of those dicts so callers can reuse the data
    (previously the function returned ``None``; returning the list is
    backward-compatible).

    NOTE(review): relies on ``requests``, ``BeautifulSoup`` and ``headers``
    being defined elsewhere in this file — confirm they are imported/set
    before this is called.
    """
    wb_data = requests.get(url, headers=headers)
    soup = BeautifulSoup(wb_data.text, 'html.parser')
    # Deliberately loose CSS selectors: a fully-qualified selector such as
    #   '#rankWrap > div.pc_temp_songlist > ul > li:nth-of-type(1) > span...'
    # pins a single list item and captures only a row or two, while the
    # class-based selectors below match every entry on the page.
    # (Selector syntax: '#' = id, '>' = direct child, '.' = class.)
    ranks = soup.select('span.pc_temp_num')
    titles = soup.select('div.pc_temp_songlist > ul > li > a')
    times = soup.select('span.pc_temp_tips_r > span')
    results = []
    # Loop variable named `duration` (not `time`) to avoid shadowing the
    # stdlib `time` module.
    for rank, title, duration in zip(ranks, titles, times):
        data = {
            'rank': rank.get_text().strip(),
            'title': title.get_text().strip(),
            'time': duration.get_text().strip(),
        }
        print(data)
        results.append(data)
    return results
# NOTE(review): this section is a byte-for-byte duplicate of the body of
# `getinfo` above — almost certainly a copy/paste leftover.  Consider
# replacing it with a single `getinfo(url)` call once it is confirmed this
# code runs at module level (indentation was lost in this copy, so the
# nesting cannot be verified here).  It also presumes `url` and `headers`
# are defined earlier in the file — verify against the full script.
wb_data = requests.get(url, headers=headers)
soup = BeautifulSoup(wb_data.text, 'html.parser')
# On whether to write the full CSS selector path or only a partial one:
# ranks = soup.select('#rankWrap > div.pc_temp_songlist > ul > li:nth-of-type(1) > span.pc_te
# The fully-qualified form above only captures a couple of rows; the
# shorter, class-based selectors below match every entry on the page.
# (Selector syntax: '#' = id, '>' = direct child, '.' = class.)
ranks = soup.select('span.pc_temp_num')
titles = soup.select('div.pc_temp_songlist > ul > li > a')
times = soup.select('span.pc_temp_tips_r > span')
# Prints one record per song; `zip` stops at the shortest of the three
# selector result lists, so ragged pages simply yield fewer rows.
for rank, title, time in zip(ranks, titles, times):
    data = {
        'rank': rank.get_text().strip(),
        'title': title.get_text().strip(),
        'time': time.get_text().strip()
    }
    print(data)