A winter-break crawler exercise. I had previously scraped the Douban Top 250, and to get more practice I decided to modify that code to scrape Bilibili's trending-video ranking for February 3rd:
https://www.bilibili.com/v/popular/rank/all
# -*- coding: utf-8 -*-
# @Time : 2021/2/2 17:08
# @Author : 楠缘
# @Email : 2251354955@163.com
# @Software: pycharm
# @Blog :https://blog.csdn.net/m0_51586991
from bs4 import BeautifulSoup
import re
import urllib.request, urllib.error
import xlwt

findlink = re.compile('<a href="//(.*?)"')  # video link
findtitle = re.compile('<a class="title" href=".*_blank">(.*)</a>')  # video title
findrating = re.compile(r'<div class="detail">.*?<i class="b-icon play"></i>\n (.*?)\n </span>', re.S)  # play count (播放量)
findview = re.compile(r'<span class="data-box"><i class="b-icon view"></i>\n (.*?)\n </span>', re.S)  # danmaku count (弹幕量)
findplay = re.compile(r'<span class="data-box up-name"><i class="b-icon author"></i>\n (.*?)\n </span>', re.S)  # author (作者)
findwww = re.compile(r'<div class="pts"><div>(.*)</div>')  # popularity score (热度)

def main():
    baseurl = "https://www.bilibili.com/v/popular/rank/all"
    datalist = getData(baseurl)
    savepath = "B站热门2月3日.xls"
    saveData(datalist, savepath)

def getData(baseurl):
    datalist = []
    html = askURL(baseurl)  # fetch the page source
    soup = BeautifulSoup(html, "html.parser")
    for content in soup.find_all('div', class_="content"):  # each ranked video sits in a div.content block
        data = []  # all the fields for one video
        content = str(content)
        link = re.findall(findlink, content)
        data.append(link[0] if link else "")
        title = re.findall(findtitle, content)
        data.append(title[0] if title else "")
        rating = re.findall(findrating, content)
        data.append(rating[0] if rating else "")
        view = re.findall(findview, content)
        data.append(view[0] if view else "")
        play = re.findall(findplay, content)
        data.append(play[0] if play else "")
        www = re.findall(findwww, content)
        data.append(www[0] if www else "")
        datalist.append(data)
    print(datalist)
    return datalist

def askURL(url):
    head = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.104 Safari/537.36"}
    request = urllib.request.Request(url, headers=head)
    html = ""
    try:
        response = urllib.request.urlopen(request)
        html = response.read().decode("utf-8")
    except urllib.error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
    return html

def saveData(datalist, savepath):
    book = xlwt.Workbook(encoding="utf-8")
    sheet = book.add_sheet('B站热门2月3日.xls', cell_overwrite_ok=True)
    col = ("视频链接", "视频名称", "播放量", "弹幕量", "作者", "热度")
    for i in range(0, 6):
        sheet.write(0, i, col[i])  # header row
    for i in range(len(datalist)):
        print("第%d条" % i)
        data = datalist[i]
        for j in range(0, 6):
            sheet.write(i + 1, j, data[j])
    book.save(savepath)

if __name__ == '__main__':
    main()
    print('爬取完毕')
I ran into quite a bit of trouble while writing this code, and some of it still looks messy. In particular, the patterns for play count, danmaku count and author (findrating, findview, findplay) have to match long runs of literal spaces from the page source, which is a problem I have not been able to solve yet. If anyone can help with this, I would be very grateful.
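One possible workaround (just a sketch, not verified against the live page; the HTML below is an assumed snippet, not taken from the real page) is to let the regex match any run of whitespace with \s* instead of hard-coding the spaces:

# Sketch: match the surrounding whitespace with \s* rather than literal spaces,
# so the pattern no longer depends on the page's exact indentation.
import re

findview = re.compile(r'<span class="data-box"><i class="b-icon view"></i>\s*(.*?)\s*</span>', re.S)

sample = '<span class="data-box"><i class="b-icon view"></i>\n        1.2万\n      </span>'  # assumed example
print(findview.findall(sample))  # ['1.2万']

The same change would apply to findrating and findplay; alternatively, the captured strings can simply be cleaned up afterwards with .strip().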