Scraping Bilibili Following Lists with Python

I. Database Design and Operations

1. Analyzing the data

Bilibili's following list is served by

https://api.bilibili.com/x/relation/followings?vmid=UID&pn=1&ps=50&order=desc&order_type=attention

where a single page holds at most 50 entries.
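
As a quick sanity check, here is a minimal sketch of requesting one page of that endpoint with requests (the UID used below is only a placeholder; depending on Bilibili's current policy, extra headers or cookies may also be needed):

import requests

# minimal sketch: fetch page 1 of a following list (the vmid below is a placeholder UID)
resp = requests.get(
    "https://api.bilibili.com/x/relation/followings",
    params={"vmid": 2, "pn": 1, "ps": 50, "order": "desc", "order_type": "attention"},
)
payload = resp.json()
print(payload["code"])                 # 0 on success
print(len(payload["data"]["list"]))    # at most 50 entries per page (assumes the list is public)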

Let's take a rough look at the returned data:

{
	"code": 0,
	"message": "0",
	"ttl": 1,
	"data": {
		"list": [{……

First, the list contents live in data.list.

Second, each item in the list carries the following fields:


			"mid": 672353429,
			"attribute": 2,
			"mtime": 1630510107,
			"tag": null,
			"special": 0,
			"contract_info": {
				"is_contractor": false,
				"ts": 0,
				"is_contract": false,
				"user_attr": 0
			},
			"uname": "贝拉kira",
			"face": "http://i2.hdslb.com/bfs/face/668af440f8a8065743d3fa79cfa8f017905d0065.jpg",
			"sign": "元气满满的A-SOUL舞担参上~目标TOP IDOL,一起加油!",
			"official_verify": {
				"type": 0,
				"desc": "虚拟偶像团体A-SOUL 所属艺人"
			},
			"vip": {
				"vipType": 2,
				"vipDueDate": 1674576000000,
				"dueRemark": "",
				"accessStatus": 0,
				"vipStatus": 1,
				"vipStatusWarn": "",
				"themeType": 0,
				"label": {
					"path": "",
					"text": "年度大会员",
					"label_theme": "annual_vip",
					"text_color": "#FFFFFF",
					"bg_style": 1,
					"bg_color": "#FB7299",
					"border_color": ""
				},
				"avatar_subscript": 1,
				"nickname_color": "#FB7299",
				"avatar_subscript_url": "http://i0.hdslb.com/bfs/vip/icon_Certification_big_member_22_3x.png"
			}
		

Here, mid is the user's unique UID. For vipType, 0 means no membership, 1 means 大会员 (premium member), and 2 means annual 大会员. In official_verify, type 0 means the account is officially verified and -1 means it is not.

We also find that if the user has locked (hidden) their following list, the API returns

{"code":-400,"message":"请求错误","ttl":1}

2. Designing the database

Based on this, we first design the database with two tables: one for users' basic attributes and one for following relations.

def createDB():

    link=sqlite3.connect('BiliFollowDB.db')
    
    print("database open success")
    
    UserTableDDL='''
                create table if not exists user(
                UID int PRIMARY KEY     NOT NULL,
                NAME varchar            NOT NULL,
                SIGN varchar            DEFAULT NULL,
                vipType int             NOT NULL,
                verifyType int          NOT NULL,
                verifyDesc varchar      DEFAULT NULL)
                '''
    
    RelationTableDDL='''
                create table if not exists relation(
                follower int           NOT NULL,
                following int          NOT NULL,
                followTime int         NOT NULL,
                PRIMARY KEY (follower,following),
                FOREIGN KEY(follower) REFERENCES user(UID),
                FOREIGN KEY(following) REFERENCES user(UID)
                )
                '''
    
    # create user table
    link.execute(UserTableDDL)
    
    # create relation table
    link.execute(RelationTableDDL)
    
    print("database create success")
    
    link.commit()
    link.close()
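
A quick way to confirm that the two tables were actually created is to query sqlite_master (a minimal sketch, run after createDB()):

import sqlite3

# sketch: list the tables created in BiliFollowDB.db
conn = sqlite3.connect('BiliFollowDB.db')
tables = conn.execute("select name from sqlite_master where type='table';").fetchall()
conn.close()
print(tables)   # expected: [('user',), ('relation',)]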

3. Database operations

Next is inserting new users. My approach: after crawling one person's following list, hand the whole list to this function, which checks whether any of the users are new; the new UIDs are returned and become the starting points for the next round of crawling.

def insertUser(infos):
    
    conn=sqlite3.connect('BiliFollowDB.db')
    link=conn.cursor()
    
    InsertCmd="insert into user (UID,NAME,vipType,verifyType,sign,verifyDesc) values (?,?,?,?,?,?);"
    
    ExistCmd="select count(UID) from user where UID='%d';"# % UID
    
    newID=[]
    
    for info in infos:
        answer=link.execute(ExistCmd%info['uid'])
        for row in answer:
            exist_ID=row[0]
        
        if exist_ID==0:
            newID.append(info['uid'])
            link.execute(InsertCmd,(info['uid'],info['name'],info['vipType'],info['verifyType'],info['sign'],info['verifyDesc']))
            
    conn.commit()
    conn.close()
    
    return newID
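
For example, calling it with the single entry from the JSON sample above (a minimal sketch; the dict keys match what getFollowingList builds later):

# sketch: insert one user record and inspect the returned new UIDs
sample = [{
    "uid": 672353429,
    "name": "贝拉kira",
    "vipType": 2,
    "verifyType": 0,
    "sign": "元气满满的A-SOUL舞担参上~目标TOP IDOL,一起加油!",
    "verifyDesc": "虚拟偶像团体A-SOUL 所属艺人",
}]
print(insertUser(sample))   # [672353429] on the first run, [] once the user is already stored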

Next, the function that inserts follow relations; this one is straightforward.

def insertFollowing(uid:int,subscribe):
    
    conn=sqlite3.connect('BiliFollowDB.db')
    link=conn.cursor()
    
    InsertCmd="insert into relation (follower,following,followTime) values (?,?,?);"
    
    for follow in subscribe:
        link.execute(InsertCmd,(uid,follow[0],follow[1]))
        
    conn.commit()
    conn.close()
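
A matching call for the relation table (a minimal sketch; the follower UID 123456 is a placeholder, and each tuple is (following UID, follow timestamp) as collected by the crawler below):

# sketch: record that placeholder user 123456 followed UID 672353429 at the given timestamp
insertFollowing(123456, [(672353429, 1630510107)])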

II. The Crawler

By observation, we find that Uncle Rui (Bilibili) has locked following lists to 5 pages.
Even manual browsing can only reach 5 pages, so there is no way around it: we will just crawl 5 pages.

def getFollowingList(uid:int):
    
    url="https://api.bilibili.com/x/relation/followings?vmid=%d&pn=%d&ps=50&order=desc&order_type=attention&jsonp=jsonp"# % (UID, Page Number)
    
    infos=[]
    
    subscribe=[]
    
    for i in range(1,6):
        html=requests.get(url%(uid,i))
        if html.status_code!=200:
            print("GET ERROR!")
            return []
            
        text=html.text
        dic=json.loads(text)
        
        if dic['code']==-400:
            break
    
        followings=dic['data']['list']
        
        for usr in followings:
            info={}
            info['uid']=usr['mid']
            info['name']=usr['uname']
            info['vipType']=usr['vip']['vipType']
            info['verifyType']=usr['official_verify']['type']
            info['sign']=usr['sign']
            if info['verifyType']==-1:
                info['verifyDesc']='NULL'
            else :
                info['verifyDesc']=usr['official_verify']['desc']
            
            subscribe.append((usr['mid'],usr['mtime']))
            infos.append(info)
        
    newID=insertUser(infos)
    insertFollowing(uid,subscribe)
    
    return newID
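
Putting the pieces together, a minimal usage sketch: create the database, then crawl outward from a root UID, feeding the newly discovered UIDs back in (the root UID below is a placeholder; the work() helper in the complete code below automates this loop):

# sketch: seed the crawl with one root UID (placeholder) and expand one more level
createDB()
frontier = getFollowingList(123456)   # UIDs that were not in the database before
for uid in frontier:
    getFollowingList(uid)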

III. Complete Code

#by concyclics
# -*- coding:UTF-8 -*-

import sqlite3
import json
import requests

def createDB():

    link=sqlite3.connect('BiliFollowDB.db')
    
    print("database open success")
    
    UserTableDDL='''
                create table if not exists user(
                UID int PRIMARY KEY     NOT NULL,
                NAME varchar            NOT NULL,
                SIGN varchar            DEFAULT NULL,
                vipType int             NOT NULL,
                verifyType int          NOT NULL,
                verifyDesc varchar      DEFAULT NULL)
                '''
    
    RelationTableDDL='''
                create table if not exists relation(
                follower int           NOT NULL,
                following int          NOT NULL,
                followTime int         NOT NULL,
                PRIMARY KEY (follower,following),
                FOREIGN KEY(follower) REFERENCES user(UID),
                FOREIGN KEY(following) REFERENCES user(UID)
                )
                '''
    
    # create user table
    link.execute(UserTableDDL)
    
    # create relation table
    link.execute(RelationTableDDL)
    
    print("database create success")
    
    link.commit()
    link.close()
    

def insertUser(infos):
    
    conn=sqlite3.connect('BiliFollowDB.db')
    link=conn.cursor()
    
    InsertCmd="insert into user (UID,NAME,vipType,verifyType,sign,verifyDesc) values (?,?,?,?,?,?);"
    
    ExistCmd="select count(UID) from user where UID='%d';"# % UID
    
    newID=[]
    
    for info in infos:
        answer=link.execute(ExistCmd%info['uid'])
        for row in answer:
            exist_ID=row[0]
        
        if exist_ID==0:
            newID.append(info['uid'])
            link.execute(InsertCmd,(info['uid'],info['name'],info['vipType'],info['verifyType'],info['sign'],info['verifyDesc']))
            
    conn.commit()
    conn.close()
    
    return newID



def insertFollowing(uid:int,subscribe):
    
    conn=sqlite3.connect('BiliFollowDB.db')
    link=conn.cursor()
    
    InsertCmd="insert into relation (follower,following,followTime) values (?,?,?);"
    
    for follow in subscribe:
        try:
            link.execute(InsertCmd,(uid,follow[0],follow[1]))
        except sqlite3.IntegrityError:
            # duplicate (follower,following) pair; log it and move on
            print((uid,follow[0],follow[1]))
        
    conn.commit()
    conn.close()
    
    
    
def getFollowingList(uid:int):
    
    url="https://api.bilibili.com/x/relation/followings?vmid=%d&pn=%d&ps=50&order=desc&order_type=attention&jsonp=jsonp"# % (UID, Page Number)
    
    infos=[]
    
    subscribe=[]
    
    for i in range(1,6):
        html=requests.get(url%(uid,i))
        if html.status_code!=200:
            print("GET ERROR!")
            return []
            
        text=html.text
        dic=json.loads(text)
        
        if dic['code']==-400:
            return []
    
        try:
            followings=dic['data']['list']
        except (KeyError,TypeError):
            return []
        
        for usr in followings:
            info={}
            info['uid']=usr['mid']
            info['name']=usr['uname']
            info['vipType']=usr['vip']['vipType']
            info['verifyType']=usr['official_verify']['type']
            info['sign']=usr['sign']
            if info['verifyType']==-1:
                info['verifyDesc']='NULL'
            else :
                info['verifyDesc']=usr['official_verify']['desc']
            
            subscribe.append((usr['mid'],usr['mtime']))
            infos.append(info)
        
    newID=insertUser(infos)
    insertFollowing(uid,subscribe)
    
    return newID

def getFollowingUid(uid:int):
    # like getFollowingList, but only collects UIDs without touching the database
    url="https://api.bilibili.com/x/relation/followings?vmid=%d&pn=%d&ps=50&order=desc&order_type=attention&jsonp=jsonp"# % (UID, Page Number)
    
    IDs=[]
    
    for i in range(1,6):
        html=requests.get(url%(uid,i))
        if html.status_code!=200:
            print("GET ERROR!")
            return IDs
        
        text=html.text
        dic=json.loads(text)
        
        if dic['code']==-400:
            return IDs
        
        try:
            followings=dic['data']['list']
        except (KeyError,TypeError):
            return IDs
        
        for usr in followings:
            IDs.append(usr['mid'])

    return IDs

def work(root):
    # breadth-first crawl: each round crawls the UIDs newly discovered in the previous round
    IDlist=root
    tmplist=[]
    while len(IDlist)!=0:
        tmplist=[]
        for ID in IDlist:
            print(ID)
            tmplist+=getFollowingList(ID)
            
        IDlist=tmplist
        
def rework():
    # re-scan users already in the database and return any UIDs not stored yet
    conn=sqlite3.connect('BiliFollowDB.db')
    link=conn.cursor()
    
    SelectCmd="select uid from user;"
    
    answer=link.execute(SelectCmd)
    
    IDs=[]
    
    for row in answer:
        IDs.append(row[0])
        
    conn.commit()
    conn.close()
    
    newID=[]
    
    print(IDs)
        
    for ID in IDs:
        ids=getFollowingUid(ID)
        for id in ids:
            if id not in IDs:
                newID.append(id)
    
    return newID

    

if __name__=="__main__":

    createDB()
    
    #work([**put root UID here**,])
    
    
    

IV. Project Repository

If you found this useful, please give the repo a small star.
