难点
主要是在抓包分析怎么实现分页获取,通过抓包分析,我们知道每次请求获取视频的时候,返回的数据中都会有一个next_cursor属性
当请求到最后一页时,返回的next_cursor为-1;第一次请求时传入的cursor为0
通过这样分析,我们就可以很容易获取到想要的数据了
获取数据
请求头
# HTTP headers reused for every request to weibo.com.
# NOTE(review): the Cookie is a captured, logged-in session token and will
# expire — replace it with a fresh value before running the crawler.
headers = {
    'Cookie': 'SINAGLOBAL=666731024991.3516.1692108672688; SUB=_2A25J3_fyDeRhGeBK41IY9CbIzDyIHXVrI5m6rDV8PUJbkNANLRnTkW1NR5DGKlU-7QQXFUn8kCO7tHZWHa2lstgT; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WhfqYIORlaYHk9cTmVipRvw5NHD95QcShn71KBRShM7Ws4Dqcj.i--ciK.RiKLsi--ci-88iKyFi--ci-2Ei-2ci--Ni-2EiKy8; XSRF-TOKEN=j1xKHXp5NhQe6PQfbcsrCsbK; _s_tentry=weibo.com; Apache=7785653743983.5625.1692144415457; ULV=1692144415526:3:3:3:7785653743983.5625.1692144415457:1692112430474; WBPSESS=-hAVHrSkMC8S4jXFC4-lqnFhFtNlrZmAL-r5XJQqEZxHyzIHGIPV0NHIisq9ALS32gvYmuib-sMSj5_PnjsfSxrsrk42crVW65GaxDPbJhzjy5gXZg19j07R0Qy1OJfS5oZKop_lRJX1ZHkKrd5B7w==',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36 Edg/115.0.1901.203',
    'Referer': 'https://weibo.com/u/5620452341?tabtype=newVideo'
}
获取数据
def get_page_info(user_id, next_cursor):
    """Fetch one page of the user's video waterfall feed.

    Returns the parsed JSON payload on HTTP 200, otherwise None
    (including when the request itself fails).
    """
    api_url = 'https://weibo.com/ajax/profile/getWaterFallContent'
    query = {
        'uid': user_id,
        'cursor': next_cursor,
    }
    try:
        response = requests.get(url=api_url, headers=headers, params=query)
    except requests.exceptions.RequestException as err:
        print(err)
        return None
    return response.json() if response.status_code == 200 else None
解析数据
def parse_json_data(json_data):
    """Download every video listed in one page of the feed.

    json_data: JSON payload returned by get_page_info (may be None).
    Returns the page's next_cursor (-1 on the last page), or None when
    the payload is missing or malformed.
    """
    try:
        videos = json_data['data']['list']
        for video in videos:
            media_info = video['page_info']['media_info']
            video_url = media_info['mp4_hd_url']
            video_title = deal_special_words(media_info['kol_title'])
            video_author = media_info['author_name']
            # Fetch the bytes before opening the output file so a failed
            # request does not leave an empty file behind.
            video_content = requests.get(url=video_url, headers=headers).content
            # BUGFIX: the directory that was checked/created (an absolute
            # E:/... path) differed from the directory written to (the
            # relative 'videos/...'); use the same relative path for both.
            os.makedirs(f'videos/{video_author}', exist_ok=True)
            path = video_title + '.' + 'mp4'
            with open(f'videos/{video_author}/{path}', mode='wb') as f:
                f.write(video_content)
            print(f'{video_title}下载完毕')
        return json_data['data']['next_cursor']
    except Exception as e:
        # Best-effort crawler: report and signal failure to the caller.
        print(e)
        return None
处理特殊字符
def deal_special_words(word):
    """Sanitize a video title for use as a file name.

    Removes every character in the illegal set (which deliberately
    includes both the backslash and the forward slash), drops newlines,
    and trims surrounding whitespace.
    """
    # Raw string: the original non-raw '\/' was an invalid escape that
    # silently kept the backslash; spell the set out explicitly.
    illegal = r'\/:*?"<>|.,;,。;#??"‘’!@“#¥%……&*()!@#$%^&*()'
    # One C-level pass instead of dozens of chained .replace() calls.
    word = word.translate(str.maketrans('', '', illegal))
    word = word.replace('\n', '')
    # BUGFIX: str.strip() returns a new string — the original discarded it.
    return word.strip()
主函数
def main(user_id):
    """Page through the user's video feed until next_cursor == -1."""
    next_cursor = 0
    while True:
        json_data = get_page_info(user_id=user_id, next_cursor=next_cursor)
        # BUGFIX: on request/parse failure the original fed None back in
        # as the cursor and looped forever; stop instead.
        if json_data is None:
            print('请求失败,停止爬取')
            break
        next_cursor = parse_json_data(json_data)
        if next_cursor is None:
            print('解析失败,停止爬取')
            break
        print(f'当前页的cursor:{next_cursor}')
        if next_cursor == -1:
            print('已经到底了...')
            break
效果展示
完整代码
import os
from pprint import pprint
import requests
# HTTP headers reused for every request to weibo.com.
# NOTE(review): the Cookie is a captured, logged-in session token and will
# expire — replace it with a fresh value before running the crawler.
headers = {
    'Cookie': 'SINAGLOBAL=666731024991.3516.1692108672688; SUB=_2A25J3_fyDeRhGeBK41IY9CbIzDyIHXVrI5m6rDV8PUJbkNANLRnTkW1NR5DGKlU-7QQXFUn8kCO7tHZWHa2lstgT; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WhfqYIORlaYHk9cTmVipRvw5NHD95QcShn71KBRShM7Ws4Dqcj.i--ciK.RiKLsi--ci-88iKyFi--ci-2Ei-2ci--Ni-2EiKy8; XSRF-TOKEN=j1xKHXp5NhQe6PQfbcsrCsbK; _s_tentry=weibo.com; Apache=7785653743983.5625.1692144415457; ULV=1692144415526:3:3:3:7785653743983.5625.1692144415457:1692112430474; WBPSESS=-hAVHrSkMC8S4jXFC4-lqnFhFtNlrZmAL-r5XJQqEZxHyzIHGIPV0NHIisq9ALS32gvYmuib-sMSj5_PnjsfSxrsrk42crVW65GaxDPbJhzjy5gXZg19j07R0Qy1OJfS5oZKop_lRJX1ZHkKrd5B7w==',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36 Edg/115.0.1901.203',
    'Referer': 'https://weibo.com/u/5620452341?tabtype=newVideo'
}
def deal_special_words(word):
    """Sanitize a video title for use as a file name.

    Removes every character in the illegal set (which deliberately
    includes both the backslash and the forward slash), drops newlines,
    and trims surrounding whitespace.
    """
    # Raw string: the original non-raw '\/' was an invalid escape that
    # silently kept the backslash; spell the set out explicitly.
    illegal = r'\/:*?"<>|.,;,。;#??"‘’!@“#¥%……&*()!@#$%^&*()'
    # One C-level pass instead of dozens of chained .replace() calls.
    word = word.translate(str.maketrans('', '', illegal))
    word = word.replace('\n', '')
    # BUGFIX: str.strip() returns a new string — the original discarded it.
    return word.strip()
def get_page_info(user_id, next_cursor):
    """Fetch one page of the user's video waterfall feed.

    Returns the parsed JSON payload on HTTP 200, otherwise None
    (including when the request itself fails).
    """
    api_url = 'https://weibo.com/ajax/profile/getWaterFallContent'
    query = {
        'uid': user_id,
        'cursor': next_cursor,
    }
    try:
        response = requests.get(url=api_url, headers=headers, params=query)
    except requests.exceptions.RequestException as err:
        print(err)
        return None
    return response.json() if response.status_code == 200 else None
def parse_json_data(json_data):
    """Download every video listed in one page of the feed.

    json_data: JSON payload returned by get_page_info (may be None).
    Returns the page's next_cursor (-1 on the last page), or None when
    the payload is missing or malformed.
    """
    try:
        videos = json_data['data']['list']
        for video in videos:
            media_info = video['page_info']['media_info']
            video_url = media_info['mp4_hd_url']
            video_title = deal_special_words(media_info['kol_title'])
            video_author = media_info['author_name']
            # Fetch the bytes before opening the output file so a failed
            # request does not leave an empty file behind.
            video_content = requests.get(url=video_url, headers=headers).content
            # BUGFIX: the directory that was checked/created (an absolute
            # E:/... path) differed from the directory written to (the
            # relative 'videos/...'); use the same relative path for both.
            os.makedirs(f'videos/{video_author}', exist_ok=True)
            path = video_title + '.' + 'mp4'
            with open(f'videos/{video_author}/{path}', mode='wb') as f:
                f.write(video_content)
            print(f'{video_title}下载完毕')
        return json_data['data']['next_cursor']
    except Exception as e:
        # Best-effort crawler: report and signal failure to the caller.
        print(e)
        return None
def main(user_id):
    """Page through the user's video feed until next_cursor == -1."""
    next_cursor = 0
    while True:
        json_data = get_page_info(user_id=user_id, next_cursor=next_cursor)
        # BUGFIX: on request/parse failure the original fed None back in
        # as the cursor and looped forever; stop instead.
        if json_data is None:
            print('请求失败,停止爬取')
            break
        next_cursor = parse_json_data(json_data)
        if next_cursor is None:
            print('解析失败,停止爬取')
            break
        print(f'当前页的cursor:{next_cursor}')
        if next_cursor == -1:
            print('已经到底了...')
            break
if __name__ == '__main__':
    # Script entry point: prompt for a numeric Weibo user id and crawl
    # that user's videos page by page.
    user_id = input("请输入您要爬取的用户id:")
    main(user_id=user_id)