import requests
import os
# HTTP headers sent with every request: a desktop-browser identification plus a
# logged-in Weibo session cookie (the ajax endpoints require a valid session).
# NOTE(review): the cookie is account-specific and expires — refresh it when
# the API starts returning login/redirect responses.
headers = {
    # Fixed: the header name is 'User-Agent' (with a hyphen); the original
    # 'User Agent' key meant the server never saw the intended browser string.
    'User-Agent': '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36 Edg/108.0.1462.54''',
    'cookie': '_s_tentry=weibo.com; Apache=7133306648074.258.1680155788157; SINAGLOBAL=7133306648074.258.1680155788157; ULV=1680155788162:1:1:1:7133306648074.258.1680155788157:; SUB=_2A25JIVSkDeRhGeFL71cZ9CfIyTWIHXVqV8FsrDV8PUNbmtANLVb9kW9NQh2ZlCk3EXOGfjQvtJHFoD_0-3dVJYmA; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WFAUgiyyOmJjpDT1RyxmxGg5JpX5KzhUgL.FoMfSh-RSh.Xeo.2dJLoIEXLxKMLBK-L1--LxK.L1hML12eLxKBLB.2L12eLxK.L1heL12-LxKBLBonLBont; ALF=1711691892; SSOLoginState=1680155892'}
def get_sinceid(url):
    """Return the ``since_id`` pagination cursor from a Weibo image-wall API page.

    The endpoint returns 0 in ``data.since_id`` when there are no further
    pages (see how the main loop uses it to stop).

    Fixed: the original passed ``params="html.parser"`` — a BeautifulSoup
    argument pasted here by mistake — which appended that literal string to
    the query string; the endpoint needs no extra params.
    """
    resp_json = requests.get(url, headers=headers).json()
    return resp_json['data']['since_id']
def get_pids(url):
    """Return the picture ids (``pid``) listed on one image-wall API page.

    Each entry in ``data.list`` describes one image; its ``pid`` is later
    expanded to a full-size image URL by ``get_img``.

    Fixed: dropped the bogus ``params="html.parser"`` argument (a
    BeautifulSoup leftover) that polluted the query string.
    """
    resp_json = requests.get(url, headers=headers).json()
    return [entry['pid'] for entry in resp_json['data']['list']]
def get_img(pids):
    """Map each picture id in *pids* to its full-size image URL.

    Weibo serves the original-resolution copy of a picture at
    ``https://wx1.sinaimg.cn/large/<pid>.jpg``.

    Cleanup: replaced the manual append loop with a comprehension and
    dropped the confusing ``img_base_urL`` name (capital L read as a typo).
    """
    return ['https://wx1.sinaimg.cn/large/{}.jpg'.format(pid) for pid in pids]
def download_img(img_urls):
    """Download every URL in *img_urls* into ``c:/爬取的图片``.

    Creates the directory if it does not exist; each file is named after the
    last path segment of its URL (``<pid>.jpg``). Existing files with the
    same name are overwritten.

    Fixes: ``os.makedirs(..., exist_ok=True)`` replaces the check-then-create
    pattern (race-free), and ``os.path.join`` replaces hand concatenation
    that mixed ``/`` and ``\\`` separators.
    """
    dest_dir = 'c:/爬取的图片'
    os.makedirs(dest_dir, exist_ok=True)
    for img_url in img_urls:
        data = requests.get(img_url, headers=headers).content
        file_name = img_url.split("/")[-1]
        with open(os.path.join(dest_dir, file_name), 'wb') as f:
            f.write(data)
# --- interactive entry point -------------------------------------------------
# Asks for a Weibo uid and a page count, then walks the image-wall API one
# page at a time: each page yields pids -> full-size URLs -> files on disk.
print('输入爬取微博的uid')
a = input()
print('输入你想获得的页数,每页最多20张,如果没这么多页自动爬取全部图片')
b = int(input())
sinceid = 0
base_url = f'https://weibo.com/ajax/profile/getImageWall?uid={a}&sinceid=0&has_album=true'
for i in range(b):
    # NOTE(review): this fetches the same page twice (once for pids, once for
    # the cursor) because the helpers each issue their own request — merging
    # them would halve the API calls but change their interfaces.
    pids = get_pids(base_url)
    sinceid = get_sinceid(base_url)
    download_img(get_img(pids))
    print(f'第{i + 1}页下载完成')
    # Fixed: the original duplicated the download/print code inside the
    # sinceid == 0 branch; download once, then stop if there is no next page.
    if sinceid == 0:
        break
    base_url = f'https://weibo.com/ajax/profile/getImageWall?uid={a}&sinceid={sinceid}&has_album=true'
print('下载完毕,图片路径c:/爬取的图片')
# Requires the "requests" package (pip install requests). For how to find a
# user's uid, see: 微博查询他人uid方法 - 哔哩哔哩 (bilibili.com)