Crawling a Weibo User's Album Images

Difficulty Analysis

The main difficulty is working out, via packet capture, how the pagination is implemented. By inspecting the requests, we can see that every response to a photo request carries a since_id and a bottom_tips_visible field.

When the last page of data is requested, since_id is 0 and bottom_tips_visible is true.

With this analysis in hand, fetching the data is straightforward.
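Based on that capture, the response body has roughly the following shape (a simplified sketch; values are placeholders, and fields the crawler does not use are omitted):

{
    "data": {
        "list": [
            {"pid": "<photo id>"},
            {"pid": "<photo id>"}
        ],
        "since_id": "<cursor passed to the next request>"
    },
    "bottom_tips_visible": false
}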

Fetching the Data

def get_page_info(username, since_id):
    """Request one page of the user's image wall and return the parsed JSON."""
    url = 'https://weibo.com/ajax/profile/getImageWall'
    headers = {
        'Cookie': 'SINAGLOBAL=666731024991.3516.1692108672688; SUB=_2A25J3_fyDeRhGeBK41IY9CbIzDyIHXVrI5m6rDV8PUJbkNANLRnTkW1NR5DGKlU-7QQXFUn8kCO7tHZWHa2lstgT; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WhfqYIORlaYHk9cTmVipRvw5NHD95QcShn71KBRShM7Ws4Dqcj.i--ciK.RiKLsi--ci-88iKyFi--ci-2Ei-2ci--Ni-2EiKy8; XSRF-TOKEN=j1xKHXp5NhQe6PQfbcsrCsbK; _s_tentry=weibo.com; Apache=7785653743983.5625.1692144415457; ULV=1692144415526:3:3:3:7785653743983.5625.1692144415457:1692112430474; PC_TOKEN=7b320ce9a2; WBStorage=4d96c54e|undefined; WBPSESS=-hAVHrSkMC8S4jXFC4-lqnFhFtNlrZmAL-r5XJQqEZxHyzIHGIPV0NHIisq9ALS32gvYmuib-sMSj5_PnjsfSxrsrk42crVW65GaxDPbJhzjy5gXZg19j07R0Qy1OJfS5oZKop_lRJX1ZHkKrd5B7w==',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36 Edg/115.0.1901.203',
        'Referer': 'https://weibo.com/',
    }
    params = {
        'uid': username,      # numeric id of the target user
        'sinceid': since_id,  # pagination cursor; 0 requests the first page
    }
    try:
        res = requests.get(url=url, headers=headers, params=params, timeout=10)
        if res.status_code == 200:
            return res.json()
        return None
    except requests.exceptions.RequestException as e:
        print(e)
        return None
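A quick sanity check before wiring up the full loop (the uid is one of the sample ids used later in main; replace the Cookie above with your own logged-in value first):

from pprint import pprint

json_data = get_page_info(username='5252751519', since_id=0)
if json_data:
    print(len(json_data['data']['list']), 'photos on this page')
    pprint(json_data['data']['since_id'])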

Parsing the Data

def parse_json_data(json_data, username):
    """Download every photo on this page; return the stop flag and the next cursor."""
    try:
        headers = {
            'Cookie': 'SINAGLOBAL=666731024991.3516.1692108672688; SUB=_2A25J3_fyDeRhGeBK41IY9CbIzDyIHXVrI5m6rDV8PUJbkNANLRnTkW1NR5DGKlU-7QQXFUn8kCO7tHZWHa2lstgT; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WhfqYIORlaYHk9cTmVipRvw5NHD95QcShn71KBRShM7Ws4Dqcj.i--ciK.RiKLsi--ci-88iKyFi--ci-2Ei-2ci--Ni-2EiKy8; XSRF-TOKEN=j1xKHXp5NhQe6PQfbcsrCsbK; _s_tentry=weibo.com; Apache=7785653743983.5625.1692144415457; ULV=1692144415526:3:3:3:7785653743983.5625.1692144415457:1692112430474; PC_TOKEN=7b320ce9a2; WBStorage=4d96c54e|undefined; WBPSESS=-hAVHrSkMC8S4jXFC4-lqnFhFtNlrZmAL-r5XJQqEZxHyzIHGIPV0NHIisq9ALS32gvYmuib-sMSj5_PnjsfSxrsrk42crVW65GaxDPbJhzjy5gXZg19j07R0Qy1OJfS5oZKop_lRJX1ZHkKrd5B7w==',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36 Edg/115.0.1901.203',
            'Referer': 'https://weibo.com/',
        }
        # The webp720 prefix serves a compressed 720px rendition of each photo
        img_prefix_url = 'https://wx2.sinaimg.cn/webp720/'
        imgs = json_data['data']['list']
        for img in imgs:
            img_url = img_prefix_url + img['pid'] + '.jpg'
            img_name = img['pid']
            img_content = requests.get(url=img_url, headers=headers, timeout=10).content
            # Create the per-user download directory (including parents) if needed
            os.makedirs(f'imgs/{username}', exist_ok=True)
            with open(f'imgs/{username}/{img_name}.jpg', mode='wb') as f:
                f.write(img_content)
                print(f'{img_name} downloaded')
        return json_data['bottom_tips_visible'], json_data['data']['since_id']
    except Exception as e:
        print(e)
        # Treat a parse failure as the last page so the caller can stop cleanly
        return True, 0
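Incidentally, the webp720 segment appears to control the rendition size. If you want the full-size originals, swapping that segment is worth trying; note this is an assumption about Sina's image CDN path scheme, not something confirmed by the capture above:

# Assumed variant: 'large' is believed to serve the full-size original image
img_prefix_url = 'https://wx2.sinaimg.cn/large/'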

Main Function

def main(username):
    # username = '5252751519'
    # username = '2150318380'
    since_id = 0
    while True:
        json_data = get_page_info(username=username, since_id=since_id)
        if json_data is None:
            break
        is_stop, since_id = parse_json_data(json_data=json_data, username=username)
        print(f'Current status: {is_stop}, since_id: {since_id}')
        # The last page is signalled by bottom_tips_visible being true or since_id being 0
        if is_stop or str(since_id) == '0':
            print('Reached the end...')
            break
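Optionally, a short pause between page requests reduces the risk of being rate-limited. A minimal sketch, with a hypothetical main_with_delay wrapper and an arbitrary one-second interval (my assumption, not something the API documents):

import time


def main_with_delay(username):
    since_id = 0
    while True:
        json_data = get_page_info(username=username, since_id=since_id)
        if json_data is None:
            break
        is_stop, since_id = parse_json_data(json_data=json_data, username=username)
        if is_stop or str(since_id) == '0':
            print('Reached the end...')
            break
        time.sleep(1)  # arbitrary politeness delay between pages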

Results

Complete Code

import requests
import os


def get_page_info(username, since_id):
    """Request one page of the user's image wall and return the parsed JSON."""
    url = 'https://weibo.com/ajax/profile/getImageWall'
    headers = {
        'Cookie': 'SINAGLOBAL=666731024991.3516.1692108672688; SUB=_2A25J3_fyDeRhGeBK41IY9CbIzDyIHXVrI5m6rDV8PUJbkNANLRnTkW1NR5DGKlU-7QQXFUn8kCO7tHZWHa2lstgT; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WhfqYIORlaYHk9cTmVipRvw5NHD95QcShn71KBRShM7Ws4Dqcj.i--ciK.RiKLsi--ci-88iKyFi--ci-2Ei-2ci--Ni-2EiKy8; XSRF-TOKEN=j1xKHXp5NhQe6PQfbcsrCsbK; _s_tentry=weibo.com; Apache=7785653743983.5625.1692144415457; ULV=1692144415526:3:3:3:7785653743983.5625.1692144415457:1692112430474; PC_TOKEN=7b320ce9a2; WBStorage=4d96c54e|undefined; WBPSESS=-hAVHrSkMC8S4jXFC4-lqnFhFtNlrZmAL-r5XJQqEZxHyzIHGIPV0NHIisq9ALS32gvYmuib-sMSj5_PnjsfSxrsrk42crVW65GaxDPbJhzjy5gXZg19j07R0Qy1OJfS5oZKop_lRJX1ZHkKrd5B7w==',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36 Edg/115.0.1901.203',
        'Referer': 'https://weibo.com/',
    }
    params = {
        'uid': username,      # numeric id of the target user
        'sinceid': since_id,  # pagination cursor; 0 requests the first page
    }
    try:
        res = requests.get(url=url, headers=headers, params=params, timeout=10)
        if res.status_code == 200:
            return res.json()
        return None
    except requests.exceptions.RequestException as e:
        print(e)
        return None


def parse_json_data(json_data, username):
    """Download every photo on this page; return the stop flag and the next cursor."""
    try:
        headers = {
            'Cookie': 'SINAGLOBAL=666731024991.3516.1692108672688; SUB=_2A25J3_fyDeRhGeBK41IY9CbIzDyIHXVrI5m6rDV8PUJbkNANLRnTkW1NR5DGKlU-7QQXFUn8kCO7tHZWHa2lstgT; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WhfqYIORlaYHk9cTmVipRvw5NHD95QcShn71KBRShM7Ws4Dqcj.i--ciK.RiKLsi--ci-88iKyFi--ci-2Ei-2ci--Ni-2EiKy8; XSRF-TOKEN=j1xKHXp5NhQe6PQfbcsrCsbK; _s_tentry=weibo.com; Apache=7785653743983.5625.1692144415457; ULV=1692144415526:3:3:3:7785653743983.5625.1692144415457:1692112430474; PC_TOKEN=7b320ce9a2; WBStorage=4d96c54e|undefined; WBPSESS=-hAVHrSkMC8S4jXFC4-lqnFhFtNlrZmAL-r5XJQqEZxHyzIHGIPV0NHIisq9ALS32gvYmuib-sMSj5_PnjsfSxrsrk42crVW65GaxDPbJhzjy5gXZg19j07R0Qy1OJfS5oZKop_lRJX1ZHkKrd5B7w==',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36 Edg/115.0.1901.203',
            'Referer': 'https://weibo.com/',
        }
        # The webp720 prefix serves a compressed 720px rendition of each photo
        img_prefix_url = 'https://wx2.sinaimg.cn/webp720/'
        imgs = json_data['data']['list']
        for img in imgs:
            img_url = img_prefix_url + img['pid'] + '.jpg'
            img_name = img['pid']
            img_content = requests.get(url=img_url, headers=headers, timeout=10).content
            # Create the per-user download directory (including parents) if needed
            os.makedirs(f'imgs/{username}', exist_ok=True)
            with open(f'imgs/{username}/{img_name}.jpg', mode='wb') as f:
                f.write(img_content)
                print(f'{img_name} downloaded')
        return json_data['bottom_tips_visible'], json_data['data']['since_id']
    except Exception as e:
        print(e)
        # Treat a parse failure as the last page so the caller can stop cleanly
        return True, 0


def main(username):
    # username = '5252751519'
    # username = '2150318380'
    since_id = 0
    while True:
        json_data = get_page_info(username=username, since_id=since_id)
        if json_data is None:
            break
        is_stop, since_id = parse_json_data(json_data=json_data, username=username)
        print(f'Current status: {is_stop}, since_id: {since_id}')
        # The last page is signalled by bottom_tips_visible being true or since_id being 0
        if is_stop or str(since_id) == '0':
            print('Reached the end...')
            break


if __name__ == '__main__':
    username = input('Enter the user id to crawl: ')
    main(username=username)
