python --- Fetching Pixiv Ranking Images (2024.2.16)


1. Notes:
Using this script requires installing the packages in the imports; they are all very common packages and can be installed directly with pip.
You will also need your own proxy to reach Pixiv.

Remember to change userid and cookie to the values of your own account!
userid is the string of digits at the end of the URL when you click your Pixiv avatar.
cookie: open the ranking page, press F12 to open the developer tools, switch to the Network tab, refresh the page, and find the ranking.php request; its request headers contain the cookie. Both values are plugged into the configuration sketched below.
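
For reference, here is a minimal sketch of where those two values end up; in the full code (section 5) they live in RankingCrawler.__init__. The placeholder strings below are illustrative, not real credentials:

self.headers = {
    "Cookie": "first_visit_datetime_pc=...; PHPSESSID=...; ...",    # copied from the ranking.php request headers
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) ..."   # your browser's User-Agent string
}
self.user_id = "12345678"   # placeholder: the digits at the end of your profile URL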

2. Serious warning

  1. This program may only be used as a personal hobby; commercial use is strictly forbidden!
  2. Please respect the site's robots.txt (a gentleman's agreement).
  3. Do not put excessive load on the site; keep the number of requests per day and within any given time window at a reasonable level!

3. Approach:
Friends in my group chats keep asking me for artwork, and my stash is nowhere near enough, so I had to find a way to get more.

Plenty of people have written Pixiv crawlers before, but the ones I looked at are structured like full-blown projects. They are impressive, but my needs are simple, and a single-file script suits a lazy person like me.

  1. First, the get_multi_page_json method of the RankingCrawler class fetches the ranking's JSON data and adds each illust id to the collector.
  2. Then the collector's collect method iterates over each id, uses get_artworks_urls to fetch all image URLs on that artwork's page, and adds them to the downloader.
  3. Finally, the downloader's download method downloads the images concurrently, calling download_image of the same class from multiple threads (see the sketch right after this list).
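
A minimal sketch of the three-stage pipeline (this simply mirrors the run method of the full code in section 5):

crawler = RankingCrawler()
crawler.get_multi_page_json()    # stage 1: ranking JSON -> illust ids into the collector
crawler.collector.collect()      # stage 2: illust ids -> image URLs into the downloader
crawler.downloader.download()    # stage 3: image URLs -> concurrent downloads to disk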

4. Usage:
Generally you can just run the script as-is. Every parameter you might want to change is in the __init__ of the RankingCrawler class and is self-explanatory at a glance; a short sketch follows.
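
A minimal sketch of the fields you would typically edit in RankingCrawler.__init__ (the values shown are illustrative, not required defaults):

self.top_num = 100            # fetch the top 100 entries (the ranking is paged 50 per page)
self.time_mode = "weekly"     # daily_ai, weekly, monthly, ...
self.content = "illust"       # content type of the ranking
self.user_id = "12345678"     # placeholder: your own Pixiv user id
self.date = "20240214"        # end date of the ranking window (must not be today's date)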

5. Code:

import os
import re
import time
import requests
import concurrent.futures as futures

from typing import Set, Iterable, Callable, Dict, Optional, Tuple
from tqdm import tqdm


class Downloader():

    def __init__(self, capacity, headers, threads, standard_time, date):
        self.url_group: Set[str] = set()
        self.capacity = capacity   # maximum download volume (MB)
        self.store_path = date + "/"   # use the date as the storage directory
        self.standard_time = standard_time
        self.threads = threads
        self.headers = headers.copy()

    # add urls to the download set
    def add(self, urls: Iterable[str]):
        for url in urls:
            self.url_group.add(url)
    
    # download a single image
    def download_image(self, url: str) -> float:
        """
        Download a single image from url and return its size in MB.
        url example: "https://i.pximg.net/img-master/img/2024/02/10/03/09/52/115911580_p0_master1200.jpg"
        """

        # image_name
        image_name = url[url.rfind("/") + 1:]
        # image_id
        image_id = re.search(r"/(\d+)_", url).group(1)
        # image_path
        image_path = self.store_path + image_name

        # add the Referer header
        self.headers.update({"Referer": f"https://www.pixiv.net/artworks/{image_id}"})

        # make sure the storage directory exists
        os.makedirs(self.store_path, exist_ok=True)
        # skip images that have already been downloaded
        if os.path.exists(image_path):
            # print(f"File {image_name} already exists. Skipping download.")
            return 0

        # download the image, retrying up to 10 times; since downloads run in multiple threads, the timeout can be a little longer
        for i in range(10):
            try:
                response = requests.get(url, headers=self.headers, timeout=(4, self.standard_time))  # timeout = (connect timeout, read timeout)

                if response.status_code == 200:

                    if "content-length" not in response.headers:  # content-length must be present, otherwise retry
                        raise KeyError("content-length not in response.headers")

                    image_size = int(response.headers["content-length"])
                    with open(image_path, "wb") as f:
                        f.write(response.content)
                    return image_size / (1 << 20)

            except Exception as e:
                pass

        return 0

    # download multiple images concurrently
    def download(self):
        # download_image takes only the url; its other "parameters" are instance attributes, because executor.map passes a single argument per call
        flow_size = .0   # total downloaded volume so far (MB)

        print("===== downloader start =====")
        with futures.ThreadPoolExecutor(self.threads) as executor:
            # tqdm provides the progress bar
            with tqdm(total=len(self.url_group), desc="downloading") as pbar:
                # executor.map feeds each url in url_group to download_image concurrently and yields each image's size as the results come in
                for image_size in executor.map(self.download_image, self.url_group):
                    flow_size += image_size
                    pbar.update()
                    pbar.set_description(f"downloading / flow {flow_size:.2f}MB")
                    if flow_size > self.capacity:
                        executor.shutdown(wait=False, cancel_futures=True)
                        break
        print("===== downloader complete =====")

        return flow_size


class Collector():

    def __init__(self, threads, user_id, headers, downloader):
        self.id_group: Set[str] = set()  # illust_id

        self.threads = threads
        self.user_id = user_id
        self.headers = headers.copy()
        self.downloader = downloader

    def add(self, image_id):
        self.id_group.add(image_id)

    # parse the HTTP response and return the set of original-resolution image URLs
    def select_page(self, response) -> Set[str]:
        """
        url: https://www.pixiv.net/ajax/illust/xxxx/pages?lang=zh
        collect all image urls from (page.json)

        Returns: Set[str]: urls
        """
        group = set()
        for url in response.json()["body"]:
            group.add(url["urls"]["original"])
        return group

    # perform an HTTP GET on the given URL and process the response with the given selector function
    def get_artworks_urls(self, args: Tuple[str, Callable, Optional[Dict]]) -> Optional[Iterable[str]]:
        # unpack the arguments
        url, selector, additional_headers = args

        # update the request headers (copy first so concurrent calls do not mutate the shared dict)
        headers = self.headers.copy()
        headers.update(additional_headers)
        time.sleep(1)

        # retry the request up to 10 times
        for i in range(10):
            try:
                response = requests.get(url, headers=headers, timeout=4)

                if response.status_code == 200:
                    id_group = selector(response)
                    return id_group

            except Exception as e:
                print(e)

            time.sleep(1)

    # concurrently collect the image URLs of every artwork and hand them to the downloader
    def collect(self):
        """
        collect all image ids in each artwork, and send to downloader
        NOTE: an artwork may contain multiple images
        """
        print("===== collector start =====")

        with futures.ThreadPoolExecutor(self.threads) as executor:
            with tqdm(total=len(self.id_group), desc="collecting urls") as pbar:

                # build the ajax pages url for each illust_id
                urls_list = [f"https://www.pixiv.net/ajax/illust/{illust_id}/pages?lang=zh" for illust_id in self.id_group]

                # build the extra request headers for each illust_id
                additional_headers = [
                    {
                        "Referer": f"https://www.pixiv.net/artworks/{illust_id}",
                        "x-user-id": self.user_id,
                    }
                    for illust_id in self.id_group]

                # use get_artworks_urls to fetch all image urls for each artwork and send them to the downloader
                # futures.ThreadPoolExecutor(n_thread).map(func, iterable) passes each element of the iterable to func and returns an iterator over all of func's return values
                for urls in executor.map(self.get_artworks_urls, zip(urls_list, [self.select_page] * len(urls_list), additional_headers)):
                    if urls is not None:
                        self.downloader.add(urls)
                    pbar.update()

        print("===== collector complete =====")
        return self.id_group


class RankingCrawler():

    def __init__(self):
        """
        download artworks from the ranking

        Parameters (* = meant to be edited):
        top_num: how many of the top-ranked entries to fetch*
        time_mode: ranking period (daily, weekly, monthly, ...)*
        content: content type (illustrations, manga, ugoira, ...)*
        headers: request headers*
        threads: number of threads*
        capacity: maximum download volume (MB)*
        standard_time: standard wait (read-timeout) time in seconds
        user_id: your own user id*
        date: end date of the ranking window (must NOT be today's date; at most today minus 2 days)*
              (e.g. date=20240218 with mode=monthly fetches the ranking covering 20240118 to 20240218)

        Remember that the cookie and user_id must belong to the same account.

        """
        self.top_num = 50
        self.time_mode = "monthly"      # daily_ai, weekly, monthly
        self.content = "illust"
        self.headers = {
            "Cookie": "first_visit_datetime_pc=2022-08-02+15%3A36%3A28; p_ab_id=1; p_ab_id_2=7; p_ab_d_id=1023356603; yuid_b=QXQ4QA; privacy_policy_notification=0; a_type=1; d_type=1; login_ever=yes; _im_vid=01HM1N03159737XTWCJAJ2BY5P; QSI_S_ZN_5hF4My7Ad6VNNAi=v:0:0; _gcl_au=1.1.652233640.1707984789; _im_uid.3929=b.427a030d239f8f0f; _pbjs_userid_consent_data=3524755945110770; _im_vid=01HM1N03159737XTWCJAJ2BY5P; _pubcid=72d87e02-9d02-4eba-b647-77676ef3673c; device_token=29dcd2275dd3ad33f978780a8732d797; __utmc=235335808; cf_clearance=dm9BjZDgPR_7P6FyF01hMvFo6_YHMMnXeuNeh27eCBI-1708345123-1.0-AXR4V4yJkDaYgijBkHd9UVuIBnEInvPrmHN34DpD3t5tIpehWxrAgQ1r9I7+vQIyMtEzFrjhs+mdmnry7qq6l+Y=; _gid=GA1.2.2041924151.1708345235; FCNEC=%5B%5B%22AKsRol__CNeOIOGzfarvWVE024ABJyuS0JuZ-a4EVu2bO3v6IulUgGzLEasagVrBZ8eiP7sMcLdj_PBN2pCXdLdRknBjvSJpTzg49MUAwkwk207sqOnAyn5LgYIxGDYKQBmDpY-jP4z9ZvoV_d7DxVjb4ZueshxsXQ%3D%3D%22%5D%5D; cto_bundle=ZTCen19jalZ0ZGVPMm1QN3ljUmJ0ZjBlazdXenp1JTJGeFIwNUNOb001V2ZCTHNveGVrNzFzUWlDcSUyRllKeFl3a2lZciUyRnpLWXRzOFgwVk41OXRKWCUyRlZCMWxEYTdkR0tmJTJGQzBNaUZ6JTJCNUgwJTJCcjAzdkhNcVhQRlVqTkNRUzB1Mm01QjBZVUFHZUl3UkglMkJRWXpUUHMyUEdMMDQzUlFBJTNEJTNE; cto_bidid=6NqvDl9EUWVHTDZEMjdZVEVad0g2Skh0bDdtMVpCJTJGNiUyRjlBQ3N0JTJCeVZaeUU0dWIzemIzRlVsWjZVWnlDcFh4VnJVQ0xNRERtRCUyQnRDRmU0UkQlMkZOdERHUWJsMmJDREVzdks3SXJjVFNFWXBpOFI2bVklM0Q; cto_dna_bundle=gW3ZBF85WlVTMUslMkYzb3lGS2J1ekVhb2pXaVgyQnF5ZkFyelBXZUIyMEMlMkYlMkZOWnZGbkhVaFBjT1pRcmhvdEdzTlhOcTdKY05RaGt2UVFnSkt2SSUyQmFrWWZ4ZjVBJTNEJTNE; __cf_bm=Gkb0bQ.CRssgRal1ghknMYEnCsmNswZBWIuaMx2X7oM-1708357221-1.0-ATqghKCzNg+KhPGDn/t/63H7GOrNexnTOvVEISsYAqKF7zA+HAvxfHu8TDs+TqTzF5NFX8xRefd66F65w5OJEqjrVf2VjhiR6yBOMzrOJuG5; __utma=235335808.1308491601.1659422191.1708353783.1708357223.12; __utmz=235335808.1708357223.12.5.utmcsr=google|utmccn=(organic)|utmcmd=organic|utmctr=(not%20provided); __utmt=1; cc1=2024-02-20%2000%3A40%3A32; PHPSESSID=22198038_H87q1tYtLEbhlcvsrAd5IZGu6xOHyQ8o; c_type=22; privacy_policy_agreement=0; b_type=2; _ga_MZ1NL4PHH0=GS1.1.1708357236.5.1.1708357301.0.0.0; __utmv=235335808.|2=login%20ever=yes=1^3=plan=normal=1^5=gender=female=1^6=user_id=22198038=1^9=p_ab_id=1=1^10=p_ab_id_2=7=1^11=lang=zh=1; __utmb=235335808.3.10.1708357223; _ga=GA1.2.1308491601.1659422191; _gat_UA-1830249-3=1; _ga_75BBYNYN9J=deleted",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36 Edg/121.0.0.0"
        }
        self.threads = 12
        self.capacity = 10000
        self.standard_time = 10
        self.user_id = "22198038"
        self.date = "20240115"

        self.downloader = Downloader(self.capacity, self.headers, self.threads, self.standard_time, self.date)
        self.collector = Collector(self.threads, self.user_id, self.headers, self.downloader)

    # fetch the ranking's json data and add the illust ids to the collector
    def get_multi_page_json(self, time_mode=None, content=None, date=None):

        # optional overrides, e.g. for the AI ranking
        if time_mode is not None:
            self.time_mode = time_mode
            if self.time_mode == "daily_ai":
                self.top_num = 50
        if content is not None:
            self.content = content
        if date is not None:
            self.date = date

        # fetch the ranking page by page (50 entries per page)
        for i in range(1, self.top_num // 50 + 1):
            """
            example: 
            1.https://www.pixiv.net/ranking.php?mode=monthly&content=illust&date=20240215&p=1&format=json
            2.https://www.pixiv.net/ranking.php?mode=daily_ai&content=all&date=20240215&p=1&format=json
            """
            url = f"https://www.pixiv.net/ranking.php?mode={self.time_mode}&content={self.content}&date={self.date}&p={i}&format=json"
            headers = self.headers.copy()   # copy so the shared headers dict is not mutated
            headers.update(
                {
                    "Referer": f"https://www.pixiv.net/ranking.php?mode={self.time_mode}&date={self.date}",
                    "x-requested-with": "XMLHttpRequest"
                })

            response = requests.get(url, headers=headers, timeout=(4, self.standard_time))

            if response.status_code == 200:
                art_works = response.json()["contents"]
                for artwork in art_works:
                    self.collector.add(str(artwork["illust_id"]))
            else:
                print(f"Failed to get json data from: {url}")

            time.sleep(1)

    def run(self):
        self.get_multi_page_json()
        # self.get_multi_page_json(time_mode="daily_ai", content="all")
        self.collector.collect()
        self.downloader.download()


if __name__ == "__main__":

    RankingCrawler().run()

    """
    Recap of the approach:
    1. get_multi_page_json of the RankingCrawler class fetches the ranking's json data and adds the illust ids to the collector.
    2. The collector's collect method iterates over each id, uses get_artworks_urls to fetch all image urls on that artwork's page, and adds them to the downloader.
    3. The downloader's download method downloads the images concurrently, calling download_image of the same class from multiple threads.
    """

If you liked this, why not leave a like? Knowing that readers are engaging is what keeps me writing articles~

PS:
I feel a bit sentimental. Batch-downloading works by the great artists on Pixiv is something I dreamed of before I had ever touched programming, back when I could not read code at all and did not even know how to tweak a parameter or a variable. Since I first started learning programming on August 20, 2020, more than three and a half years have gone by, and now a crawler like this takes me only a few hours to write. It is hard to believe. I hope you all keep working at the things you love as well; it may not be of much practical use, haha, but it really is a lot of fun!
