[Pixiv Crawler] A simple crawler for downloading all images by a single artist

Based on the referenced article, with minor adjustments, this batch-downloads all images by a single pixiv artist (including multi-page works).

Setup required: create a folder named "png" in the same directory as the script.
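
If you'd rather have the script create it automatically, a minimal sketch using only the standard library (my addition, not part of the original code):

import os

# Create the ./png output directory the script writes into, if it is missing
os.makedirs("png", exist_ok=True)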

Values to fill in: the artist's userid, your cookie, and your User-Agent; the latter two can be found in a similar way (for example, from a request shown in your browser's developer tools).

If a network hiccup interrupts the program, you can change the value of start in

start = 1  # raise this value to resume an interrupted run

just above the main download loop to continue downloading from where it stopped, rather than starting over from the beginning.
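
For a fully automatic resume, here is a minimal checkpoint sketch (my own addition; the file name checkpoint.txt is an arbitrary choice). Initialize the loop with start = load_start() and call save_progress(index) at the end of each iteration:

import os

CHECKPOINT = "checkpoint.txt"  # hypothetical file holding the resume state

def load_start() -> int:
    # Return the 1-based index to resume from, defaulting to 1
    if os.path.exists(CHECKPOINT):
        with open(CHECKPOINT) as f:
            return int(f.read().strip()) + 1
    return 1

def save_progress(index: int) -> None:
    # Record the index of the last work that finished downloading
    with open(CHECKPOINT, "w") as f:
        f.write(str(index))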

To download manga instead of illustrations, change 'illusts' to 'manga' in

urls = html['body']['illusts']

(a sketch for fetching both categories at once follows below).
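
If you want both illustrations and manga in a single run, a small sketch that merges the two id dicts (my addition; it assumes pixiv may return an empty category as a list rather than a dict, which the isinstance guard covers either way):

def as_dict(value):
    # An empty category appears to come back as [] instead of {} (assumption)
    return value if isinstance(value, dict) else {}

urls = {**as_dict(html['body']['illusts']), **as_dict(html['body']['manga'])}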

Note: the script downloads original-resolution images by default; the thumbnail-download section is left unchanged (commented out), so adapt it yourself if you need it.

import requests
import time
import json

def download_img(url):  # helper that downloads a single image once we have its URL
    headers_download = {
        "referer": "https://www.pixiv.net/"
        # pixiv image links are hotlink-protected, so a referer is required;
        # with it, pixiv treats the request as coming from its own pages
        # and serves the image normally
    }
    response = requests.get(url=url, headers=headers_download)
    name = url.split("/")[-1]
    # Use the last path segment of the URL as the file name, since the
    # crawled images are a mix of jpg and png
    with open("./png/" + name, "wb") as f:  # the ./png directory must already exist
        f.write(response.content)  # write the binary image data to disk
    print(name + " downloaded")


url = 'https://www.pixiv.net/ajax/user/<artist-userid>/profile/all'  # fill in the artist's userid
headers = {
    'User-Agent': '<your User-Agent>',
    'Cookie': '<your cookie>',
    'referer': 'https://www.pixiv.net/'
}
params = {
    'lang': 'zh',
    'version': '8a10a9007b94f71b617fe580e88bd087c13a8994',
}
html = requests.get(url=url, headers=headers, params=params).json()
# print(type(html))
# Extract the numeric work IDs for all of the artist's posts
#urls = html['body']['manga']   # switch to this line for manga
urls = html['body']['illusts']
# print(urls)
key_id_list = [int(key) for key in urls.keys()]
# print(key_id_list)
print("Total works:", len(key_id_list))
time.sleep(2)  # pause for two seconds

# Fetch per-work metadata (only used by the commented-out thumbnail section below)
url = 'https://www.pixiv.net/ajax/user/<artist-userid>/profile/illusts'  # same userid as above
params = {
    'ids[]': key_id_list[:48],  # this request carries the first 48 work ids
    'work_category': 'illustManga',
    'is_first_page': 1,
    'lang': 'zh',
    'version': '8a10a9007b94f71b617fe580e88bd087c13a8994',
}
img_all_url = requests.get(url=url, headers=headers, params=params).json()
# print(img_all_url)
## Download thumbnails (left as-is; adapt if you need them)
# numeric_values = [value['url'] for key, value in img_all_url['body']['works'].items() if key.isdigit()]
# print(numeric_values)
# print(len(numeric_values))
# for index, i in enumerate(numeric_values, start=1):
#     print(index, "downloading")
#     download_img(i)
#     time.sleep(1)
#     print(index, "downloaded")

# Download original-resolution images
param = {
    'lang': 'zh'  # query data sent with the ajax request
}
start = 1  # raise this value to resume an interrupted run
for index, i in enumerate(key_id_list[start - 1:], start=start):
    print(index, "downloading")
    url0 = requests.get(url="https://www.pixiv.net/ajax/illust/" + str(i) + "/pages",
                        params=param, headers=headers).text
    url_list = json.loads(url0)['body']
    # one entry per page, so multi-page posts are handled too
    for url in url_list:
        urls = url['urls']['original']
        download_img(urls)
        time.sleep(2)
    print(index, "downloaded")

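One more defensive touch worth considering: check whether the profile request actually succeeded before indexing into html['body'], since an expired cookie is a common cause of failure. A small sketch; the 'error'/'message' fields are my reading of pixiv's ajax responses, so treat their exact shape as an assumption:

# right after: html = requests.get(url=url, headers=headers, params=params).json()
# pixiv's ajax endpoints appear to return {"error": true, "message": ...}
# on failure (assumption); bail out early instead of hitting a KeyError
if html.get('error'):
    raise SystemExit("Request failed: " + str(html.get('message')))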

Update for 403 errors:

The code above can fail with a 403 error when reading data, probably because the download_img function does not send a User-Agent. Based on the referenced article, I added a randomly chosen proxy IP and User-Agent; the complete code follows:

import requests
import time
import json
import random

def download_img(url):  # helper that downloads a single image once we have its URL
    agent = random.choice(my_headers)  # pick a random User-Agent for this request
    headers_download = {
        "referer": "https://www.pixiv.net/",
        "User-Agent": agent
        # pixiv image links are hotlink-protected, so a referer is required;
        # with it, pixiv treats the request as coming from its own pages
    }
    proxy = random.choice(proxy_list)  # pick a random proxy for this request
    # route both http and https through the proxy; with only an 'http' key,
    # https requests (which pixiv uses) would bypass the proxy entirely
    response = requests.get(url=url, headers=headers_download,
                            proxies={'http': proxy, 'https': proxy})
    name = url.split("/")[-1]
    # Use the last path segment of the URL as the file name, since the
    # crawled images are a mix of jpg and png
    with open("./png/" + name, "wb") as f:  # the ./png directory must already exist
        f.write(response.content)  # write the binary image data to disk
    print(name + " downloaded")


url = 'https://www.pixiv.net/ajax/user/<artist-userid>/profile/all'  # fill in the artist's userid
headers = {
    'User-Agent': '<your User-Agent>',
    'Cookie': '<your cookie>',
    'referer': 'https://www.pixiv.net/'
}
params = {
    'lang': 'zh',
    'version': '8a10a9007b94f71b617fe580e88bd087c13a8994',
}
html = requests.get(url=url, headers=headers, params=params).json()
# print(type(html))
# Extract the numeric work IDs for all of the artist's posts
#urls = html['body']['manga']   # switch to this line for manga
urls = html['body']['illusts']
# print(urls)
key_id_list = [int(key) for key in urls.keys()]
# print(key_id_list)
print("Total works:", len(key_id_list))
time.sleep(2)  # pause for two seconds

my_headers = [  # pool of User-Agent strings to rotate through
    "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:30.0) Gecko/20100101 Firefox/30.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/537.75.14",
    "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Win64; x64; Trident/6.0)",
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11',
    'Opera/9.25 (Windows NT 5.1; U; en)',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
    'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
    'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',
    'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9',
    "Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.7 (KHTML, like Gecko) Ubuntu/11.04 Chromium/16.0.912.77 Chrome/16.0.912.77 Safari/535.7",
    "Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0 "
]

proxy_list = [  # example proxies from the referenced article; likely stale, replace with your own
    '183.95.80.102:8080',
    '123.160.31.71:8080',
    '115.231.128.79:8080',
    '166.111.77.32:80',
    '43.240.138.31:8080',
    '218.201.98.196:3128'
]

# Fetch per-work metadata (only used by the commented-out thumbnail section below)
url = 'https://www.pixiv.net/ajax/user/<artist-userid>/profile/illusts'  # same userid as above
params = {
    'ids[]': key_id_list[:48],  # this request carries the first 48 work ids
    'work_category': 'illustManga',
    'is_first_page': 1,
    'lang': 'zh',
    'version': '8a10a9007b94f71b617fe580e88bd087c13a8994',
}
img_all_url = requests.get(url=url, headers=headers, params=params).json()
# print(img_all_url)

## Download thumbnails (left as-is; adapt if you need them)
# numeric_values = [value['url'] for key, value in img_all_url['body']['works'].items() if key.isdigit()]
# print(numeric_values)
# print(len(numeric_values))
# for index, i in enumerate(numeric_values, start=1):
#     print(index, "downloading")
#     download_img(i)
#     time.sleep(1)
#     print(index, "downloaded")

# Download original-resolution images
param = {
    'lang': 'zh'  # query data sent with the ajax request
}
start = 1  # raise this value to resume an interrupted run
for index, i in enumerate(key_id_list[start - 1:], start=start):
    print(index, "downloading")
    url0 = requests.get(url="https://www.pixiv.net/ajax/illust/" + str(i) + "/pages",
                        params=param, headers=headers).text
    url_list = json.loads(url0)['body']
    # one entry per page, so multi-page posts are handled too
    for url in url_list:
        urls = url['urls']['original']
        download_img(urls)
        time.sleep(2)
    print(index, "downloaded")

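If one of the pooled proxies is dead or gets banned mid-run (the situation reference 6 discusses), one option is to retry the download with a freshly drawn proxy and User-Agent, reusing the my_headers and proxy_list pools defined above. A minimal sketch, not part of the original code; the retry count and timeout are arbitrary choices:

def download_img_with_retry(url, attempts=3):
    # Try up to `attempts` times, drawing a new proxy and User-Agent each time
    for attempt in range(1, attempts + 1):
        headers_download = {
            "referer": "https://www.pixiv.net/",
            "User-Agent": random.choice(my_headers),
        }
        proxy = random.choice(proxy_list)
        try:
            response = requests.get(url, headers=headers_download,
                                    proxies={'http': proxy, 'https': proxy},
                                    timeout=15)
            response.raise_for_status()  # turns 403 and friends into an exception
        except requests.RequestException as e:
            print("attempt", attempt, "via", proxy, "failed:", e)
            continue
        name = url.split("/")[-1]
        with open("./png/" + name, "wb") as f:
            f.write(response.content)
        print(name + " downloaded")
        return True
    return False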

References:

1. 爬虫p站,下载喜欢的博主所有作品——7月2日 ("Scraping pixiv: downloading all of a favorite artist's works — July 2", CSDN blog)

2. 爬虫实践-多线程Pixiv ("Crawler practice: multithreaded Pixiv", CSDN blog)

3. 使用python爬取pixiv.net的图片? ("Scraping images from pixiv.net with Python?")

4. Python爬虫403错误的终极解决方案 ("The ultimate fix for 403 errors in Python crawlers")

5. 关于Requests代理,你应该知道的 ("What you should know about Requests proxies")

6. python爬虫,如何在代理的IP被封后立刻换下一个IP继续任务? ("Python crawling: how to switch to the next proxy IP as soon as one is banned?")
