#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
爬取王者荣耀官网所有英雄的皮肤墙纸,并保存到本地。
需要获取每个英雄的皮肤图片链接,需要遍历所有英雄的皮肤,并将其保存到本地。
好好学习,天天向上
"""
import requests
from urllib import parse
import os
# Desktop-browser User-Agent so the game CDN serves the request normally
# instead of rejecting an obvious script client.
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.77 Safari/537.36'
}
def func(page=0):
    """Crawl the wallpaper listing starting at *page* and download every
    image resolution for each skin via :func:`save`.

    Args:
        page: zero-based page index to start from (the API's ``page``
            query parameter). Defaults to the first page.

    The original implementation recursed once per page, so a site with
    more pages than Python's recursion limit would crash; this version
    iterates instead. The stop condition (`page < iTotalPages` continues)
    is kept exactly as before.
    """
    # Maps the sProdImgNo_<n> field suffix to the resolution label used in
    # the saved file name. NOTE: the field-2 label was previously the typo
    # '1024x786'; 1024x768 is the actual resolution.
    sizes = {
        2: '1024x768',
        3: '1280x720',
        4: '1280x1024',
        5: '1440x900',
        6: '1920x1080',
        7: '1920x1200',
        8: '1920x1440',
    }
    while True:
        url = f'https://apps.game.qq.com/cgi-bin/ams/module/ishow/V1.0/query/workList_inc.cgi?iTypeId=2&sDataType=JSON&iListNum=20&page={page}&iActId=2735'
        # timeout prevents the crawl from hanging forever on a dead connection.
        response = requests.get(url=url, headers=headers, timeout=10).json()
        # Total number of pages reported by the API.
        total_pages = int(response['iTotalPages'])
        print(f"正在爬取第{page}页的数据".center(100, '*'))
        for img_info in response['List']:
            # Skin name arrives percent-encoded.
            img_name = parse.unquote(img_info['sProdName']).strip()
            # Download each available resolution of this skin.
            for field_no, size in sizes.items():
                save(img_name, img_info[f'sProdImgNo_{field_no}'], size)
        # Same boundary as the original recursion: keep going while
        # page < total_pages (the final request one past the last index
        # simply returns an empty list and the loop ends).
        if page < total_pages:
            page += 1
        else:
            break
# 保存图片到目录
def save(img_name, url, size):
    """Download one wallpaper image into a per-skin folder under the CWD.

    Args:
        img_name: display name of the skin (already URL-decoded); used as
            both the folder name and the file-name stem.
        url: percent-encoded image URL; the thumbnail path segment '/200'
            is swapped for '/0' to fetch the full-resolution file.
        size: resolution label (e.g. '1920x1080') appended to the file name.

    Failures are reported (with the reason) rather than raised, so one bad
    image does not abort the whole crawl.
    """
    # Decode and switch the thumbnail path ('/200') to full size ('/0').
    url = parse.unquote(url).replace("/200", "/0")
    # ':' is illegal in Windows file names, so substitute '='.
    safe_name = img_name.replace(':', '=')
    # os.path.join is portable; the old "getcwd() + '\\'" concatenation
    # only produced valid paths on Windows.
    folder = os.path.join(os.getcwd(), safe_name)
    # exist_ok avoids the race between the exists() check and makedirs().
    os.makedirs(folder, exist_ok=True)
    file_path = os.path.join(folder, f"{safe_name}_{size}.jpg")
    try:
        resp = requests.get(url, headers=headers, timeout=10)
        # Treat HTTP errors (404 etc.) as failures instead of writing the
        # error page's bytes into a .jpg file.
        resp.raise_for_status()
        with open(file_path, 'wb') as f:
            f.write(resp.content)
        print(f"{img_name}_{size}.jpg,下载成功。")
    except Exception as e:
        # Report the reason instead of silently discarding it.
        print(f"{img_name}_{size}.jpg,下载失败。({e})")
if __name__ == '__main__':
    # Entry point: start crawling from the first page (page index 0).
    func()
# Crawls all hero skin wallpapers from the Honor of Kings official site
# and saves them locally. First published 2024-03-02 20:44:50.
# (This trailing text was pasted in as bare prose, which is a SyntaxError;
# kept here as a comment so the file remains valid Python.)