王者荣耀高清壁纸下载(多线程、详细解析)
确定爬取目标网页
进入王者荣耀官方网站,点击游戏资料>>>>游戏壁纸
锁定所要爬取的图片信息
- 检查,进入开发者工具栏
- 选取Network,Ctrl + R刷新
- 经过筛选,发现图片信息在
workList_inc.cgi?activityId=2735&sVerifyCode=ABCD&…267733&iActId=2735&iModuleId=2735&_=1632043658985
标签页中。
而图片信息在Response中以json格式存储。 - 在浏览器中搜索json在线解析,将json信息复制解析 (要将jQuery1710016917133802359485_1632043658845(和最后面的)去掉)
- 解析结果如图:
- 浅蓝色链接便为图片信息,八个链接表示不同的分辨率图片,但url是经过编码的,需要解码。
- 解码方式如下:
from urllib import parse
result1 = parse.unquote("http%3A%2F%2Fshp.qpic.cn%2Fishow%2F2735082412%2F1629781114_84828260_705_sProdImgNo_5.jpg%2F200")
print(result1)
- 访问url后发现就是目标图片。
- 接下来创建线程进行爬取。
编写代码
# -*- coding = utf-8 -*-
# @Time : 2021/9/7 20:45
# @Author :CCUT_chao
# @File : run_main_V2.py
# @Software : PyCharm
import requests
from urllib import parse
from urllib import request
import os
import threading
import queue
# HTTP headers shared by all producer requests: a desktop-Chrome User-Agent
# plus a pvp.qq.com referer (NOTE(review): presumably the wallpaper API
# rejects requests without this referer — confirm against the endpoint).
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36',
    'referer': 'https://pvp.qq.com/'
}
class Producer(threading.Thread):
    """Producer thread: pops list-page API URLs from page_queue, fetches the
    JSON wallpaper list, and pushes one download job per image URL onto
    image_queue for the consumer threads.
    """

    def __init__(self, page_queue, image_queue, *args, **kwargs):
        super(Producer, self).__init__(*args, **kwargs)
        self.page_queue = page_queue    # queue of list-page API URLs to fetch
        self.image_queue = image_queue  # queue of download jobs for consumers

    def run(self) -> None:
        # Drain the page queue; each page's JSON carries a 'List' of entries.
        while not self.page_queue.empty():
            page_url = self.page_queue.get()
            resp = requests.get(page_url, headers=headers)
            result = resp.json()
            datas = result['List']
            for data in datas:
                image_urls = extract_images(data)
                # sProdName is URL-encoded; some names carry a "1:1" tag.
                name = parse.unquote(data['sProdName']).replace("1:1", "").strip()
                dir_path = os.path.join("images", name)
                # makedirs also creates the parent "images" directory and,
                # with exist_ok=True, is safe when several producer threads
                # hit the same wallpaper name concurrently. (The original
                # exists-check + os.mkdir raced between threads and crashed
                # outright when "images" did not exist yet.)
                os.makedirs(dir_path, exist_ok=True)
                for index, image_url in enumerate(image_urls):
                    self.image_queue.put({
                        "image_url": image_url,
                        "image_path": os.path.join(dir_path, "%d.jpg" % (index + 1)),
                    })
class Consumer(threading.Thread):
    """Consumer thread: pops download jobs from image_queue and saves each
    image to disk; exits once the queue stays empty for 10 seconds.
    """

    def __init__(self, image_queue, *args, **kwargs):
        super(Consumer, self).__init__(*args, **kwargs)
        self.image_queue = image_queue  # jobs: {"image_url": ..., "image_path": ...}

    def run(self) -> None:
        while True:
            try:
                image_obj = self.image_queue.get(timeout=10)
            except queue.Empty:
                # No job arrived for 10 s: producers are done, stop thread.
                break
            image_url = image_obj.get("image_url")
            image_path = image_obj.get("image_path")
            try:
                # Download exactly once. (The original code called
                # urlretrieve twice — an unprotected call followed by the
                # same call inside try — so every image was fetched two
                # times, and a failure in the first call killed the thread
                # through the outer bare except.)
                request.urlretrieve(image_url, image_path)
                print(image_path + "下载完成!")
            except Exception:
                print(image_path + "下载失败!")
def extract_images(data):
    """Return the 8 full-resolution image URLs for one wallpaper entry.

    Each of the fields sProdImgNo_1 .. sProdImgNo_8 holds a URL-encoded
    thumbnail URL ending in "/200"; rewriting that suffix to "/0" yields
    the original-resolution image.

    :param data: one entry dict from the API's 'List' array
    :return: list of 8 decoded, full-resolution URLs
    """
    img_urls = []
    for x in range(1, 9):
        decoded = parse.unquote(data['sProdImgNo_%d' % x])
        # Only rewrite the trailing "/200" size marker. The original
        # replace("200", "0") hit the FIRST "200" anywhere in the URL,
        # corrupting URLs whose path or timestamp contains "200".
        if decoded.endswith("/200"):
            decoded = decoded[:-3] + "0"
        img_urls.append(decoded)
    return img_urls
def main():
    """Queue up the 18 wallpaper list pages, then launch 3 producer threads
    and 5 consumer threads to fetch and save every image.
    """
    page_queue = queue.Queue(18)     # one slot per list page
    image_queue = queue.Queue(1000)  # download jobs handed to consumers
    url_template = 'https://apps.game.qq.com/cgi-bin/ams/module/ishow/V1.0/query/workList_inc.cgi?activityId=2735&sVerifyCode=ABCD&sDataType=JSON&iListNum=20&totalpage=0&page={page}&iOrder=0&iSortNumClose=1&iAMSActivityId=51991&_everyRead=true&iTypeId=2&iFlowId=267733&iActId=2735&iModuleId=2735&_=1631013702117'
    for page_no in range(18):
        page_queue.put(url_template.format(page=page_no))
    workers = [Producer(page_queue, image_queue, name='生产者%d号' % i) for i in range(3)]
    workers += [Consumer(image_queue, name='消费者%d号' % i) for i in range(5)]
    for worker in workers:
        worker.start()


if __name__ == '__main__':
    main()
这篇文章的内容可能会有一些不足之处和错误,希望大家在评论区多多提出意见,我也会积极改正积极交流,以后也会分享一些自己的学习经历,如果这篇文章对你有所帮助,请点个赞吧!谢谢鼓励!