1. 数据分析
源网址
首先壁纸数据不在网站源代码中,其中数据被保存到json中。
图片链接因为带汉字,需要通过urllib库中parse.unquote方法进行解析后再进行获取。
其中页数是通过page传递。可以删除jsoncallback字段,保证通过request.get方法获取到的响应可以直接通过json方法转化为字典。
2. 爬取代码
get请求需要传递参数处理函数
import re
def handle_load(msg):
    """Parse a DevTools-style ``key: value`` text block into a dict.

    Each non-empty line of *msg* is split on its FIRST colon, so values that
    themselves contain colons (e.g. URLs) are kept intact.  Keys and values
    are stripped of surrounding whitespace.

    Note: the previous regex-based parser used the character class
    ``[a-zA-Z-]`` for keys, which silently mangled keys containing an
    underscore (``_everyRead`` became ``everyRead``, ``_`` became ``''``).

    :param msg: multi-line string of ``key: value`` pairs.
    :return: dict mapping each key to its value (both as stripped strings).
    """
    head_dict = {}
    for line in msg.splitlines():
        line = line.strip()
        # Skip blank lines.
        if not line:
            continue
        # Split on the first colon only; values may legally contain colons.
        key, sep, value = line.partition(':')
        if not sep:
            # No colon at all -> not a key/value line; ignore it.
            continue
        head_dict[key.strip()] = value.strip()
    return head_dict
爬取主函数:
import os
import threading
import urllib.request
from queue import Empty, Queue
from urllib import parse

import requests

from Handle import handle_load
# Wallpaper-list endpoint; responds with JSON once the jsoncallback
# (JSONP) parameter is omitted from the query string.
url = "https://apps.game.qq.com/cgi-bin/ams/module/ishow/V1.0/query/workList_inc.cgi"
# Bounded producer/consumer queue of [wallpaper_name, image_url] tasks
# shared between get_json (producer) and save_img (consumer) threads.
queue = Queue(100)
# Browser-like User-Agent so the CGI endpoint serves the request normally.
headers = {
'user-agent':
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/'
'105.0.0.0 Safari/537.36'
}
# Producer: fetches one result page and enqueues download tasks.
def get_json(time):
    """Fetch page *time* of the wallpaper list and enqueue image tasks.

    For every wallpaper in the response, pushes ``[name, image_url]`` onto
    the shared module-level ``queue`` for the consumer threads.
    """
    datas = f'''
activityId: 2735
sVerifyCode: ABCD
sDataType: JSON
iListNum: 20
totalpage: 0
page: {time}
iOrder: 0
iSortNumClose: 1
iAMSActivityId: 51991
_everyRead: true
iTypeId: 2
iFlowId: 267733
iActId: 2735
iModuleId: 2735
_: 1663639429423
'''
    # Turn the pasted "key: value" text into a query-string dict.
    params = handle_load(datas)
    payload = requests.get(url, headers=headers, params=params).json()
    for item in payload['List']:
        # The product name is percent-encoded (contains Chinese characters).
        title = parse.unquote(item['sProdName'])
        # Fields sProdImgNo_2 .. sProdImgNo_8 hold the image variants;
        # swapping 'jpg/200' for 'jpg/0' yields the full-size image URL.
        for index in range(2, 9):
            raw = item[f'sProdImgNo_{index}']
            full_size = parse.unquote(raw).replace('jpg/200', 'jpg/0')
            queue.put([title, full_size])
# Consumer: drains the shared queue, saving each image to disk.
def save_img():
    """Download queued images into one sub-directory per wallpaper name.

    Blocks on the shared ``queue`` and exits once it has stayed empty for
    3 seconds, which signals that the producer threads have finished.
    """
    while True:
        print(f'任务队列大小为:{queue.qsize()}')
        try:
            data = queue.get(timeout=3)
        except Empty:
            # Queue empty for 3 s: producers are done, stop this worker.
            break
        # One folder per wallpaper name; exist_ok avoids failing when
        # several worker threads create the same folder concurrently.
        folder = os.path.join('E:\\PythonCode\\TestImg', data[0])
        os.makedirs(folder, exist_ok=True)
        # The URL ends with '.../<filename>/0', so the file name is the
        # second-to-last path component.
        filename = data[1].split('/')[-2]
        path = os.path.join(folder, filename)
        urllib.request.urlretrieve(data[1], path)
        print(f"{filename}保存成功")
# Launch 3 producer threads (pages 0-2) and 12 downloader threads.
for page in range(3):
    threading.Thread(target=get_json, args=(page,)).start()
for _ in range(12):
    threading.Thread(target=save_img).start()