# 王者荣耀皮肤爬取 (Honor of Kings skin scraper)
# 满满干货，可以在下方留言一起交流，有需要的拿去，正在持续学习，会更新爬虫相关的文章。
"""
Created at 21:27 on Mar 17, 2020
@author: QingLiu
"""
import requests
import os
import json
import time
# NOTE(review): the return value is discarded, so this call does NOT start a
# timer — perf_counter() must be saved and subtracted later to measure elapsed time.
time.perf_counter()
def get_html():
    """Fetch and parse the official hero list JSON.

    Returns:
        The parsed hero list (a list of hero dicts) on success,
        or ``None`` when the request or JSON decoding fails.
    """
    herolist_url = 'https://pvp.qq.com/web201605/js/herolist.json'
    kv = {'user-agent': 'Mozilla/5.0'}
    try:
        response = requests.get(herolist_url, headers=kv, timeout=20)
        response.raise_for_status()
        # requests decodes JSON directly; json.loads(response.text) was redundant.
        return response.json()
    # Narrowed from a bare `except:` (which also swallowed KeyboardInterrupt).
    # ValueError covers JSON decode errors on older requests versions.
    except (requests.RequestException, ValueError):
        print("异常")
        return None
def return_info(py_txt, save_dir="D://honors//"):
    """Download the big-skin splash art for every hero in *py_txt*.

    Args:
        py_txt: parsed herolist.json — a list of hero dicts with keys
            'ename' (numeric id), 'cname' (display name) and, optionally,
            'skin_name' ('|'-separated skin titles) or 'hero_type2'.
        save_dir: directory the .jpg files are written to; created
            (including parents) when missing. Defaults to the original
            hard-coded path for backward compatibility.
    """
    # makedirs + exist_ok: os.mkdir fails when parents are missing or dir exists.
    os.makedirs(save_dir, exist_ok=True)
    url_tpl = 'http://game.gtimg.cn/images/yxzj/img201606/skin/hero-info/{}/{}-bigskin-{}.jpg'
    for each in py_txt:
        hero_num = each['ename']
        hero_name = each['cname']
        if each.get('skin_name'):
            # One image per named skin; CDN skin indices are 1-based.
            for idx, skin_name in enumerate(each['skin_name'].split('|'), start=1):
                file_name = save_dir + str(hero_num) + "-" + hero_name + "-" + skin_name + '.jpg'
                _download_image(url_tpl.format(hero_num, hero_num, idx), file_name)
        elif each.get('hero_type2'):
            # No skin list: grab the default (first) splash art.
            # Fixed: original filename had a stray trailing '-' before '.jpg'.
            file_name = save_dir + str(hero_num) + "-" + hero_name + '.jpg'
            _download_image(url_tpl.format(hero_num, hero_num, 1), file_name)
        else:
            print('没有爬取完')


def _download_image(url, path):
    """Fetch *url* and write its body to *path*; skip (and report) on failure.

    Previously error responses (e.g. 404 pages) had no timeout/status check
    and were silently written out as .jpg files.
    """
    try:
        response = requests.get(url, timeout=20)
        response.raise_for_status()
    except requests.RequestException:
        print("异常", url)
        return
    with open(path, 'wb') as f:
        f.write(response.content)
def main():
    """Entry point: fetch the hero list, download all skins, report runtime."""
    # perf_counter() is an arbitrary clock reading; only the difference between
    # two readings is meaningful. The original printed the raw reading.
    start = time.perf_counter()
    py = get_html()
    if py is None:
        # get_html already reported the failure; original would crash with
        # a TypeError when iterating None in return_info.
        return
    return_info(py)
    print(f"爬取时间共运行{time.perf_counter() - start:.2f}秒")


if __name__ == '__main__':
    main()
# 为梦起航 (Set sail for the dream)