Python: multi-threaded scraping of Honor of Kings (王者荣耀) HD wallpapers

The script below uses a producer/consumer pattern built on threading and queue: producer threads page through the official wallpaper list API, and consumer threads download each image to disk.

import threading
import requests
import random
from urllib import request
from urllib import parse
import os
import queue

# Pick a random browser User-Agent
UserAgent = random.choice([
    'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:78.0) Gecko/20100101 Firefox/78.0',
    'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.3; .NET4.0C; .NET4.0E) QQBrowser/6.9.11079.201',
    'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',
    'Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5',
    'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.41 Safari/535.1 QQBrowser/6.9.11079.201',
    'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; InfoPath.3; .NET4.0C; .NET4.0E)',
    'Mozilla/5.0 (Linux; U; Android 2.3.7; en-us; Nexus One Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1',
    'Mozilla/5.0 (hp-tablet; Linux; hpwOS/3.0.0; U; en-US) AppleWebKit/534.6 (KHTML, like Gecko) wOSBrowser/233.70 Safari/534.6 TouchPad/1.0',
    'Mozilla/4.0 (compatible; MSIE 6.0; ) Opera/UCWEB7.0.2.37/28/999',
    'Mozilla/5.0 (BlackBerry; U; BlackBerry 9800; en) AppleWebKit/534.1+ (KHTML, like Gecko) Version/6.0.0.337 Mobile Safari/534.1+',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11'
])

# Define the request headers
headers = {
    'user-agent': UserAgent,
    'referer': 'https://pvp.qq.com/web201605/wallpaper.shtml'
}

# Producer: fetches one page of the wallpaper list API at a time
# and pushes every image URL it finds into the image queue.
class Producer(threading.Thread):
    def __init__(self, page_queue, image_queue, *args, **kwargs):
        super(Producer, self).__init__(*args, **kwargs)
        self.page_queue = page_queue
        self.image_queue = image_queue

    def run(self) -> None:
        while not self.page_queue.empty():
            page_url = self.page_queue.get()

            resp = requests.get(page_url, headers=headers)
            result = resp.json()
            datas = result['List']
            for data in datas:
                image_urls = extract_images(data)
                # The wallpaper name is URL-encoded; decode it and strip the '1:1' suffix
                name = parse.unquote(data['sProdName']).replace('1:1', '').strip()
                dir_path = os.path.join('image', name)

                # Create the folder if it does not exist yet (makedirs also creates
                # the parent 'image' directory, and exist_ok avoids a race between
                # producer threads creating the same folder)
                os.makedirs(dir_path, exist_ok=True)

                # Put each image URL into the queue together with its target path
                for index, image_url in enumerate(image_urls):
                    self.image_queue.put({
                        'image_url': image_url,
                        'image_path': os.path.join(dir_path, '%d.png' % (index + 1))
                    })


# Consumer: takes image download tasks off the queue and saves them to disk.
class Consumer(threading.Thread):
    def __init__(self, image_queue, *args, **kwargs):
        super(Consumer, self).__init__(*args, **kwargs)
        self.image_queue = image_queue

    def run(self) -> None:
        while True:
            try:
                # If the queue stays empty for 10 seconds, assume the producers
                # are done and let this thread exit.
                image_obj = self.image_queue.get(timeout=10)
                image_url = image_obj.get('image_url')
                image_path = image_obj.get('image_path')
                try:
                    request.urlretrieve(image_url, image_path)
                    print(image_path, 'downloaded successfully')
                except Exception:
                    print(image_path, 'download failed')
            except queue.Empty:
                break

def extract_images(data):
    """Each record carries eight URL-encoded image fields, sProdImgNo_1 to sProdImgNo_8.
    Replacing the trailing size parameter '200' with '0' requests the full-resolution
    wallpaper instead of the thumbnail."""
    image_urls = []
    for i in range(1, 9):
        image_url = parse.unquote(data['sProdImgNo_%d' % i]).replace('200', '0').strip()
        image_urls.append(image_url)

    return image_urls

# Main
def main():
    # Queue of page URLs to crawl
    page_queue = queue.Queue()

    # Queue of image download tasks
    image_queue = queue.Queue()

    start_page = int(input('Start page: '))
    end_page = int(input('End page: '))
    # The API's page parameter is 0-indexed, so shift the 1-indexed input down
    # by one; range() already excludes its upper bound, so end_page is used as-is.
    start_page = start_page - 1

    for i in range(start_page, end_page):
        page_url = 'https://apps.game.qq.com/cgi-bin/ams/module/ishow/V1.0/query/workList_inc.cgi?activityId=2735&sVerifyCode=ABCD&sDataType=JSON&iListNum=20&totalpage=0&page={page}&iOrder=0&iSortNumClose=1&_everyRead=true&iTypeId=2&iFlowId=267733&iActId=2735&iModuleId=2735&_=1597591362368'.format(page=i)
        # Add each page URL to the page queue
        page_queue.put(page_url)

    # Start 6 producer threads
    for i in range(6):
        producer = Producer(page_queue, image_queue)
        producer.start()

    # Start 15 consumer threads
    for i in range(15):
        consumer = Consumer(image_queue)
        consumer.start()

if __name__ == '__main__':
    main()
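
A few notes on the script. random.choice runs once at import time, so every request in a given run reuses the same User-Agent; to rotate per request, pick from the list inside the request code instead. Also, urllib's request.urlretrieve does not send the custom headers defined at the top, so images are fetched with the default Python-urllib User-Agent. If the image CDN ever rejects those requests, one option is to download with requests and reuse the same headers inside Consumer.run. The helper below is a minimal sketch under that assumption, not part of the original script, and the name download_image is my own:

def download_image(image_url, image_path):
    # Reuse the same User-Agent/Referer that the API requests already use
    resp = requests.get(image_url, headers=headers, timeout=10)
    resp.raise_for_status()
    with open(image_path, 'wb') as f:
        f.write(resp.content)

Finally, each Consumer exits only after the image queue has stayed empty for 10 seconds, which is why main() can simply start the threads and return; if a deterministic shutdown matters, queue.Queue.join() together with task_done() calls is the usual alternative.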

