Crawling Meizitu (mzitu.com) with Multiple Processes
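The script below uses a producer/consumer layout: one multiprocessing.Process walks the album list on http://www.mzitu.com/, resolves every image URL, and puts (image URL, referer) pairs on a Queue, while a Pool of five worker processes takes pairs off the queue and saves the images into a local download/ directory. The Referer header is the important detail: the image server rejects hotlinked requests, so each download carries the album page URL as its referer.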

import os
import multiprocessing
from multiprocessing import Queue, Pool

import requests
from lxml import etree

def download_img(img_url_referer_url):
    # Pool.apply_async passes a single argument, so the image URL and
    # its referer travel together as one tuple.
    (img_url, referer) = img_url_referer_url
    print('Downloading ......' + img_url)
    headers = {
        # mzitu.com rejects hotlinked image requests, so send the album
        # page as the referer along with a browser User-Agent.
        'referer': referer,
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',
    }
    os.makedirs('download', exist_ok=True)
    filename = 'download/' + img_url.split('/')[-1]
    response = requests.get(img_url, headers=headers)
    with open(filename, 'wb') as f:
        f.write(response.content)

def parse_detailed_page(url_href, queue):
    # The album's first page lists the highest page number in its
    # pagination bar; the second-to-last <span> holds it.
    response = requests.get(url_href)
    html_ele = etree.HTML(response.text)
    max_page = html_ele.xpath('//div[@class="pagenavi"]/a/span/text()')[-2]
    # Each page of the album ("<album url>/<n>") shows exactly one image.
    for i in range(1, int(max_page) + 1):
        page_url = url_href + '/' + str(i)
        response = requests.get(page_url)
        html_ele = etree.HTML(response.text)
        img_url = html_ele.xpath('//div[@class="main-image"]/p/a/img/@src')[0]
        # Hand the (image URL, referer) pair to the download workers.
        queue.put((img_url, url_href))


def get_all_image_url(queue):
    # Producer: collect every album link on the front page and feed the
    # image URLs from each album into the shared queue.
    url = 'http://www.mzitu.com/'
    response = requests.get(url)
    html_ele = etree.HTML(response.text)
    href_list = html_ele.xpath('//ul[@id="pins"]/li/a/@href')
    for href in href_list:
        parse_detailed_page(href, queue)

if __name__ == '__main__':
    # Producer process: crawl the site and fill the queue with
    # (image URL, referer) pairs.
    q = Queue()
    p = multiprocessing.Process(target=get_all_image_url, args=(q,))
    p.start()

    # Consumer side: a pool of 5 worker processes downloads the images.
    # The count of 1111 is a hard-coded cap; q.get() blocks until the
    # producer delivers the next pair.
    download_pool = Pool(5)
    for _ in range(1111):
        image_url_referer_url = q.get()
        download_pool.apply_async(download_img, (image_url_referer_url,))

    download_pool.close()
    download_pool.join()
    # Join the producer before the program exits.
    p.join()
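One weak point in the code above is the hard-coded loop of 1111 q.get() calls: if the crawl yields fewer images, the main process blocks forever on an empty queue. A common fix is a sentinel value: the producer puts a marker (here None) on the queue when it is done, and the dispatch loop stops when it sees it. The sketch below shows the pattern with stand-in producer and consumer functions (produce_urls and consume are illustrative names, not part of the script above):

import multiprocessing
from multiprocessing import Queue, Pool

SENTINEL = None  # marker the producer puts on the queue when it has no more work

def produce_urls(queue):
    # Stand-in for get_all_image_url: push (img_url, referer) pairs, then the sentinel.
    for i in range(10):
        queue.put(('http://example.com/img%d.jpg' % i, 'http://example.com/'))
    queue.put(SENTINEL)

def consume(item):
    # Stand-in for download_img.
    img_url, referer = item
    print('would download', img_url, 'with referer', referer)

if __name__ == '__main__':
    q = Queue()
    p = multiprocessing.Process(target=produce_urls, args=(q,))
    p.start()

    pool = Pool(5)
    while True:
        item = q.get()           # blocks until the producer sends something
        if item is SENTINEL:     # producer finished, stop dispatching work
            break
        pool.apply_async(consume, (item,))

    pool.close()
    pool.join()
    p.join()

Wiring this into the real script would only mean appending queue.put(None) at the end of get_all_image_url and replacing the fixed-count for loop in __main__ with the while loop shown here.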