Multithreaded Crawling of Meizitu (mzitu.com) Images in Python

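The script below crawls image albums from mzitu.com: it parses the album titles and detail-page URLs out of each list page with lxml XPath queries, derives every album's numbered sub-pages, and saves each picture into a per-album folder, sleeping a random sub-second interval between requests to go easy on the server.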
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File    : Crawl_meizitu.py
@Time    : 2019/7/25 13:24
@Author  : Sound_of_Silence
"""

import requests
import time
import random
import os
import traceback
from multiprocessing.dummy import Pool
from lxml import etree


def get_text(url, headers, code='utf-8'):
    try:
        r = requests.get(url, headers=headers, timeout=30)
        r.encoding = code
        r.raise_for_status()
        return r.text
    except requests.RequestException:
        # On any network/HTTP error, return an empty page for callers to skip.
        return ''


def lxml_get_detail_title_and_url(page_url, title_list, url_list):
    try:
        html = get_text(page_url, headers=hds)
        ehtml = etree.HTML(html)
        # Each <li> under #pins is one album; its first <span>'s <a> holds
        # both the album title and the detail-page URL.
        titles = ehtml.xpath('//*[@id="pins"]/li/span[1]/a/text()')
        hrefs = ehtml.xpath('//*[@id="pins"]/li/span[1]/a/@href')
        for title, href in zip(titles, hrefs):
            title_list.append(title)
            url_list.append(href)
    except Exception:
        traceback.print_exc()


def sub_page_url_list(url, depth):
    # Page 1 of an album is the album URL itself; pages 2..depth are
    # reached by appending /2, /3, ... to it.
    ls = [url]
    ls.extend(['{}/{}'.format(url, i) for i in range(2, int(depth) + 1)])
    return ls


def crawl_pic(detail_pic_list, save_dir):
    for j in range(len(detail_pic_list)):
        try:
            time.sleep(random.random())  # short random pause between requests
            html = requests.get(detail_pic_list[j], headers=hds).text
            hl = etree.HTML(html)
            # xpath returns a list; take the single image URL from it
            pic_url = hl.xpath(
                '/html/body/div[2]/div[1]/div[3]/p/a/img/@src')[0]
            time.sleep(random.random())
            r = requests.get(pic_url, headers=hds)
            with open(os.path.join(save_dir, '{}.jpg'.format(j)), 'wb') as file:
                file.write(r.content)
        except Exception:
            print('Error while crawling a picture')
            traceback.print_exc()
            continue


def crawl_pic_html(titles, urls, filepath):
    for i in range(len(titles)):
        # Build an absolute per-album directory instead of os.chdir(),
        # which changes the working directory process-wide and is not
        # safe once several threads run at the same time.
        save_dir = os.path.join(filepath, 'mm_pics', titles[i])
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        try:
            html = get_text(urls[i], headers=hds)
            ehtml = etree.HTML(html)
            # The second-to-last <span> of the pagination bar holds the
            # number of pictures in the album.
            length = ehtml.xpath(
                '/html/body/div[2]/div[1]/div[4]/a/span/text()')[-2]
            detail_pic_list = sub_page_url_list(urls[i], length)
            crawl_pic(detail_pic_list, save_dir)
        except Exception:
            print('Error while getting the album length!')
            traceback.print_exc()


def do(depth):
    global hds
    hds = {
        'referer': 'https://www.mzitu.com/tag/youhuo/',
        'upgrade-insecure-requests': '1',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
        'Chrome/75.0.3770.100 Safari/537.36'}

    page_url_list = ['https://www.mzitu.com/mm/']
    if depth > 1:
        page_url_list.extend(['https://www.mzitu.com/mm/page/{}'.format(i)
                              for i in range(2, depth + 1)])

    title_list, url_list = [], []
    fpath = 'I:\\'
    for page_url in page_url_list:
        lxml_get_detail_title_and_url(page_url, title_list, url_list)
    # Crawl once after all pages are collected; calling crawl_pic_html
    # inside the loop would re-download earlier albums on every pass.
    crawl_pic_html(title_list, url_list, fpath)


if __name__ == '__main__':
    start = time.time()  # time.ctime() returns a string and cannot be subtracted
    p = Pool(8)
    p.apply_async(do, args=(12,))
    p.close()
    p.join()
    end = time.time()
    print("Elapsed: {:.1f} min".format((end - start) / 60))
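One thing worth noting: the Pool(8) in the main block receives only a single apply_async task, so despite the title the downloads still run serially. Below is a minimal sketch of how the pool could instead be spread across the list pages, assuming the functions above are defined in the same module; the crawl_page wrapper and the simplified header dict are assumptions for illustration, not part of the original script.

# Sketch only: one pool task per list page, so up to 8 pages are
# crawled concurrently. Reuses lxml_get_detail_title_and_url and
# crawl_pic_html from above; crawl_page itself is hypothetical.
from multiprocessing.dummy import Pool


def crawl_page(page_url):
    titles, urls = [], []
    lxml_get_detail_title_and_url(page_url, titles, urls)
    crawl_pic_html(titles, urls, 'I:\\')


if __name__ == '__main__':
    hds = {'user-agent': 'Mozilla/5.0'}  # simplified headers, assumed
    depth = 12
    pages = ['https://www.mzitu.com/mm/']
    pages += ['https://www.mzitu.com/mm/page/{}'.format(i)
              for i in range(2, depth + 1)]
    with Pool(8) as pool:
        pool.map(crawl_page, pages)  # blocks until every page finishes

multiprocessing.dummy exposes the Pool API backed by threads rather than processes, which suits this I/O-bound workload.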

 
