The motivation for learning Python web scraping

The purpose of learning: put what you learn to use.

Today's post doesn't call for many words; some of it is better felt than explained.
It marks a small milestone in my learning of Python web scraping.
Today's topic: scraping the images from https://mzitu.com.

Without further ado, here is the code.

import requests
from lxml import etree
import os
import time
import random


def get_header():
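    """Build request headers with a randomly chosen User-Agent so repeated requests look less uniform."""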
    user_agents = (list(set([
        'Mozilla/5.0',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36 OPR/26.0.1656.60',
        'Opera/8.0 (Windows NT 5.1; U; en)',
        'Mozilla/5.0 (Windows NT 5.1; U; en; rv:1.8.1) Gecko/20061208 Firefox/2.0.0 Opera 9.50',
        'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 9.50',
        'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0',
        'Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.57.2 (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2 ',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.71 Safari/537.36',
        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
        'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.133 Safari/534.16',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/30.0.1599.101 Safari/537.36',
        "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36",
        'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.11 TaoBrowser/2.0 Safari/536.11',
        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER',
        'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)',
        'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 SE 2.X MetaSr 1.0',
        'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SV1; QQDownload 732; .NET4.0C; .NET4.0E; SE 2.X MetaSr 1.0) ',
        "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
        "Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
        "Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
        "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
        "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
        "Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
        "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
        "Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
        "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
        "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
        "Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
        "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
        "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
        "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36",
        "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:30.0) Gecko/20100101 Firefox/30.0",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/537.75.14"
    ])))
    headers = {
        'User-Agent': str(random.choice(user_agents)),
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
        'accept-encoding': 'gzip, deflate, br',
        'accept-language': 'zh-CN,zh;q=0.9',
        'cache-control': 'max-age=0',
        #'cookie': 'Hm_lvt_dbc355aef238b6c32b43eacbbf161c3c=1566016859,1566121434,1566136976,1566608298; Hm_lpvt_dbc355aef238b6c32b43eacbbf161c3c=1566608298',
        'upgrade-insecure-requests': '1'
    }
    return headers


def get_page_html(url):
    """获取页面https://www.mzitu.com/的html"""
    response = requests.get(url, headers=get_header())
    page_html = response.text
    html = etree.HTML(page_html)
    # Extract the URL of every gallery item on this page
    all_items_url = html.xpath('//*[@id="pins"]/li/a/@href')
    # Extract each item's name, which is used as the folder name
    all_items_name = html.xpath('//*[@id="pins"]/li/span/a/text()')
    for (name, url) in zip(all_items_name, all_items_url):
        print("正在爬取:{}图片集,地址是{}".format(name, url))
        get_items_detail(name, url)
        # Sleep a little to reduce load on the server
        time.sleep(random.randint(2, 5))


def get_items_detail(name, url):
    """获取每一项item的所指向的图片集 的地址"""
    response = requests.get(url, headers=get_header())
    page_html = response.text
    html = etree.HTML(page_html)
    # Number of pages in this gallery: //div[@class="pagenavi"]/a[last()-1]/span/text()
    max_page = html.xpath('//div[@class="pagenavi"]/a[last()-1]/span/text()')[0]
    # Build the URL of each page in the gallery
    for page in range(1, int(max_page)+1):
        requests_url = url + '/' + str(page)
        referer = url + '/' + str(page-1)
        get_each_page_img_url(name, referer, requests_url)


def get_each_page_img_url(name, referer, url):
    """获取每一页图片的地址"""
    response = requests.get(url, headers=get_header())
    page_html = response.text
    html = etree.HTML(page_html)
    # Image link: //div[@class="main-image"]/p/a/img/@src
    page_url = html.xpath('//div[@class="main-image"]/p/a/img/@src')[0]
    # Download the image
    download_pic(name, referer, page_url)


def download_pic(name, referer, url):
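    """Download a single image into Picture/<name>/, sending the gallery page as the Referer."""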
    filename = url.split('/')[-1]
    """创建name文件夹"""
    make_dir('Picture/{}'.format(name))
    print('正在下载图片,地址是{}'.format(url))
    headers = get_header()
    # The Referer header is required: the site checks it to block hotlinking,
    # so without it the downloaded file contains no real image data.
    headers['referer'] = referer
    response = requests.get(url, headers=headers)
    with open('Picture/{}/{}'.format(name, filename), 'wb') as f:
        f.write(response.content)


def make_dir(name):
    """创建文件夹"""
    if not os.path.exists(name):
        os.mkdir(name)


def main():
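    """Entry point: read the total number of list pages, then crawl them one by one."""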
    base_url = "https://www.mzitu.com/"
    # Total number of list pages: //div[@class="nav-links"]/a[last()-1]/text()
    response = requests.get(base_url, headers=get_header())
    page_html = etree.HTML(response.text)
    page_max_num = page_html.xpath('//div[@class="nav-links"]/a[last()-1]/text()')[0]
    for page_num in range(1, int(page_max_num)+1):
        url = base_url + 'page/{}/'.format(page_num)
        get_page_html(url)
        time.sleep(random.randint(3, 6))


if __name__ == '__main__':
    main()

Download process and results: I won't show the details here.
The code can be run as-is.

Note: you need to create a Picture folder in the same directory as the script; everything that is scraped will be saved inside that folder.
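If you would rather not create the folder by hand, make_dir could alternatively use os.makedirs, which also creates the missing Picture parent directory. A minimal sketch of that variant:

import os

def make_dir(name):
    """Create the directory, including any missing parent folders, if it does not exist yet."""
    os.makedirs(name, exist_ok=True)

With this variant, the manual setup step above is no longer needed.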
