# Crawler script: scrape image categories from ivsky.com and download the images (爬虫程序,爬取图片)

#!/usr/bin/env python
# -*- coding:utf-8 -*-

# 获取网页源码/下载网页/图片/视频/音频....
import requests
# 解析网页相关数据
from lxml import etree
# 操作文件夹/路径
import os


# Scrape the nature-photo section of ivsky.com and download every image
# into images/<big_category>/<sub_category>/ .
#
# Pipeline: 1) fetch the page source  2) parse it (the hard part)  3) save data.

BASE_URL = "http://www.ivsky.com"   # was hard-coded in three places
TIMEOUT = 10                        # seconds; without it requests.get can hang forever

url = BASE_URL + "/tupian/ziranfengguang/"
response = requests.get(url, timeout=TIMEOUT)
# response.content (bytes) vs response.text (str) -- same payload, different type.

# Parse the HTML source into an element tree rooted at <html>.
root = etree.HTML(response.content)
# Locate the top-level category links (ul > li > a).
# NOTE: xpath() always returns a list, even for a single match.
a_list = root.xpath("//ul[@class='tpmenu']/li/a")
# Slice off the first entry ("all categories"); it is not a real category.
for a in a_list[1:]:
    # text() -> text between the tags; @href -> value of the href attribute.
    big_title = a.xpath("text()")[0]
    big_url = a.xpath("@href")[0]
    if not big_url.startswith("http"):
        big_url = BASE_URL + big_url

    big_response = requests.get(big_url, timeout=TIMEOUT)
    big_root = etree.HTML(big_response.content)
    big_a_list = big_root.xpath("//div[@class='sline']/div/a")
    for big_a in big_a_list:
        small_title = big_a.xpath("text()")[0]
        small_url = big_a.xpath("@href")[0]
        if not small_url.startswith("http"):
            small_url = BASE_URL + small_url
        print(small_title, small_url)

        # One folder per (category, sub-category).  os.path.join builds the
        # path portably; the exists() check prevents the
        # "directory already exists" error from makedirs.
        path = os.path.join("images", big_title, small_title)
        if not os.path.exists(path):
            os.makedirs(path)

        # Walk the paginated listing: page 1 is the base listing URL and
        # page N >= 2 lives at <base>/index_N.html.  Keep the original URL
        # so the /index_N.html suffix is not appended repeatedly
        # (avoids ".../index_2.html/index_3.html").
        page = 1
        old_small_url = small_url
        while True:
            small_response = requests.get(small_url, timeout=TIMEOUT)
            small_root = etree.HTML(small_response.content)
            img_list = small_root.xpath("//div[@class='il_img']/a/img")
            if not img_list:
                break  # an empty listing marks the end of pagination
            for idx, img in enumerate(img_list):
                src = img.xpath("@src")[0]
                # Build a filename unique within the folder:
                # <alt text><page>_<index>.jpg
                name = img.xpath("@alt")[0] + str(page) + "_" + str(idx) + ".jpg"
                img_response = requests.get(src, timeout=TIMEOUT)
                # "with" closes the file even if write() raises
                # (the original open/close pair leaked the handle on error).
                with open(os.path.join(path, name), "wb") as f:
                    f.write(img_response.content)
            page += 1
            small_url = old_small_url + "/index_%s.html" % page

 

转载于:https://my.oschina.net/u/3771014/blog/1624089

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值