Shandong Image Library (sdphoto.com.cn) image crawler

# -*- coding:utf-8 -*-
import os
import uuid

import requests
from pyquery import PyQuery
# pypinyin/unicodedata are only needed if the commented-out
# Chinese-to-pinyin conversion below is re-enabled.
from pypinyin import pinyin
import unicodedata

headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome"
                  "/75.0.3770.142 Safari/537.36"
}


def href_url_download():
    # 1. Fill list.txt with the keywords to crawl, one keyword per line
    keyword_list = open("list.txt", 'r', encoding='utf-8')
    lines = keyword_list.readlines()
    keyword_list.close()
    for keyword in lines:
        keyword = keyword.strip()
        print(keyword)
        # ------------------- convert Chinese characters to pinyin -------------------
        # word = pinyin(keyword)
        # pin_yin = "".join(str(i[0]) for i in word)
        # print(pin_yin)
        # res = unicodedata.normalize('NFKD', pin_yin).encode('ascii', 'ignore').decode("ascii")
        # print(res)
        # ------------------- convert Chinese characters to pinyin -------------------
        # 2. Adjust the page range; range(1, 10) crawls 9 result pages by default
        for pages in range(1, 10):
            page = str(pages)
            # ----------------------- core parsing section -----------------------
            # ----------------- site-specific part: edit for other sites -----------------
            # http://www.51yuansu.com/search/xiangjiao-40-0-0-0-1/
            # http://www.sdphoto.com.cn/search/21_0_0_1_0_苹果_2.html
            url = "http://www.sdphoto.com.cn/search/21_0_0_1_0_" + keyword + "_" + page + ".html"
            print(url)
            # ------------------------------------------------
            try:
                txt = requests.get(url, headers=headers).text
                doc = PyQuery(txt)
                # print(doc)
                # ----------------- site-specific part: edit for other sites -----------------
                pages_url = doc(".imgcon .preview").items()
                for page_url in pages_url:
                    html_url = page_url.attr("href")
                    html_url = "http://www.sdphoto.com.cn" + html_url
                    print(html_url)
                    # ------------------------------------------------
                    # detail page, e.g. http://www.sdphoto.com.cn/pic/456803.html
                    txt2 = requests.get(html_url, headers=headers).text
                    doc2 = PyQuery(txt2)
                    # print(doc2)
                    # ----------------- site-specific part: edit for other sites -----------------
                    images = doc2(".pictw td:nth-child(2) img").items()
                    for image in images:
                        image_url = "http://www.sdphoto.com.cn" + image.attr("src")
                        print(image_url)
                        # ----------------------- core parsing section -----------------------
                        image_download(keyword, image_url)
            except requests.exceptions.ConnectionError as e:
                print("requests.exceptions.ConnectionError:", e)


def image_download(keyword, image_url):
    # 3. Image storage path: ./image/<keyword>/
    folder_path = "./image/" + keyword + "/"
    if not os.path.exists(folder_path):
        os.makedirs(folder_path)
        # print(folder_path)
    image = keyword + "_" + str(uuid.uuid1()) + ".jpg"  # image file name
    print(image)
    try:
        content = requests.get(image_url, headers=headers)
        content.raise_for_status()  # surface 4xx/5xx responses as HTTPError
        with open(folder_path + image, "wb") as f:
            f.write(content.content)
    except requests.exceptions.HTTPError as e:
        print("requests.exceptions.HTTPError:", e)
    except requests.exceptions.ConnectionError as e:
        print("requests.exceptions.ConnectionError:", e)


if __name__ == '__main__':
    href_url_download()
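
For reference, a minimal usage sketch (the script file name crawler.py is an assumption, not stated in the post): list.txt sits next to the script, is UTF-8 encoded, and holds one search keyword per line; downloaded images are saved under ./image/<keyword>/.

# list.txt (UTF-8), one keyword per line; 苹果 matches the sample search URL in the code
苹果
香蕉

# run it (assumed file name)
python crawler.py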
