# -*- coding:utf-8 -*-
import os
import requests
import urllib
from pyquery import PyQuery
import uuid
from pypinyin import pinyin
import unicodedata
# Browser-like request headers so the site serves regular pages to the crawler.
headers = {
    "User-Agent": (
        "Mozilla/5.0 (Windows NT 6.1; Win64; x64) "
        "AppleWebKit/537.36 (KHTML, like Gecko) "
        "Chrome/75.0.3770.142 Safari/537.36"
    ),
}
def href_url_download():
    """Crawl sdphoto.com.cn search results for each keyword in list.txt.

    Reads keywords (one per line) from ./list.txt, fetches up to 9 search
    result pages per keyword, follows each result's detail page, extracts
    the full-size image URL and passes it to image_download().
    """
    # 1. Read the keyword list; one keyword per line. Use a context manager
    # so the file is closed even if reading fails.
    with open("list.txt", "r", encoding="utf-8") as keyword_file:
        keywords = [line.strip() for line in keyword_file]

    for keyword in keywords:
        if not keyword:
            # Skip blank lines in list.txt.
            continue
        print(keyword)
        # 2. Pages to crawl: 1..9 by default (adjust the range to change).
        for page in range(1, 10):
            # Search URL pattern, e.g.:
            # http://www.sdphoto.com.cn/search/21_0_0_1_0_苹果_2.html
            url = f"http://www.sdphoto.com.cn/search/21_0_0_1_0_{keyword}_{page}.html"
            print(url)
            try:
                doc = PyQuery(requests.get(url, headers=headers, timeout=30).text)
                # Each search hit links to its detail page via ".imgcon .preview".
                for preview in doc(".imgcon .preview").items():
                    html_url = "http://www.sdphoto.com.cn" + preview.attr("href")
                    print(html_url)
                    # Detail page, e.g. http://www.sdphoto.com.cn/pic/456803.html
                    detail = PyQuery(
                        requests.get(html_url, headers=headers, timeout=30).text
                    )
                    # The full-size image sits in the second cell of the
                    # ".pictw" table on the detail page.
                    for image in detail(".pictw td:nth-child(2) img").items():
                        image_url = "http://www.sdphoto.com.cn" + image.attr("src")
                        print(image_url)
                        image_download(keyword, image_url)
            except requests.exceptions.ConnectionError:
                # Best-effort crawl: report and move on to the next page.
                print("requests.exceptions.ConnectionError")
def image_download(keyword, image_url):
    """Download one image into ./image/<keyword>/ with a unique filename.

    Parameters:
        keyword: search keyword; used as the sub-folder name and filename prefix.
        image_url: absolute URL of the image to fetch.
    """
    # 3. Image storage path: one sub-folder per keyword.
    folder_path = os.path.join("image", keyword)
    os.makedirs(folder_path, exist_ok=True)
    # uuid1-based name avoids collisions between downloads of the same keyword.
    image_name = f"{keyword}_{uuid.uuid1()}.jpg"
    print(image_name)
    try:
        response = requests.get(image_url, headers=headers, timeout=30)
        with open(os.path.join(folder_path, image_name), "wb") as f:
            f.write(response.content)
    except requests.exceptions.RequestException:
        # BUG FIX: the original caught urllib.error.HTTPError, which requests
        # never raises — and `import urllib` does not even import the
        # urllib.error submodule, so evaluating that except clause could raise
        # AttributeError. RequestException covers HTTP, connection and
        # timeout failures from requests.
        print("requests.exceptions.ConnectionError")
if __name__ == "__main__":
    # Run the crawler when executed as a script (not on import).
    href_url_download()
# Shandong image library (sdphoto.com.cn) image crawler.
# Original blog post last published 2021-05-13 11:59:03.