# -*- coding:utf-8 -*-
import os
import requests
import urllib
from pyquery import PyQuery
import uuid
# Browser-like User-Agent sent with every request — presumably so the site
# does not reject the default python-requests UA as a bot (TODO confirm).
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome"
"/75.0.3770.142 Safari/537.36"
}
def href_url_download(start_page=1, end_page=28):
    """Step 1: collect image-page URLs from the "花卉" (flowers) tag listing.

    Walks the paginated tag listing (pages ``start_page`` .. ``end_page-1``),
    follows each article link to its detail page, extracts the gallery title
    and the image elements, then hands them to ``image_download()``.

    :param start_page: first listing page to fetch (inclusive, default 1)
    :param end_page: stop before this page number (exclusive, default 28)
    """
    for page_no in range(start_page, end_page):
        # 1. Listing URL for this page of the tag.
        listing_url = f"https://www.geimian.com/wx/tag/花卉/page/{page_no}"
        # 2. Fetch the listing page; timeout so a dead server cannot hang us.
        listing_html = requests.get(listing_url, headers=headers, timeout=30).text
        listing_doc = PyQuery(listing_html)
        # 3./4. Each listing entry holds a protocol-relative link to a detail page.
        for entry in listing_doc(".index_wz_lb>a").items():
            detail_url = "https:" + entry.attr("href")
            print(detail_url)
            # 5. Fetch the second-level (detail) page.
            detail_html = requests.get(detail_url, headers=headers, timeout=30).text
            detail_doc = PyQuery(detail_html)
            # 7. Gallery title — used as folder name and file-name prefix.
            class_name = detail_doc("#db_biaoti h2").text()
            print(class_name)
            # 6. Images live under one of two container ids depending on the
            # page template, so both selectors are collected.
            images_url = detail_doc("#imgbdjz #inr_zx p img").items()
            images_url1 = detail_doc("#imgbdjz #xcnr_zx p img").items()
            # Step 2: download the images found on this detail page.
            image_download(images_url, images_url1, class_name)
def image_download(images_url, images_url1, class_name):
    """Step 2: download every image referenced by the two element iterables.

    Images are saved under ``image/<class_name>/`` with a uuid1-based file
    name. The image URL is read from each element's ``title`` attribute
    (that is where this site stores the address used for the download).

    :param images_url: iterable of elements from the ``#inr_zx`` container
    :param images_url1: iterable of elements from the ``#xcnr_zx`` container
    :param class_name: gallery title, used for the folder and name prefix
    """
    folder_path = os.path.join("image", class_name)
    # exist_ok replaces the racy exists()-then-makedirs() check.
    os.makedirs(folder_path, exist_ok=True)
    # Both containers are handled identically, so share one helper instead
    # of two copy-pasted loops.
    _save_images(images_url, folder_path, class_name)
    _save_images(images_url1, folder_path, class_name)


def _save_images(images, folder_path, class_name):
    """Fetch each image in *images* and write it into *folder_path*."""
    for image in images:
        image_url = image.attr("title")
        print(image_url)
        # uuid1 keeps names unique even when gallery titles repeat.
        image_name = class_name + "_" + str(uuid.uuid1()) + ".jpg"
        # Timeout so one dead image host cannot stall the whole crawl.
        response = requests.get(image_url, headers=headers, timeout=30)
        with open(os.path.join(folder_path, image_name), "wb") as f:
            f.write(response.content)
if __name__ == '__main__':
    # Entry point: step 1 collects the image-page URLs and, for each page
    # found, triggers the step-2 downloads.
    href_url_download()
# 给面小站网站图片爬虫 — image crawler for the geimian.com picture site.
# (Stray scraped blog metadata, kept as a comment so the file parses:
#  "最新推荐文章于 2024-03-25 13:55:15 发布" — latest recommended article
#  published 2024-03-25 13:55:15.)