请复制粘贴的小伙伴注意,如果该程序被执行不主动停止的话,将会在你的电脑 D:/图片及视频/图片/爬虫 目录中一直自动创建分类文件夹并自动下载图片直到程序循环结束。
运行条件:需要在 PyCharm 中安装 requests、lxml,安装命令及代码如下
pip install requests
pip install lxml
import os
import requests
from lxml import etree
page = 1
while page <= 11:
url = f"https://www.mmonly.cc/mmtp/qcmn/list_16_{page}.html"
page+=1
headers = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36"
}
response = requests.get(url,headers=headers)
picture_html_str = response.content.decode("gbk")
picture_html = etree.HTML(picture_html_str)
picture_root = picture_html.xpath(".//div[@class='item masonry_brick masonry-brick']")
for picture in picture_root:
# time.sleep(2)
#文件名
dir_name = picture.xpath(".//div[@class='title']/span/a/text()")[0]
#图片地址
picture_adr = picture.xpath(".//div[@class='title']//a/@href")[0]
#获取该类图片数位置
picture_num_adr_str = picture.xpath(".//div[@class='items_likes']/text()")
#对获取该位置的文本进行格式化
picture_num_str = "".join(picture_num_adr_str).split("共")[-1]
#获取该图片数量
picture_num = int(picture_num_str.split("张")[0])
# 创建目录
try:
os.mkdir(f"D:/图片及视频/图片/爬虫/{dir_name}")
except Exception as e:
print(e)
#为防止第二次循环出现地址追加固定前部分地址字符
picture_adr_fix = picture_adr[:-5]
for i in range(picture_num):
#获取每一种图的所有图链接
img_response = requests.get(picture_adr, headers=headers)
#每种图的地址
picture_adr = picture_adr_fix + f"_{i+2}.html"
img_html_str = img_response.content.decode("gbk")
img_html = etree.HTML(img_html_str)
# 获取图片的url
img_url = img_html.xpath(".//div[@id='big-pic']//img/@src")[0]
with open(f"D:/图片及视频/图片/爬虫/{dir_name}/{i}.jpg","wb") as f:
f.write(requests.get(img_url).content)