写得很垃圾但是能用。
一、下载单张图片
def run():
    """Download one sample image from Baidu and save it in the working directory.

    The file is written under its original name; any existing file with that
    name is overwritten.
    """
    response = requests.get(
        "https://publish-pic-cpu.baidu.com/56ca17ca-7086-430f-900d-1de68928ef72.png"
    )
    # `with` closes the file automatically; the original's bare `f.close`
    # (missing parentheses) was a no-op attribute access and is dropped.
    with open("56ca17ca-7086-430f-900d-1de68928ef72.png", "wb") as f:
        f.write(response.content)


if __name__ == '__main__':
    run()
二、分析网址规律,筛选图片地址并加分页
# This is a sample Python script.
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
import os
import requests
import threading # 多线程模块
import re # 正则表达式模块
import time # 时间模块
all_urls = [] # gallery page URLs built by Spider, consumed (popped) by Producer threads
g_lock = threading.Lock() # single lock guarding both shared lists below
all_img_urls = []  # image URLs scraped from pages, consumed (popped) by DownPic threads
threads = []  # Producer thread handles, joined before downloading starts
def run():
    """Download one sample image from Baidu and save it in the working directory.

    Duplicate of the stand-alone example above; kept because the script body
    below calls it.
    """
    response = requests.get(
        "https://publish-pic-cpu.baidu.com/56ca17ca-7086-430f-900d-1de68928ef72.png"
    )
    # `with` closes the file automatically; the original's bare `f.close`
    # (missing parentheses) was a no-op attribute access and is dropped.
    with open("56ca17ca-7086-430f-900d-1de68928ef72.png", "wb") as f:
        f.write(response.content)
# http://sj.zol.com.cn/bizhi/meinv/1.html
# http://sj.zol.com.cn/bizhi/meinv/2.html
# http://sj.zol.com.cn/bizhi/meinv/3.html
#得到图片分页网址
class Spider():
    """Builds the list of gallery page URLs from a printf-style URL pattern."""

    def __init__(self, target_url):
        # target_url must contain a %d placeholder for the page number,
        # e.g. 'http://sj.zol.com.cn/bizhi/meinv/%d.html'
        self.target_url = target_url

    def __getUrls__(self, start_page, page_num):
        """Build page URLs for pages start_page..page_num (inclusive).

        Appends them to the module-level ``all_urls`` list (creating it if the
        module global does not exist yet) and also returns them, so the method
        is usable without the global.  The unconventional dunder name is kept
        for backward compatibility with existing callers.
        """
        global all_urls
        urls = [self.target_url % i for i in range(start_page, page_num + 1)]
        try:
            all_urls.extend(urls)
        except NameError:
            # Module global not defined (e.g. class used standalone).
            all_urls = urls
        return urls
#正则匹配筛选出图片链接
class Producer(threading.Thread):
    """Worker thread: pops page URLs from the shared ``all_urls`` list, fetches
    each page and collects every ``<img src=...>`` link into the shared
    ``all_img_urls`` list.  Both lists are guarded by ``g_lock``.
    """

    def run(self):
        global all_urls, all_img_urls
        # Compile the pattern once instead of on every page.
        img_re = re.compile(r"""<img\s.*?\s?src\s*=\s*['|"]?([^\s'"]+).*?>""", re.I)
        while True:
            # Check-and-pop must happen under the lock.  The original tested
            # len(all_urls) > 0 BEFORE acquiring the lock, so another producer
            # could empty the list in between and pop() would raise IndexError.
            # ``with`` also guarantees the lock is released on any exception.
            with g_lock:
                if not all_urls:
                    break
                page_url = all_urls.pop()
            print("分析" + page_url)
            # Network I/O happens outside the lock, as in the original.
            response = requests.get(page_url)
            pic_links = img_re.findall(response.text)
            with g_lock:
                all_img_urls += pic_links
                print(all_img_urls)
            time.sleep(0.5)
#下载图片
class DownPic(threading.Thread):
    """Worker thread: pops image URLs from the shared ``all_img_urls`` list and
    saves each image into the ``image/`` directory, named after the last path
    segment of its URL.
    """

    def run(self):
        # NOTE(review): the original declared ``global path`` but `path` is
        # never assigned anywhere in this script (NameError at runtime), while
        # files were actually opened under the hard-coded "image/" prefix.
        # Use one consistent local directory name instead.
        save_dir = "image"
        # exist_ok avoids the FileExistsError race when several of the ten
        # downloader threads reach this line at the same time.
        os.makedirs(save_dir, exist_ok=True)
        while True:
            # Check-and-pop under the lock.  The original could leave the loop
            # while still HOLDING g_lock when the list went empty, deadlocking
            # every other downloader; ``with`` releases it on every path.
            with g_lock:
                if not all_img_urls:
                    print("--下载完成--")
                    break
                pic = all_img_urls.pop()
            print("--开始下载---" + str(len(all_img_urls)))
            response = requests.get(pic)
            filename = pic.split("/")[-1]
            # `with` closes the file; the original's bare `f.close` was a no-op.
            with open(os.path.join(save_dir, filename), "wb") as f:
                f.write(response.content)
if __name__ == '__main__':
    # 1. Stand-alone single-image demo.
    run()

    # 2. Build the six gallery page URLs.
    target_url = 'http://sj.zol.com.cn/bizhi/meinv/%d.html'  # page URL pattern
    spider = Spider(target_url)
    spider.__getUrls__(1, 6)
    print(all_urls)

    # 3. Scrape image URLs with two producer threads.
    for x in range(2):
        t = Producer()
        t.start()
        threads.append(t)
    # Wait until every page has been analysed before downloading begins.
    for tt in threads:
        tt.join()
    print("图片地址获取完毕")

    # 4. Save the images with ten downloader threads.  The original never
    # joined them, so the main thread could finish while downloads were still
    # in flight; join so the script only exits when all downloads are done.
    down_threads = []
    for x in range(10):
        down = DownPic()
        down.start()
        down_threads.append(down)
        print(f"开始下载图片线程{x}")
    for d in down_threads:
        d.join()
里面挺简单的,都有注释。一个一个方法弄都能弄出来,适合初学者。