'''
Crawl images for a specified keyword (Baidu image search)
'''
import re  # regular expressions, used to parse the page source
import requests  # used to fetch web pages
import traceback
import os

def downloadPic(html, keyword, startNum):
    # Browser disguise: some sites refuse crawlers, and without this header
    # the user-agent would identify the client as python
    headers = {'user-agent': 'Mozilla/5.0'}
    # Extract every image URL matching the objURL pattern from the page source
    pic_url = re.findall('"objURL":"(.*?)",', html, re.S)
    num = len(pic_url)  # number of image URLs found on this page
    i = startNum
    subroot = root + '/' + keyword  # root is defined in the __main__ block below
    txtpath = subroot + '/download_detail.txt'
    print('Found images for keyword "' + keyword + '", starting download...')
    for each in pic_url:
        a = 'image ' + str(i + 1) + ', URL: ' + str(each) + '\n'
        b = 'Downloading ' + a
        print(b)
        path = subroot + '/' + str(i + 1)
        try:
            if not os.path.exists(subroot):
                os.mkdir(subroot)
            if not os.path.exists(path):
                pic = requests.get(each, headers=headers, timeout=10)
                with open(path + '.jpg', 'wb') as f:
                    f.write(pic.content)
                with open(txtpath, 'a') as f:
                    f.write(a)
        except Exception:
            traceback.print_exc()
            print('[Error] the current image cannot be downloaded')
            continue
        i += 1
    return i

if __name__ == '__main__':
    headers = {'user-agent': 'Mozilla/5.0'}
    # words = ['横幅', '标语', 'slogan']
    words = ['盆栽植物']
    # words is a list, so images for several keywords can be saved in one run
    root = './download_images'
    for word in words:
        root = root + word + '&'  # append the keywords to the root folder name
    if not os.path.exists(root):
        os.mkdir(root)
    for word in words:
        lastNum = 0
        # word = input("Input key word: ")
        if word.strip() == "exit":
            break
        pageId = 0
        # Number of result pages to crawl; pn advances by 20 per page
        for i in range(5):
            url = 'http://image.baidu.com/search/flip?tn=baiduimage&ie=utf-8&word=' + word + "&pn=" + str(
                pageId) + "&gsm=?&ct=&ic=0&lm=-1&width=0&height=0"
            pageId += 20
            html = requests.get(url, headers=headers)
            # print(html.text)  # dump the page source, as with "view source" in a browser
            lastNum = downloadPic(html.text, word, lastNum)  # one call downloads one page of results
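
The keyword is concatenated into the flip URL as raw Chinese text; current versions of requests percent-encode it automatically, but if the request misbehaves the keyword can be encoded explicitly. A minimal sketch, not part of the original script (the example keyword matches the one above):

from urllib.parse import quote

word = '盆栽植物'            # same example keyword as in the script above
encoded_word = quote(word)   # '%E7%9B%86%E6%A0%BD%E6%A4%8D%E7%89%A9'
url = ('http://image.baidu.com/search/flip?tn=baiduimage&ie=utf-8'
       '&word=' + encoded_word + '&pn=0')
print(url)
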
Removing duplicate images:

import shutil
import numpy as np
from PIL import Image
import os

def 比较图片大小(dir_image1, dir_image2):
    # Compare the byte size of the two image files
    with open(dir_image1, "rb") as f1:
        size1 = len(f1.read())
    with open(dir_image2, "rb") as f2:
        size2 = len(f2.read())
    if size1 == size2:
        result = "same size"
    else:
        result = "different size"
    return result

def 比较图片尺寸(dir_image1, dir_image2):
    # Compare the width and height of the two images
    image1 = Image.open(dir_image1)
    image2 = Image.open(dir_image2)
    if image1.size == image2.size:
        result = "same dimensions"
    else:
        result = "different dimensions"
    return result

def 比较图片内容(dir_image1, dir_image2):
    # Compare the two images pixel by pixel
    image1 = np.array(Image.open(dir_image1))
    image2 = np.array(Image.open(dir_image2))
    if np.array_equal(image1, image2):
        result = "same content"
    else:
        result = "different content"
    return result

def 比较两张图片是否相同(dir_image1, dir_image2):
    # Decide whether two images are identical:
    # Step 1: compare file size
    # Step 2: compare width and height
    # Step 3: compare every pixel
    # If any step finds a difference, the images must be different
    result = "different images"
    大小 = 比较图片大小(dir_image1, dir_image2)
    if 大小 == "same size":
        尺寸 = 比较图片尺寸(dir_image1, dir_image2)
        if 尺寸 == "same dimensions":
            内容 = 比较图片内容(dir_image1, dir_image2)
            if 内容 == "same content":
                result = "identical images"
    return result

if __name__ == '__main__':
    load_path = 'F:\\1203\\11.kitchen\\'  # folder to deduplicate
    save_path = 'G:\\1203\\666'  # empty folder that receives the detected duplicates
    os.makedirs(save_path, exist_ok=True)
    # Build file_map, a dict of {file path: file size in bytes}
    file_map = {}
    image_size = 0
    # Walk every file under load_path, including subdirectories
    for parent, dirnames, filenames in os.walk(load_path):
        for filename in filenames:
            image_size = os.path.getsize(os.path.join(parent, filename))
            file_map.setdefault(os.path.join(parent, filename), image_size)
    # Sort the collected files by size, smallest first
    file_map = sorted(file_map.items(), key=lambda d: d[1], reverse=False)
    file_list = [filename for filename, image_size in file_map]
    # Collect the duplicates; only adjacent files in the size-sorted list are
    # compared, since identical files always have identical sizes
    file_repeat = []
    for currIndex in range(len(file_list) - 1):
        dir_image1 = file_list[currIndex]
        dir_image2 = file_list[currIndex + 1]
        result = 比较两张图片是否相同(dir_image1, dir_image2)
        if result == "identical images":
            file_repeat.append(dir_image2)
            print("\nIdentical images:", dir_image1, dir_image2)
        else:
            print("\nDifferent images:", dir_image1, dir_image2)
    # Move the duplicates into the new folder, deduplicating the original one
    for image in file_repeat:
        shutil.move(image, save_path)
        print("Moving duplicate image:", image)