Building the Dataset
Crawling the Dataset
The Crawler
I used a crawler to pull 100 SpongeBob SquarePants (海绵宝宝) images from Baidu Images.
import requests
from urllib import parse
import re
import os
import time
from concurrent.futures import ThreadPoolExecutor, as_completed

def getImagesLinks(word, num):
    '''Collect thumbnail links from Baidu image search; num sets how many images you want.'''
    images_links = []
    page = 30
    uriword = parse.quote(word)
    rec_num = 0
    while True:
        needurl = "https://image.baidu.com/search/acjson?tn=resultjson_com&ipn=rj&ct=201326592&is=&fp=result&queryWord=" + uriword + "&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=-1&z=&ic=&hd=&latest=&copyright=&word=" + uriword + "&s=&se=&tab=&width=&height=&face=0&istype=2&qc=&nc=1&fr=&expermode=&force=&pn=" + str(page) + "&rn=30&gsm=78&1595357380029="
        page += 30
        print(needurl)
        data_all = requests.get(needurl)
        data_all.encoding = 'utf8'
        pat01 = '"thumbURL":"([\\s\\S]*?)"'
        res0 = re.compile(pat01).findall(data_all.text)
        if not res0:
            # no more results on this page; stop instead of looping forever
            return images_links
        for item in res0:
            print(item)
            images_links.append(item)
            rec_num += 1
            print(rec_num)
            if rec_num == num:
                return images_links

def downLoadImages(links, name, path=None, speedup=False):
    '''Create the target folder if it does not exist, then download all links in parallel.'''
    if path is None:
        path = os.getcwd()
    im_path = os.path.join(path, name)
    if not os.path.exists(im_path):
        os.mkdir(im_path)

    def get_pic(pic_path, pic_link):
        '''The existence check was meant to avoid re-crawling, but each crawl
        returns a slightly different set of links, so it rarely helps.'''
        if not os.path.exists(pic_path):
            res = requests.get(pic_link).content
            with open(pic_path, "wb") as pic_object:
                pic_object.write(res)
        return pic_path

    with ThreadPoolExecutor(max_workers=90) as executor:
        all_tasks = []
        for i in range(1, len(links) + 1):
            task = executor.submit(get_pic, os.path.join(im_path, str(i) + ".jpg"), links[i - 1])
            all_tasks.append(task)
        for future in as_completed(all_tasks):
            data = future.result()
            print("saved {}".format(data))

if __name__ == "__main__":
    links = getImagesLinks("海绵宝宝", 100)
    start_time = time.time()
    downLoadImages(links, "haimian")
    print(time.time() - start_time)
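As the comment in get_pic notes, checking whether a file already exists does little, because each crawl returns a slightly different set of links. A more reliable way to drop duplicates is to hash the downloaded bytes afterwards. This is a minimal sketch of that idea; the dedup_folder helper is my own, not part of the crawler above:

import hashlib
import os

def dedup_folder(folder):
    '''Delete files whose exact byte content has already been seen.'''
    seen = set()
    for fname in sorted(os.listdir(folder)):
        fpath = os.path.join(folder, fname)
        with open(fpath, 'rb') as f:
            digest = hashlib.md5(f.read()).hexdigest()
        if digest in seen:
            os.remove(fpath)   # identical image downloaded twice
        else:
            seen.add(digest)

dedup_folder('haimian')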
The crawled images look like this:
Feature Extraction and Vocabulary Generation
import pickle
from PCV.imagesearch import vocabulary
from PCV.tools.imtools import get_imlist
from PCV.localdescriptors import sift

# get the list of images
imlist = get_imlist('haimian/')
nbr_images = len(imlist)
# build the list of feature-file names
featlist = [imlist[i][:-3] + 'sift' for i in range(nbr_images)]
# extract SIFT features for every image in the folder
for i in range(nbr_images):
    sift.process_image(imlist[i], featlist[i])
# train the vocabulary: 1000 visual words, keeping every 10th descriptor
voc = vocabulary.Vocabulary('ukbenchtest')
voc.train(featlist, 1000, 10)
# save the vocabulary
with open('haimian/vocabulary.pkl', 'wb') as f:
    pickle.dump(voc, f)
print('vocabulary is:', voc.name, voc.nbr_words)
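To sanity-check the trained vocabulary, you can project one image's SIFT descriptors onto it and look at the resulting word histogram; PCV's Indexer.add_to_index performs the same projection internally. A minimal sketch, reusing the featlist built above:

import pickle
from PCV.localdescriptors import sift

with open('haimian/vocabulary.pkl', 'rb') as f:
    voc = pickle.load(f)

# quantize the first image's descriptors into visual-word counts
locs, descr = sift.read_features_from_file(featlist[0])
imhist = voc.project(descr)   # length-1000 histogram over the visual words
print(imhist.shape, imhist.sum())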
Quantizing the Input Feature Set
import pickle
import sqlite3
from PCV.imagesearch import imagesearch
from PCV.localdescriptors import sift
from PCV.tools.imtools import get_imlist

# get the list of images
imlist = get_imlist(r'haimian')
nbr_images = len(imlist)
# build the list of feature-file names
featlist = [imlist[i][:-3] + 'sift' for i in range(nbr_images)]
# load the vocabulary
with open(r'haimian\vocabulary.pkl', 'rb') as f:
    voc = pickle.load(f)
# create the indexer
indx = imagesearch.Indexer('testImaAdd.db', voc)
indx.create_tables()
# walk through the images (at most 120) and project their features onto the vocabulary
for i in range(min(nbr_images, 120)):
    locs, descr = sift.read_features_from_file(featlist[i])
    indx.add_to_index(imlist[i], descr)
# commit to the database
indx.db_commit()
con = sqlite3.connect('testImaAdd.db')
print(con.execute('select count (filename) from imlist').fetchone())
print(con.execute('select * from imlist').fetchone())
The generated database:
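Rather than reading a screenshot, you can also inspect the database directly; Indexer.create_tables sets up three tables (imlist, imwords, imhistograms). A quick check with the sqlite3 standard library:

import sqlite3

con = sqlite3.connect('testImaAdd.db')
# list every table the indexer created, with its row count
for (name,) in con.execute("select name from sqlite_master where type='table'"):
    count = con.execute('select count(*) from %s' % name).fetchone()[0]
    print(name, count)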
Building the Image Index and Retrieving Images
import pickle
import warnings
from PCV.imagesearch import imagesearch
from PCV.geometry import homography
from PCV.tools.imtools import get_imlist
from PCV.localdescriptors import sift
warnings.filterwarnings("ignore")

# load the image list
imlist = get_imlist(r'haimian')
nbr_images = len(imlist)
# load the feature list
featlist = [imlist[i][:-3] + 'sift' for i in range(nbr_images)]
# load the vocabulary
with open(r'haimian\vocabulary.pkl', 'rb') as f:
    voc = pickle.load(f, encoding='iso-8859-1')
src = imagesearch.Searcher('testImaAdd.db', voc)  # Searcher runs queries against the stored word histograms

# index of the query image and number of results to return
q_ind = 3
nbr_results = 10

# regular query (results sorted by Euclidean distance between histograms)
res_reg = [w[1] for w in src.query(imlist[q_ind])[:nbr_results]]
print('top matches (regular):', res_reg)

# load the features of the query image for matching
q_locs, q_descr = sift.read_features_from_file(featlist[q_ind])
fp = homography.make_homog(q_locs[:, :2].T)

# RANSAC model for homography fitting
model = homography.RansacModel()
rank = {}
# load the features of each candidate image
for ndx in res_reg[1:]:
    locs, descr = sift.read_features_from_file(featlist[ndx - 1])  # 'ndx' is a DB rowid starting at 1, so shift to the 0-based list
    # get matches
    matches = sift.match(q_descr, descr)
    ind = matches.nonzero()[0]
    ind2 = matches[ind]
    tp = homography.make_homog(locs[:, :2].T)
    # compute the homography and count inliers; if there are not
    # enough matches, fall back to an empty inlier list
    try:
        # 'match_theshold' is PCV's own (misspelled) keyword argument
        H, inliers = homography.H_from_ransac(fp[:, ind], tp[:, ind2], model, match_theshold=4)
    except Exception:
        inliers = []
    # store the inlier count
    rank[ndx] = len(inliers)

# sort the dictionary so the candidates with the most inliers come first
sorted_rank = sorted(rank.items(), key=lambda t: t[1], reverse=True)
res_geom = [res_reg[0]] + [s[0] for s in sorted_rank]
print('top matches (homography):', res_geom)

# show the results
imagesearch.plot_results(src, res_reg[:6])   # regular query
imagesearch.plot_results(src, res_geom[:6])  # reranked by homography inliers
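Keep in mind that query returns database rowids starting at 1, not 0-based list indices (hence the featlist[ndx - 1] above). Searcher.get_filename maps a rowid back to the stored filename, which is handy when checking results by hand:

# map the rowids in the result list back to filenames
for ndx in res_reg[:6]:
    print(ndx, src.get_filename(ndx))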
Summary
- The vocabulary size has a large effect on how relevant the retrieved images are to the query. With a very small vocabulary (e.g. 10 words), the results have little to do with the query image. As the vocabulary grows, the visual words partition feature space more finely: the dictionary holds more basic words, retrieval can compare small image elements, and the results become more accurate. Even so, some incorrectly retrieved images remain.
- When the vocabulary is made too large, retrieval quality collapses again and only one image matches. This may be a form of overfitting: with too many words, descriptors of the same structure get quantized into different words, so the histograms of similar images stop overlapping and the results get worse. A way to compare several vocabulary sizes side by side is sketched after this list.
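A sketch for that comparison, retraining the vocabulary and rebuilding the index at each size and printing the regular-query results for the same query image. The vocabulary and database names (haimian_k*, test_k*.db) are mine, and retraining k-means at each size is slow:

import warnings
from PCV.imagesearch import vocabulary, imagesearch
from PCV.localdescriptors import sift
from PCV.tools.imtools import get_imlist
warnings.filterwarnings("ignore")

imlist = get_imlist('haimian/')
nbr_images = len(imlist)
featlist = [imlist[i][:-3] + 'sift' for i in range(nbr_images)]

# retrain and re-index at several vocabulary sizes, then compare the
# regular-query result lists for the same query image (index 3)
for k in (10, 100, 1000):
    voc = vocabulary.Vocabulary('haimian_k%d' % k)
    voc.train(featlist, k, 10)           # k-means over the sampled descriptors
    db = 'test_k%d.db' % k
    indx = imagesearch.Indexer(db, voc)
    indx.create_tables()
    for i in range(nbr_images):
        locs, descr = sift.read_features_from_file(featlist[i])
        indx.add_to_index(imlist[i], descr)
    indx.db_commit()
    src = imagesearch.Searcher(db, voc)
    print(k, [w[1] for w in src.query(imlist[3])[:10]])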