下载 VGG19 预训练模型
https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg19_weights_tf_dim_ordering_tf_kernels.h5
百度
链接:https://pan.baidu.com/s/1XIXfmJ9bJsRQj1nQKAq7jg 密码:c69e
如果使用默认参数,会自动下载模型500多M
def VGG19(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000):
下载 VGG19 对应的 labels 文件(imagenet_1000_labels.json),即预测结果的数字编号与分类名称的对应关系
使用时,需要调整大小
im = cv.imread(path).astype(np.float32)
im = cv.resize(im, (224, 224))
im = np.expand_dims(im, axis=0) # (1, 224, 224, 3)
print(im.shape)
代码
from tensorflow.python.keras.applications.vgg19 import VGG19
import cv2 as cv
import numpy as np
import json
import os
import time
# Pretrained VGG19 ImageNet weights (downloaded manually, ~550 MB):
# '''https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg19_weights_tf_dim_ordering_tf_kernels.h5'''
model = VGG19(weights="D:/迅雷下载/vgg19_weights_tf_dim_ordering_tf_kernels.h5")
# Mapping from prediction index to human-readable class name for the
# 1000 ImageNet classes; assumes the JSON file sits next to this script.
with open('imagenet_1000_labels.json') as f:
    labels = json.load(f)
def solve(path):
    """Classify the image at *path* with the global VGG19 model.

    Returns a tuple ``(class_index, class_label)`` where ``class_index``
    is the argmax over the model's 1000-way output and ``class_label``
    is looked up in the global ``labels`` mapping.

    Raises FileNotFoundError if the image cannot be read.
    """
    im = cv.imread(path)
    if im is None:
        # cv.imread signals failure by returning None rather than raising,
        # which would otherwise surface later as a cryptic AttributeError
        # on .astype — fail loudly here instead.
        raise FileNotFoundError('cannot read image: %s' % path)
    im = im.astype(np.float32)
    im = cv.resize(im, (224, 224))     # VGG19 expects 224x224 RGB input
    im = np.expand_dims(im, axis=0)    # add batch dim -> (1, 224, 224, 3)
    print(im.shape)
    # NOTE(review): keras VGG19 normally expects preprocess_input (mean
    # subtraction, channel reorder) before predict; raw pixels still give
    # plausible results here, but accuracy may suffer — confirm.
    out = int(np.argmax(model.predict(im)))  # plain int: JSON-safe index
    return (out, labels[out])
# Run the classifier over every file in the image directory, timing each
# prediction (the first call is much slower because the graph warms up).
img_dir = 'imgs'
for name in os.listdir(img_dir):
    start = time.time()
    result = solve(os.path.join(img_dir, name))
    elapsed = time.time() - start
    print(name, result, elapsed)
预测结果,可以看到第一次花了20多秒,后面就好一点
cat.jpg (285, 'Egyptian cat') 25.418004035949707
(1, 224, 224, 3)
dog.jpg (167, 'English foxhound') 0.12366938591003418
(1, 224, 224, 3)
img.jpg (767, 'rubber eraser, rubber, pencil eraser') 0.474367618560791
(1, 224, 224, 3)
img2.jpg (33, 'loggerhead, loggerhead turtle, Caretta caretta') 0.5545673370361328
(1, 224, 224, 3)
test.jpg (459, 'brassiere, bra, bandeau') 0.17692995071411133
对应的图片,可以说已经很满足需求了。。。。。。