I normally work in PyTorch, so getting this running in TensorFlow took a while — a lot of the code floating around online simply doesn't work. You first need to download the pretrained model, clone tensorflow/models, and then run the export shown in the code below so that slim is on your PYTHONPATH.
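The setup looks roughly like this; the exact tarball name depends on which MobileNet release you download, and the /ai/tensorflow path just mirrors the PYTHONPATH export used in the script:

# download and unpack the pretrained checkpoint (adjust the file name to your release)
wget http://download.tensorflow.org/models/mobilenet_v1_1.0_224_2017_06_14.tar.gz
mkdir -p mobilenet_v1 && tar -xzf mobilenet_v1_1.0_224_2017_06_14.tar.gz -C mobilenet_v1
# clone tensorflow/models and put the slim package on PYTHONPATH
git clone https://github.com/tensorflow/models.git /ai/tensorflow/models
export PYTHONPATH="$PYTHONPATH:/ai/tensorflow/models/research/slim"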
# encoding: utf-8
import os
import sys
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from nets import mobilenet_v1
import skimage
import skimage.io
import skimage.transform
from sklearn.preprocessing import Normalizer
# export PYTHONPATH="$PYTHONPATH:/ai/tensorflow/models/research/slim"
def load_image(path):
    # load image as float in [0, 1]
    img = skimage.io.imread(path)
    img = img / 255.0
    assert (0 <= img).all() and (img <= 1.0).all()
    # print("Original Image Shape: ", img.shape)
    # crop the largest square from the center of the image
    short_edge = min(img.shape[:2])
    yy = int((img.shape[0] - short_edge) / 2)
    xx = int((img.shape[1] - short_edge) / 2)
    crop_img = img[yy: yy + short_edge, xx: xx + short_edge]
    # resize to the 224x224 input expected by MobileNet
    # (note: the slim checkpoints were trained with inputs scaled to [-1, 1];
    #  [0, 1] still works for comparing features, but 2 * img - 1 matches
    #  the training preprocessing more closely)
    resized_img = skimage.transform.resize(crop_img, (224, 224))
    return resized_img
if __name__ == "__main__":
    os.environ["CUDA_VISIBLE_DEVICES"] = "1"
    gpu_options = tf.GPUOptions(allow_growth=True)
    config = tf.ConfigProto(gpu_options=gpu_options)
    norm2 = Normalizer(norm='l2')
    ckpt_path = './mobilenet_v1/mobilenet_v1_1.0_224.ckpt'
    # load both images and stack them into a single batch
    img1 = load_image(sys.argv[1])
    img2 = load_image(sys.argv[2])
    batch1 = img1.reshape((1, 224, 224, 3))
    batch2 = img2.reshape((1, 224, 224, 3))
    batch = np.concatenate((batch1, batch2), 0)
    # build the graph: MobileNet v1 in inference mode
    images = tf.placeholder(tf.float32, [2, 224, 224, 3])
    with slim.arg_scope(mobilenet_v1.mobilenet_v1_arg_scope()):
        logits, endpoints = mobilenet_v1.mobilenet_v1(
            images, num_classes=1001, is_training=False)
    # global-average-pooled feature map, squeezed from [N, 1, 1, 1024] to [N, 1024]
    fc_map = endpoints['AvgPool_1a']
    fc_feat = tf.squeeze(fc_map, [1, 2])
    with tf.Session(config=config) as sess:
        saver = tf.train.Saver()
        saver.restore(sess, ckpt_path)
        fc_feature = sess.run(fc_feat, feed_dict={images: batch})
        # L2-normalize each feature so the dot product is the cosine similarity
        norm_feas = norm2.fit_transform(fc_feature)
        print(np.matmul(norm_feas[0], norm_feas[1].T))
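Run it with two image paths, something like python extract_feat.py a.jpg b.jpg (extract_feat.py being whatever name you saved the script under). The printed value is the cosine similarity between the two L2-normalized 1024-d pooled features: close to 1.0 for near-duplicate images, noticeably lower for unrelated ones.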