1.需要解决的问题:
在一个py文件中同时加载多个训练好的CNN模型文件,并输出多个模型的识别结果。
2.解决思路
(1)创建多个计算图,在多个会话中执行。(可改进的地方:一个图可以在多个sess中运行,一个sess也能运行多个图)
(2)由于多个计算图的执行是相互独立的,所以,可以分别进行每个模型的加载与识别。
(3)将识别结果保存到一个csv文件中。
3.实现——以三个模型为例
(1)创建三个计算图,使用 g = tf.Graph()函数创建新的计算图。
(2)在with g.as_default():语句下定义属于计算图g的张量和操作,比如读取图片,变成张量形式,以及压缩等。
(3)利用with tf.Session()执行计算图。
(4)需要加载几个模型就创建几个计算图。
4.代码实现,结合PyQt
4.1.串行执行,即一个模型一个模型的识别与写入
# --- Serial recognition, model 1: AlexNet ---
if self.model_alex != []:  # an AlexNet checkpoint directory was configured
    g1 = tf.Graph()  # 1. create a dedicated graph for this model
    # 2. define the tensors and ops that belong to graph g1
    with g1.as_default():
        # shuffle=False: during test/recognition images must be read in order,
        # otherwise predictions cannot be matched back to their file paths
        file_queue = tf.train.string_input_producer(file_list, shuffle=False)
        reader = tf.WholeFileReader()
        key, value = reader.read(file_queue)  # key: file name, value: raw file bytes
        x_alex = tf.compat.v1.placeholder(shape=[None, 227, 227, 3], dtype=tf.float32)
        image = tf.image.decode_jpeg(value)  # decode JPEG bytes into a tensor
        image_resize_alex = tf.image.resize_images(image, [227, 227])
        image_resize_alex.set_shape([227, 227, 3])  # pin the static shape for batching
        image_batch_alex = tf.train.batch([image_resize_alex], batch_size=batch_size,
                                          num_threads=1,
                                          capacity=1000)  # capacity: queue size
        # forward pass through the AlexNet architecture (inference mode)
        y_alex, _ = AlexNet.alexnet(x_alex, False)
        y_alex = tf.reshape(y_alex, (-1, 2))
        y1_alex = tf.argmax(y_alex, 1)  # predicted class index per image
    # 3. execute graph g1 in its own session
    with tf.Session(graph=g1) as sess1:
        tf.global_variables_initializer().run()  # initialize all variables
        coord = tf.train.Coordinator()
        # start the input queue threads; without this the session cannot read images
        threads = tf.train.start_queue_runners(sess1, coord)
        filenames = os.listdir(self.model_alex)  # checkpoint directory listing
        if filenames:  # restore saved parameters when a checkpoint exists
            try:
                for name in filenames:
                    if ".meta" in name:
                        save_meta = name
                # import the graph definition, then restore the weights; the
                # checkpoint prefix is the .meta file name minus its extension
                # (robust replacement for the old hard-coded save_meta[0:12] slice)
                saver = tf.train.import_meta_graph(os.path.join(self.model_alex, save_meta))
                saver.restore(sess1, os.path.join(self.model_alex, os.path.splitext(save_meta)[0]))
                print("Load model parameters success!")
            except Exception as e:
                print(e)
                print("Failed to load model parameters!")
        for i in range(epoch):
            print(i)
            image = sess1.run([image_batch_alex])
            print("image:", image)
            # l1: predicted labels for this batch, p: raw network outputs
            l1, p = sess1.run([y1_alex, y_alex], feed_dict={x_alex: image[0]})
            print('step={},test_label={}'.format(i, l1))
            for number in l1:
                self.img_label.append(number)  # record every predicted label
            if i == epoch - 1:  # drop the padding items of the last (partial) batch
                a = epoch_total % batch_size
                l1 = l1[:a]
            self.nullpic += sum(l1 == 0)
            self.notnullpic += sum(l1 == 1)
            # report progress to the GUI thread
            self.recongizesignal.emit(i + 1, epoch, epoch_total, self.nullpic, self.notnullpic)
        # write the classification results to a .csv file
        with open(os.path.join(save_dir, "result.csv"), "w", newline='') as f:
            writer = csv.writer(f)
            writer.writerow(["Path", "Model", "Label"])
            for jishu, file_name in enumerate(self.contents):
                writer.writerow([file_name, "AlexNet", self.img_label[jishu]])  # save one row per image
        print("Alexnet test finished")
        coord.request_stop()
        coord.join(threads)
以上第一个计算图结束,最终会生成一个result.csv文件,包含图片路径和标签。下面开始第二个计算图的创建和使用。
#inception模型
# --- Serial recognition, model 2: Inception-V3 ---
if self.model_inception != []:  # an Inception checkpoint directory was configured
    g2 = tf.Graph()  # create a second, independent graph
    # define the tensors and ops that belong to graph g2
    with g2.as_default():
        # shuffle=False keeps predictions aligned with self.contents
        file_queue = tf.train.string_input_producer(self.contents, shuffle=False)
        reader = tf.WholeFileReader()
        key, value = reader.read(file_queue)  # key: file name, value: raw file bytes
        image = tf.image.decode_jpeg(value)  # decode JPEG bytes into a tensor
        image_resize_inception = tf.image.resize_images(image, [299, 299])
        image_resize_inception.set_shape([299, 299, 3])
        image_batch_inception = tf.train.batch([image_resize_inception], batch_size=batch_size,
                                               num_threads=1,
                                               capacity=1000)  # capacity: queue size
        x_inception = tf.compat.v1.placeholder(shape=[None, 299, 299, 3], dtype=tf.float32)
        y_inception, _ = Inception_v3.inception_v3(x_inception, False)  # inference mode
        y_inception = tf.reshape(y_inception, (-1, 2))
        y1_inception = tf.argmax(y_inception, 1)  # predicted class index
    # execute graph g2 in its own session
    with tf.Session(graph=g2) as sess2:
        tf.global_variables_initializer().run()
        coord2 = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess2, coord=coord2)  # start input threads
        filenames = os.listdir(self.model_inception)  # checkpoint directory listing
        if filenames:  # restore saved parameters when a checkpoint exists
            try:
                for name in filenames:
                    if ".meta" in name:
                        save_meta = name
                # checkpoint prefix = .meta file name minus extension
                saver = tf.train.import_meta_graph(os.path.join(self.model_inception, save_meta))
                saver.restore(sess2, os.path.join(self.model_inception, os.path.splitext(save_meta)[0]))
                print("Load inception V3 model parameters success!")
            except Exception as e:
                print(e)
                print("Failed to load model parameters!")
        # reset per-model accumulators before this model's pass
        self.img_label = []
        self.nullpic = 0
        self.notnullpic = 0
        for i in range(epoch):
            image = sess2.run([image_batch_inception])
            l1, p = sess2.run([y1_inception, y_inception], feed_dict={x_inception: image[0]})
            print('step={},test_label={}'.format(i, l1))
            for number in l1:
                self.img_label.append(number)  # record every predicted label
            if i == epoch - 1:  # drop the padding items of the last (partial) batch
                a = epoch_total % batch_size
                l1 = l1[:a]
            self.nullpic += sum(l1 == 0)
            self.notnullpic += sum(l1 == 1)
            self.recongizesignal.emit(i + 1, epoch, epoch_total, self.nullpic, self.notnullpic)
        # append this model's results to the same csv file.
        # BUG FIX: the old header was ["label", "Model"] while rows were written
        # as [model, label] — header and data columns now agree.
        with open(os.path.join(save_dir, "result.csv"), "a+", newline='') as f:
            writer = csv.writer(f)
            writer.writerow(["Model", "Label"])
            for jishu, file_name in enumerate(self.contents):
                writer.writerow(["Inception V3", self.img_label[jishu]])  # save
        print("Inception v3 test finished")
        coord2.request_stop()
        coord2.join(threads)
第三个计算图创建方式同上。只需修改模型的加载位置和追加结果即可。
4.2.并行执行——同时识别和写入
将会话的执行和图张量与操作的定义分离,即:
#alexnet模型
# --- Parallel variant, model 1: AlexNet (graph/session setup only; the
# shared run loop below drives all sessions in lockstep) ---
if self.model_alex != []:
    g1 = tf.Graph()  # 1. create a dedicated graph
    sess1 = tf.Session(graph=g1)  # kept open for the shared run loop below
    with sess1.as_default():
        with g1.as_default():
            # shuffle=False keeps predictions aligned with self.contents
            file_queue = tf.train.string_input_producer(self.contents, shuffle=False)
            reader = tf.WholeFileReader()
            key, value = reader.read(file_queue)  # key: file name, value: raw bytes
            x_alex = tf.compat.v1.placeholder(shape=[None, 227, 227, 3], dtype=tf.float32)
            image = tf.image.decode_jpeg(value)  # decode JPEG bytes into a tensor
            image_resize_alex = tf.image.resize_images(image, [227, 227])
            image_resize_alex.set_shape([227, 227, 3])
            image_batch_alex = tf.train.batch([image_resize_alex], batch_size=batch_size,
                                              num_threads=1,
                                              capacity=1000)  # capacity: queue size
            # forward pass (inference mode)
            y_alex, _ = AlexNet.alexnet(x_alex, False)
            y_alex = tf.reshape(y_alex, (-1, 2))
            y1_alex = tf.argmax(y_alex, 1)  # predicted class index
            # BUG FIX: initialize variables BEFORE restoring the checkpoint.
            # The original ran the initializer after restore, which overwrote
            # the loaded weights with fresh random values.
            sess1.run(tf.global_variables_initializer())
            filenames = os.listdir(self.model_alex)  # checkpoint directory
            if filenames:  # restore saved parameters when a checkpoint exists
                try:
                    for name in filenames:
                        if ".meta" in name:
                            save_meta = name
                    saver = tf.train.import_meta_graph(os.path.join(self.model_alex, save_meta))
                    saver.restore(sess1, os.path.join(self.model_alex, os.path.splitext(save_meta)[0]))
                    print("Load model parameters success!")
                except Exception as e:
                    print(e)
                    print("Failed to load model parameters!")
#inception模型
# --- Parallel variant, model 2: Inception-V3 (setup only) ---
if self.model_inception != []:
    g2 = tf.Graph()  # 1. create a dedicated graph
    sess2 = tf.Session(graph=g2)  # kept open for the shared run loop below
    with sess2.as_default():
        with g2.as_default():
            file_queue = tf.train.string_input_producer(self.contents, shuffle=False)
            reader = tf.WholeFileReader()
            key, value = reader.read(file_queue)  # key: file name, value: raw bytes
            image = tf.image.decode_jpeg(value)  # decode JPEG bytes into a tensor
            image_resize_inception = tf.image.resize_images(image, [299, 299])
            image_resize_inception.set_shape([299, 299, 3])
            image_batch_inception = tf.train.batch([image_resize_inception], batch_size=batch_size,
                                                   num_threads=1,
                                                   capacity=1000)  # capacity: queue size
            x_inception = tf.compat.v1.placeholder(shape=[None, 299, 299, 3], dtype=tf.float32)
            y_inception, _ = Inception_v3.inception_v3(x_inception, False)  # inference mode
            y_inception = tf.reshape(y_inception, (-1, 2))
            y1_inception = tf.argmax(y_inception, 1)  # predicted class index
            # BUG FIX: initialize variables BEFORE restoring the checkpoint
            # (the original initializer ran after restore, wiping the weights).
            sess2.run(tf.global_variables_initializer())
            filenames = os.listdir(self.model_inception)  # checkpoint directory
            if filenames:  # restore saved parameters when a checkpoint exists
                try:
                    for name in filenames:
                        if ".meta" in name:
                            save_meta = name
                    saver = tf.train.import_meta_graph(os.path.join(self.model_inception, save_meta))
                    saver.restore(sess2, os.path.join(self.model_inception, os.path.splitext(save_meta)[0]))
                    print("Load inception V3 model parameters success!")
                except Exception as e:
                    print(e)
                    print("Failed to load model parameters!")
#resnet模型
# --- Parallel variant, model 3: ResNet-18 (setup only) ---
if self.model_resnet != []:
    g3 = tf.Graph()  # create the third graph
    sess3 = tf.Session(graph=g3)  # kept open for the shared run loop below
    with sess3.as_default():
        with g3.as_default():
            file_queue = tf.train.string_input_producer(self.contents, shuffle=False)
            reader = tf.WholeFileReader()
            key, value = reader.read(file_queue)  # key: file name, value: raw bytes
            image = tf.image.decode_jpeg(value)  # decode JPEG bytes into a tensor
            image_resize_resnet = tf.image.resize_images(image, [224, 224])
            image_resize_resnet.set_shape([224, 224, 3])
            image_batch_resnet = tf.train.batch([image_resize_resnet], batch_size=batch_size,
                                                num_threads=1,
                                                capacity=1000)  # capacity: queue size
            x_resnet = tf.compat.v1.placeholder(shape=[None, 224, 224, 3], dtype=tf.float32)
            y_resnet, _ = ResNet_18.resnet_18(x_resnet, False)  # inference mode
            y_resnet = tf.reshape(y_resnet, (-1, 2))
            y1_resnet = tf.argmax(y_resnet, 1)  # predicted class index
            # BUG FIX: initialize variables BEFORE restoring the checkpoint
            # (the original initializer ran after restore, wiping the weights).
            sess3.run(tf.global_variables_initializer())
            filenames = os.listdir(self.model_resnet)  # checkpoint directory
            if filenames:  # restore saved parameters when a checkpoint exists
                try:
                    for name in filenames:
                        if ".meta" in name:
                            save_meta = name
                    saver = tf.train.import_meta_graph(os.path.join(self.model_resnet, save_meta))
                    saver.restore(sess3, os.path.join(self.model_resnet, os.path.splitext(save_meta)[0]))
                    print("Load resnet18 model parameters success!")
                except Exception as e:
                    print(e)
                    print("Failed to load model parameters!")
# --- Shared run loop: drive all configured sessions in lockstep and merge
# the per-model predictions into one csv file.
# BUG FIXES vs the original:
#   * bare `try/except: pass` (used to paper over undefined sessions when a
#     model was not configured) replaced with explicit "is this model
#     configured" guards, so real runtime errors are no longer swallowed;
#   * the csv header/row `if` chains overlapped, so with fewer than three
#     models duplicate header and data rows were written (and the one-model
#     header had a spurious trailing "Model" column) — now each combination
#     writes exactly one header and one row per image.
coord = tf.train.Coordinator()
use_alex = self.model_alex != []
use_inception = self.model_inception != []
use_resnet = self.model_resnet != []
# start the input queue threads for every configured model
if use_alex:
    thread1 = tf.train.start_queue_runners(sess1, coord)
if use_inception:
    thread2 = tf.train.start_queue_runners(sess2, coord)
if use_resnet:
    thread3 = tf.train.start_queue_runners(sess3, coord)
for i in range(epoch):
    if use_alex:
        image_alex = sess1.run([image_batch_alex])
        l1, p1 = sess1.run([y1_alex, y_alex], feed_dict={x_alex: image_alex[0]})
        print('step={},alex_label={}'.format(i, l1))
        for number_alex in l1:
            self.img_label_alex.append(number_alex)  # record every label
    if use_inception:
        image_inception = sess2.run([image_batch_inception])
        l2, p2 = sess2.run([y1_inception, y_inception], feed_dict={x_inception: image_inception[0]})
        print('step={},inception_label={}'.format(i, l2))
        for number_inception in l2:
            self.img_label_inception.append(number_inception)  # record every label
    if use_resnet:
        image_resnet = sess3.run([image_batch_resnet])
        l3, p3 = sess3.run([y1_resnet, y_resnet], feed_dict={x_resnet: image_resnet[0]})
        print('step={},resnet_label={}'.format(i, l3))
        for number_resnet in l3:
            self.img_label_resnet.append(number_resnet)  # record every label
    if i == epoch - 1:  # drop the padding items of the last (partial) batch
        a = epoch_total % batch_size
        if use_alex:
            l1 = l1[:a]
        if use_inception:
            l2 = l2[:a]
        if use_resnet:
            l3 = l3[:a]
    if use_alex:
        self.nullpic_alex += sum(l1 == 0)
        self.notnullpic_alex += sum(l1 == 1)
    if use_inception:
        self.nullpic_inception += sum(l2 == 0)
        self.notnullpic_inception += sum(l2 == 1)
    if use_resnet:
        self.nullpic_resnet += sum(l3 == 0)
        self.notnullpic_resnet += sum(l3 == 1)
    # report all per-model counters to the GUI thread
    self.recongizesignal.emit(i + 1, epoch, epoch_total, self.nullpic_alex, self.notnullpic_alex,
                              self.nullpic_inception, self.notnullpic_inception,
                              self.nullpic_resnet, self.notnullpic_resnet)
# write the merged classification results to a .csv file: one "Model,Label"
# column pair per configured model, in a fixed AlexNet/Inception/ResNet order
with open(os.path.join(save_dir, "result.csv"), "w", newline='') as f:
    writer = csv.writer(f)
    n_models = sum([use_alex, use_inception, use_resnet])
    writer.writerow(["Path"] + ["Model", "Label"] * n_models)
    for jishu, file_name in enumerate(self.contents):
        row = [file_name]
        if use_alex:
            row += ["AlexNet", self.img_label_alex[jishu]]
        if use_inception:
            row += ["Inception-V3", self.img_label_inception[jishu]]
        if use_resnet:
            row += ["ResNet-18", self.img_label_resnet[jishu]]
        writer.writerow(row)  # save one row per image
# shut the queue threads down cleanly
coord.request_stop()
if use_alex:
    coord.join(thread1)
if use_inception:
    coord.join(thread2)
if use_resnet:
    coord.join(thread3)
结果:
实验了两个模型。最终结果文件如上图所示。
5.总结
在同一个py文件中同时加载多个CNN模型,需要创建多个独立的计算图,它们独立工作,独立输出各自的识别结果,最后写入或追加到一个csv结果文件中。即四步:
(1)利用g1 = tf.Graph()创建计算图
(2)利用with g1.as_default():实现该功能下的张量和操作,也就是单个模型识别的图片读取和张量定义等。
(3)在with tf.Session(graph=g1) as sess1 中执行计算图,输出识别结果和写入文件。特别注意Session里面的参数,指定了是哪个计算图。
(4)重复上述步骤。