TensorFlow: Loading and Running a Frozen Graph (.pb) Model

Software versions:
tensorflow == 1.12.0
python == 3.6.1

For loading and using ckpt models, see this post.

About the .pb model

Unlike a ckpt, a .pb model freezes the model parameters and the network structure into a single file. When using it, there is no need to load the parameters and the structure separately: TensorFlow's built-in functions read both into a graph at once, after which you operate on that graph to run the computation.

Saving the model

https://blog.csdn.net/u014568072/article/details/85281769
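
The linked post covers the details of freezing a model; below is a minimal sketch of the basic step using the TF 1.x graph_util API, assuming you have an active session sess that holds the trained variables, and that 'output_node' is a placeholder for your real output node name:

    import tensorflow as tf

    # Convert every variable reachable from the listed output nodes into a constant,
    # producing a single self-contained GraphDef.
    frozen_graph_def = tf.graph_util.convert_variables_to_constants(
            sess,                # active tf.Session containing the trained weights
            sess.graph_def,      # the current graph structure
            ['output_node'],     # hypothetical output node name -- replace with your own
            )

    # Serialize the frozen GraphDef to disk as a .pb file.
    with tf.gfile.GFile('model.pb', 'wb') as f:
        f.write(frozen_graph_def.SerializeToString())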

Loading the model

First, place the model file in the same directory as the code. My model file is named yolov4-tiny.pb, so I pass that name to the GFile function with mode set to 'rb' and run the code below. The model is then loaded into the variable graph.

    with tf.gfile.GFile('yolov4-tiny.pb', "rb") as pb:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(pb.read())
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(
                graph_def,
                name="",  # name可以自定义,修改name之后记得在下面的代码中也要改过来
                )
    for op in graph.get_operations():
        print(op.name, op.values())  # print the network structure

The printed network structure looks like the following. The left part, e.g. "inputs", is the operation name; the name in parentheses, e.g. "inputs:0", is the tensor name. When calling get_tensor_by_name later, you must use the tensor name.

inputs (<tf.Tensor 'inputs:0' shape=(?, 416, 416, 3) dtype=float32>,)
detector/truediv/y (<tf.Tensor 'detector/truediv/y:0' shape=() dtype=float32>,)
detector/truediv (<tf.Tensor 'detector/truediv:0' shape=(?, 416, 416, 3) dtype=float32>,)
detector/yolo-v4-tiny/Pad/paddings (<tf.Tensor 'detector/yolo-v4-tiny/Pad/paddings:0' shape=(4, 2) dtype=int32>,)
detector/yolo-v4-tiny/Pad (<tf.Tensor 'detector/yolo-v4-tiny/Pad:0' shape=(?, 418, 418, 3) dtype=float32>,)
detector/yolo-v4-tiny/Conv/weights (<tf.Tensor 'detector/yolo-v4-tiny/Conv/weights:0' shape=(3, 3, 3, 32) dtype=float32>,)
detector/yolo-v4-tiny/Conv/weights/read (<tf.Tensor 'detector/yolo-v4-tiny/Conv/weights/read:0' shape=(3, 3, 3, 32) dtype=float32>,)
detector/yolo-v4-tiny/Conv/Conv2D (<tf.Tensor 'detector/yolo-v4-tiny/Conv/Conv2D:0' shape=(?, 208, 208, 32) dtype=float32>,)
…………
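
As the comment above notes, the name argument of import_graph_def becomes a prefix on every imported tensor. A minimal sketch of the difference, reusing the graph_def loaded above (the prefix "yolo" is just an example):

    # With name="" the tensor keeps its original name, e.g. 'inputs:0'.
    # With a custom prefix, the same tensor must be addressed as '<prefix>/inputs:0'.
    with tf.Graph().as_default() as prefixed_graph:
        tf.import_graph_def(graph_def, name="yolo")
    node_in = prefixed_graph.get_tensor_by_name('yolo/inputs:0')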

Running the model

Next, use get_tensor_by_name to fetch the input and output nodes, then run the session to get the result.

    node_in = graph.get_tensor_by_name('inputs:0')  # input tensor name goes here
    node_out = graph.get_tensor_by_name('detector/yolo-v4-tiny/Reshape_4:0')  # output tensor name goes here

    with tf.Session(graph=graph) as sess:  # don't forget to pass graph to Session()!
        # sess.run(tf.global_variables_initializer())  # not needed: all values were frozen into the graph
        feed_dict = {node_in: image}  # image is the input data for node_in; preprocessing code omitted (see the sketch below)
        pred = sess.run(node_out, feed_dict)  # run the session to obtain node_out
        print(pred)
        # no sess.close() needed: the with statement closes the session automatically

One point needs particular attention: tf.Session(graph=graph) must be given graph as an argument! Otherwise TensorFlow will raise an error saying the operation does not exist.
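
The code that prepares image for node_in was omitted above. Here is a minimal sketch with OpenCV and NumPy, assuming the network expects a 416×416 RGB image with a batch dimension; note that the printed structure shows a detector/truediv node right after inputs, so input scaling may already be built into the graph. Adjust the preprocessing to match your own model.

    import cv2
    import numpy as np

    img = cv2.imread('test.jpg')                # hypothetical file name; BGR, shape (H, W, 3)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # OpenCV loads BGR; convert to RGB
    img = cv2.resize(img, (416, 416))           # match the (?, 416, 416, 3) input tensor
    image = np.expand_dims(img.astype(np.float32), axis=0)  # add batch dimension -> (1, 416, 416, 3)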

Running the program, you can see that the value of pred is printed successfully.

