TensorFlow Persistence: How Model Saving and Loading Works

____tz_zs




TensorFlow provides the tf.train.Saver class for saving and loading neural network models: saver.save() saves a model, and saver.restore() loads one.

The save method as defined in the source file saver.py:

 def save(self,
           sess,
           save_path,
           global_step=None,
           latest_filename=None,
           meta_graph_suffix="meta",
           write_meta_graph=True,
           write_state=True):

sess: the session in which the variables to be saved live

save_path: the path prefix of the checkpoint files to write

global_step: if provided, the training step number is appended to save_path to form the checkpoint filename (see the sketch after this list)

latest_filename: optional name for the protocol buffer file that keeps the list of most recent checkpoints; defaults to 'checkpoint'

meta_graph_suffix: suffix of the `MetaGraphDef` file, defaults to 'meta'

write_meta_graph: whether to write the meta graph file

write_state: whether to write the `CheckpointStateProto` (checkpoint state) file
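
For example, passing global_step appends the step number to the checkpoint prefix. A minimal sketch, reusing the same /path/to/model directory as the demo below (the step value 1000 is an illustrative assumption, not from the original post):

# Hypothetical example of global_step (TF 1.x API); the step value is made up
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Produces checkpoint files whose names start with "model.ckpt-1000"
    # and updates the "checkpoint" state file in the same directory.
    saver.save(sess, "/path/to/model/model.ckpt", global_step=1000)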



code1: a small demo of saving and loading a model

# -*- coding: utf-8 -*-
"""
@author: tz_zs

Persistence demo: saving and loading a model
"""
import tensorflow as tf

v1 = tf.Variable(tf.constant(1.0, shape=[1]), name="v1")
v2 = tf.Variable(tf.constant(2.0, shape=[1]), name="v2")
result1 = v1 + v2

init_op = tf.global_variables_initializer()
saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(init_op)
    # Save the model to /path/to/model/model.ckpt
    saver.save(sess, "/path/to/model/model.ckpt")

'''
The model is saved under /path/to/model/model.ckpt.
The original post lists three generated files:
model.ckpt.meta
model.ckpt
checkpoint
(In newer TF 1.x releases the variable data is split into
model.ckpt.index and model.ckpt.data-00000-of-00001 instead of
a single model.ckpt file.)
'''

# Load the model
saver2 = tf.train.import_meta_graph("/path/to/model/model.ckpt.meta")

with tf.Session() as sess:
    saver2.restore(sess, "/path/to/model/model.ckpt")
    print(sess.run(tf.get_default_graph().get_tensor_by_name("add:0")))  # [ 3.]
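

When the network structure is rebuilt in code, the meta graph does not have to be imported; saver.restore() can load the variable values directly. A minimal sketch under that assumption (run it as a fresh script so the variable names v1/v2 match the checkpoint):

# Hypothetical alternative: rebuild the graph, then restore only the variable values
import tensorflow as tf

v1 = tf.Variable(tf.constant(1.0, shape=[1]), name="v1")
v2 = tf.Variable(tf.constant(2.0, shape=[1]), name="v2")
result1 = v1 + v2

saver = tf.train.Saver()
with tf.Session() as sess:
    # No initializer is run; restore() assigns the saved values.
    saver.restore(sess, "/path/to/model/model.ckpt")
    print(sess.run(result1))  # [ 3.]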


code2: using convert_variables_to_constants, the variables in the computation graph and their values are saved as constants, so the whole model can be written to a single file

# -*- coding: utf-8 -*-
"""
@author: tz_zs
Save to a single file, with variables converted to constants
convert_variables_to_constants
"""

import tensorflow as tf
from tensorflow.python.framework import graph_util
from tensorflow.python.platform import gfile

v1 = tf.Variable(1, dtype=tf.float32, name="v1")
v2 = tf.Variable(2, dtype=tf.float32, name="v2")
result = v1 + v2

with tf.Session() as sess:
    tf.global_variables_initializer().run()
    graph_def = tf.get_default_graph().as_graph_def()
    print("#####" * 5 + " 得到当前默认的计算图graph_def " + "#####" * 5)
    print(graph_def)

    print("#####" * 5 + " add节点相关的output_graph_def " + "#####" * 5)
    output_graph_def = graph_util.convert_variables_to_constants(sess, graph_def, ["add"])
    print(output_graph_def)
    with tf.gfile.GFile("/path/to/model/combined_model.pb", "wb") as f:
        f.write(output_graph_def.SerializeToString())

print("#####" * 5 + " 加载模型 " + "#####" * 5)
with tf.Session() as sess:
    model_filename = "/path/to/model/combined_model.pb"
    with gfile.FastGFile(model_filename, 'rb') as f:
        graph_def2 = tf.GraphDef()
        graph_def2.ParseFromString(f.read())
        print(graph_def2)

    # A tensor's value can be fetched by its name
    result = tf.import_graph_def(graph_def2, return_elements=["add:0"])
    result_v1 = tf.import_graph_def(graph_def2, return_elements=["v1:0"])
    print(result)
    print(sess.run(result))
    print(result_v1)
    print(sess.run(result_v1))

Output of the run:

######################### graph_def of the current default graph #########################
node {
  name: "v1/initial_value"
  op: "Const"
  attr {
    key: "dtype"
    value {
      type: DT_FLOAT
    }
  }
  attr {
    key: "value"
    value {
      tensor {
        dtype: DT_FLOAT
        tensor_shape {
        }
        float_val: 1.0
      }
    }
  }
}
node {
  name: "v1"
  op: "VariableV2"
  attr {
    key: "container"
    value {
      s: ""
    }
  }
  attr {
    key: "dtype"
    value {
      type: DT_FLOAT
    }
  }
  attr {
    key: "shape"
    value {
      shape {
      }
    }
  }
  attr {
    key: "shared_name"
    value {
      s: ""
    }
  }
}
node {
  name: "v1/Assign"
  op: "Assign"
  input: "v1"
  input: "v1/initial_value"
  attr {
    key: "T"
    value {
      type: DT_FLOAT
    }
  }
  attr {
    key: "_class"
    value {
      list {
        s: "loc:@v1"
      }
    }
  }
  attr {
    key: "use_locking"
    value {
      b: true
    }
  }
  attr {
    key: "validate_shape"
    value {
      b: true
    }
  }
}
node {
  name: "v1/read"
  op: "Identity"
  input: "v1"
  attr {
    key: "T"
    value {
      type: DT_FLOAT
    }
  }
  attr {
    key: "_class"
    value {
      list {
        s: "loc:@v1"
      }
    }
  }
}
node {
  name: "v2/initial_value"
  op: "Const"
  attr {
    key: "dtype"
    value {
      type: DT_FLOAT
    }
  }
  attr {
    key: "value"
    value {
      tensor {
        dtype: DT_FLOAT
        tensor_shape {
        }
        float_val: 2.0
      }
    }
  }
}
node {
  name: "v2"
  op: "VariableV2"
  attr {
    key: "container"
    value {
      s: ""
    }
  }
  attr {
    key: "dtype"
    value {
      type: DT_FLOAT
    }
  }
  attr {
    key: "shape"
    value {
      shape {
      }
    }
  }
  attr {
    key: "shared_name"
    value {
      s: ""
    }
  }
}
node {
  name: "v2/Assign"
  op: "Assign"
  input: "v2"
  input: "v2/initial_value"
  attr {
    key: "T"
    value {
      type: DT_FLOAT
    }
  }
  attr {
    key: "_class"
    value {
      list {
        s: "loc:@v2"
      }
    }
  }
  attr {
    key: "use_locking"
    value {
      b: true
    }
  }
  attr {
    key: "validate_shape"
    value {
      b: true
    }
  }
}
node {
  name: "v2/read"
  op: "Identity"
  input: "v2"
  attr {
    key: "T"
    value {
      type: DT_FLOAT
    }
  }
  attr {
    key: "_class"
    value {
      list {
        s: "loc:@v2"
      }
    }
  }
}
node {
  name: "add"
  op: "Add"
  input: "v1/read"
  input: "v2/read"
  attr {
    key: "T"
    value {
      type: DT_FLOAT
    }
  }
}
node {
  name: "init"
  op: "NoOp"
  input: "^v1/Assign"
  input: "^v2/Assign"
}
versions {
  producer: 22
}


######################### output_graph_def for the add node #########################
Converted 2 variables to const ops.
node {
  name: "v1"
  op: "Const"
  attr {
    key: "dtype"
    value {
      type: DT_FLOAT
    }
  }
  attr {
    key: "value"
    value {
      tensor {
        dtype: DT_FLOAT
        tensor_shape {
        }
        float_val: 1.0
      }
    }
  }
}
node {
  name: "v1/read"
  op: "Identity"
  input: "v1"
  attr {
    key: "T"
    value {
      type: DT_FLOAT
    }
  }
  attr {
    key: "_class"
    value {
      list {
        s: "loc:@v1"
      }
    }
  }
}
node {
  name: "v2"
  op: "Const"
  attr {
    key: "dtype"
    value {
      type: DT_FLOAT
    }
  }
  attr {
    key: "value"
    value {
      tensor {
        dtype: DT_FLOAT
        tensor_shape {
        }
        float_val: 2.0
      }
    }
  }
}
node {
  name: "v2/read"
  op: "Identity"
  input: "v2"
  attr {
    key: "T"
    value {
      type: DT_FLOAT
    }
  }
  attr {
    key: "_class"
    value {
      list {
        s: "loc:@v2"
      }
    }
  }
}
node {
  name: "add"
  op: "Add"
  input: "v1/read"
  input: "v2/read"
  attr {
    key: "T"
    value {
      type: DT_FLOAT
    }
  }
}
library {
}


######################### Load the model #########################
node {
  name: "v1"
  op: "Const"
  attr {
    key: "dtype"
    value {
      type: DT_FLOAT
    }
  }
  attr {
    key: "value"
    value {
      tensor {
        dtype: DT_FLOAT
        tensor_shape {
        }
        float_val: 1.0
      }
    }
  }
}
node {
  name: "v1/read"
  op: "Identity"
  input: "v1"
  attr {
    key: "T"
    value {
      type: DT_FLOAT
    }
  }
  attr {
    key: "_class"
    value {
      list {
        s: "loc:@v1"
      }
    }
  }
}
node {
  name: "v2"
  op: "Const"
  attr {
    key: "dtype"
    value {
      type: DT_FLOAT
    }
  }
  attr {
    key: "value"
    value {
      tensor {
        dtype: DT_FLOAT
        tensor_shape {
        }
        float_val: 2.0
      }
    }
  }
}
node {
  name: "v2/read"
  op: "Identity"
  input: "v2"
  attr {
    key: "T"
    value {
      type: DT_FLOAT
    }
  }
  attr {
    key: "_class"
    value {
      list {
        s: "loc:@v2"
      }
    }
  }
}
node {
  name: "add"
  op: "Add"
  input: "v1/read"
  input: "v2/read"
  attr {
    key: "T"
    value {
      type: DT_FLOAT
    }
  }
}
library {
}


[<tf.Tensor 'import/add:0' shape=() dtype=float32>]
[3.0]
[<tf.Tensor 'import_1/v1:0' shape=() dtype=float32>]
[1.0]


Process finished with exit code 0
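

As a side note, tf.import_graph_def also takes an input_map argument that substitutes tensors of the current graph for named tensors of the imported graph. A minimal sketch against the combined_model.pb produced above (the constant 10.0 and the expected output are illustrative assumptions):

# Hypothetical use of input_map: override the frozen tensor "v1:0" at import time
import tensorflow as tf
from tensorflow.python.platform import gfile

with tf.Session() as sess:
    with gfile.FastGFile("/path/to/model/combined_model.pb", 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())

    # "v1:0" inside the imported graph is replaced by the new constant,
    # so the add node now computes 10.0 + 2.0.
    result = tf.import_graph_def(graph_def,
                                 input_map={"v1:0": tf.constant(10.0)},
                                 return_elements=["add:0"])
    print(sess.run(result))  # expected: [12.0]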











