# 一.Graph

## Ⅰ.介绍

import tensorflow as tf
import numpy as np

# An op created outside any explicit graph context is placed in the
# process-wide default graph.
c = tf.constant(value=1)
# The commented-out original `print(assert ...)` was invalid syntax;
# the working form is a bare assert:
assert c.graph is tf.get_default_graph()
print(c.graph)
print(tf.get_default_graph())

import tensorflow as tf
import numpy as np

# Same demo as above: a constant created with no graph context belongs
# to the default graph.
c = tf.constant(value=1)
# The commented-out original `print(assert ...)` was invalid syntax;
# the working form is a bare assert:
assert c.graph is tf.get_default_graph()
print(c.graph)
print(tf.get_default_graph())

# Create a new graph and make it the default inside the `with` block;
# ops built in the block are placed in g, not the global default graph.
g = tf.Graph()
print("g:", g)
with g.as_default():
    d = tf.constant(value=2)
    print(d.graph)  # prints g, confirming d was created in g
    # print(g)

g2=tf.Graph()
print("g2:",g2)
# NOTE: as_default() returns a context manager; calling it without a
# `with` statement never enters the context, so g2 is NOT installed as
# the default graph. The constant below is therefore still created in
# the global default graph, which is what the print demonstrates.
g2.as_default()
e=tf.constant(value=15)
print(e.graph)

## Ⅱ.属性

building_function:Returns True iff this graph represents a function.

finalized：要是这个图已经被终止（finalize）了，返回True

graph_def_versions:The GraphDef version information of this graph.

seed:The graph-level random seed of this graph.

version:Returns a version number that increases as ops are added to the graph.

## Ⅲ.函数

__init__()

add_to_collection(name, value)

name: The key for the collection. The GraphKeys class contains many standard names for collections.
value: 添加到collection中的值

as_default()

# 1. Using Graph.as_default():
g = tf.Graph()
with g.as_default():
    c = tf.constant(5.0)
    assert c.graph is g

# 2. Constructing and making default:
with tf.Graph().as_default() as g:
    c = tf.constant(5.0)
    assert c.graph is g

device(*args,**kwds)

device_name_or_function：在这个上下文下面使用的设备名称或者函数。这个参数可以是一个设备名称字符串，或者是一个设备函数，或者是None。

If it is None, all device() invocations from the enclosing context will be ignored.
For information about the valid syntax of device name strings, see the documentation in DeviceNameUtils.

with g.device('/gpu:0'):
    # All operations constructed in this context will be placed
    # on GPU 0.
    pass

with g.device(None):
    # All operations constructed in this context will have no
    # assigned device.
    pass

# Defines a function from Operation to device string.
def matmul_on_gpu(n):
    # Route matmuls to the GPU; everything else stays on the CPU.
    if n.type == "MatMul":
        return "/gpu:0"
    else:
        return "/cpu:0"

with g.device(matmul_on_gpu):
    # All operations of type "MatMul" constructed in this context
    # will be placed on GPU 0; all other operations will be placed
    # on CPU 0.
    pass

finalize()

get_all_collection_keys()

get_operation_by_name(name)

name: 操作名称

get_operations()

get_tensor_by_name(name)

is_feedable(tensor)

is_fetchable(tensor_or_op)

name_scope(*args,**kwds)
Returns a context manager that creates hierarchical names for operations.

A graph maintains a stack of name scopes. A with name_scope(…): statement pushes a new name onto the stack for the lifetime of the context.

The name argument will be interpreted as follows:

A string (not ending with ‘/’) will create a new name scope, in which name is appended to the prefix of all operations created in the context. If name has been used before, it will be made unique by calling self.unique_name(name).
A scope previously captured from a with g.name_scope(…) as scope: statement will be treated as an “absolute” name scope, which makes it possible to re-enter existing scopes.
A value of None or the empty string will reset the current name scope to the top-level (empty) name scope.
For example:

with tf.Graph().as_default() as g:
    c = tf.constant(5.0, name="c")
    assert c.op.name == "c"
    c_1 = tf.constant(6.0, name="c")
    assert c_1.op.name == "c_1"  # same name reused -> made unique

    # Creates a scope called "nested"
    with g.name_scope("nested") as scope:
        nested_c = tf.constant(10.0, name="c")
        assert nested_c.op.name == "nested/c"

        # Creates a nested scope called "inner".
        with g.name_scope("inner"):
            nested_inner_c = tf.constant(20.0, name="c")
            assert nested_inner_c.op.name == "nested/inner/c"

        # Create a nested scope called "inner_1".
        with g.name_scope("inner"):
            nested_inner_1_c = tf.constant(30.0, name="c")
            assert nested_inner_1_c.op.name == "nested/inner_1/c"

        # Treats scope as an absolute name scope, and
        # switches to the "nested/" scope.
        with g.name_scope(scope):
            nested_d = tf.constant(40.0, name="d")
            assert nested_d.op.name == "nested/d"

            with g.name_scope(""):
                # Empty string resets to the top-level name scope.
                e = tf.constant(50.0, name="e")
                assert e.op.name == "e"

The name of the scope itself can be captured by with g.name_scope(…) as scope:, which stores the name of the scope in the variable scope. This value can be used to name an operation that represents the overall result of executing the ops in a scope. For example:

# Capture the scope name and use it to name the op that represents the
# overall result of the scope's computation.
inputs = tf.constant(...)
with g.name_scope('my_layer') as scope:
    weights = tf.Variable(..., name="weights")
    biases = tf.Variable(..., name="biases")
    affine = tf.matmul(inputs, weights) + biases
    output = tf.nn.relu(affine, name=scope)
NOTE: This constructor validates the given name. Valid scope names match one of the following regular expressions:

[A-Za-z0-9.][A-Za-z0-9_.\-/]* (for scopes at the root)
[A-Za-z0-9_.\-/]* (for other scopes)
Args:

name: A name for the scope.
Returns:

A context manager that installs name as a new name scope.

Raises:

ValueError: If name is not a valid scope name, according to the rules above.
prevent_feeding

# 二.Session(tf.Session)

# -*- coding: utf-8 -*-

from __future__ import print_function, division
import tensorflow as tf

# Build the graph: two scalar constants and their product.
a = tf.constant(2.)
b = tf.constant(5.)
c = a * b

# Construct a session and evaluate the tensor c.
sess = tf.Session()
result = sess.run(c)
print(result)

# Release the session's resources.
sess.close()

# Using the close() method.
sess = tf.Session()
sess.run(...)
sess.close()

# Using the context manager (closes the session automatically).
with tf.Session() as sess:
    sess.run(...)

## 属性

graph：“投放”到session中的图
graph_def：图的描述
sess_str

## 重要函数：

tf.Session.__init__(target='', graph=None, config=None)

target:（可选）连接的执行引擎，默认是使用in-process引擎，分布式TensorFlow有更多的例子。
graph: (可选)投放进的计算图（graph），要是没有指定的话，那么默认的图就会被投放到这个session。要是你在同一个进程里面用了很多的图，你将为各个图使用不同的session，但是每一个graph都能够在多个session中使用。在这种情况下，经常显式地传递graph参数到session的构造里面。
config: (可选) A ConfigProto protocol buffer with configuration options for the session.

fetches: 一个单独的图的元素，或者一个图的元素的列表。或者一个字典，这个字典的值是刚刚所说的一个图的元素（元素列表）（见下面第四部分的例子）。图的元素可以是一个操作（那么fetch回来的值将是None）；一个tensor（返回的值将是表示这个tensor值的numpy ndarray对象）；一个sparsetensor（稀疏tensor）；一个get_tensor_handle的操作；一个表示tensor或者操作名称的string
feed_dict: 一个字典，为之前“占位”的元素“喂”给值。（具体见第四部分的例子。）
The optional feed_dict argument allows the caller to override the value of tensors in the graph. Each key in feed_dict can be one of the following types:
If the key is a Tensor, the value may be a Python scalar, string, list, or numpy ndarray that can be converted to the same dtype as that tensor. Additionally, if the key is a placeholder, the shape of the value will be checked for compatibility with the placeholder.
If the key is a SparseTensor, the value should be a SparseTensorValue.
If the key is a nested tuple of Tensors or SparseTensors, the value should be a nested tuple with the same structure that maps to their corresponding values as above.
Each value in feed_dict must be convertible to a numpy array of the dtype of the corresponding key.
options: A [RunOptions] protocol buffer

The optional options argument expects a [RunOptions] proto. The options allow controlling the behavior of this particular step (e.g. turning tracing on).

The optional run_metadata argument expects a [RunMetadata] proto. When appropriate, the non-Tensor output of this step will be collected there. For example, when users turn on tracing in options, the profiled info will be collected into this argument and passed back.

Example:

   a = tf.constant([10, 20])
b = tf.constant([1.0, 2.0])
# 'fetches' can be a singleton
v = session.run(a)
# v is the numpy array [10, 20]
# 'fetches' can be a list.
v = session.run([a, b])
# v a Python list with 2 numpy arrays: the numpy array [10, 20] and the
# 1-D array [1.0, 2.0]
# 'fetches' can be arbitrary lists, tuples, namedtuple, dicts:
MyData = collections.namedtuple('MyData', ['a', 'b'])
v = session.run({'k1': MyData(a, b), 'k2': [b, a]})
# v is a dict with
# v['k1'] is a MyData namedtuple with 'a' the numpy array [10, 20] and
# 'b' the numpy array [1.0, 2.0]
# v['k2'] is a list with the numpy array [1.0, 2.0] and the numpy array
# [10, 20].

tf.Session.close()

as_default()

c = tf.constant(...)
sess = tf.Session()

# Inside as_default(), this session is returned by
# tf.get_default_session() and Tensor.eval() uses it implicitly.
with sess.as_default():
    assert tf.get_default_session() is sess
    print(c.eval())

c = tf.constant(...)
sess = tf.Session()
with sess.as_default():
    print(c.eval())
# ...
# as_default() does not close the session, so it can be re-entered.
with sess.as_default():
    print(c.eval())