tensorflow 主要由计算图Graph、张量tensor、会话Session组成
# A tiny end-to-end example: build a graph of constants, then run it in a Session.
import tensorflow as tf
a=tf.constant([1.0,2.0],name='a')
b=tf.constant([2.0,3.0],name='b')
result=a+b  # result is a Tensor (a node in the graph); nothing is computed yet
print(a.graph is tf.get_default_graph())  # constants are added to the default graph
print(result)  # a tensor's name has the form node:source_output, e.g. "add:0"
with tf.Session() as sess:
    # Fixed: the body of the `with` block had lost its indentation in these notes.
    print(sess.run(result))  # the Session actually executes the graph
output:
True
Tensor("add:0", shape=(2,), dtype=float32)
[ 3. 5.]
# Same computation as above, but with an explicitly managed Session object.
sess=tf.Session()
# Compare with the `with`-block style above: here the session must be closed
# manually, and it is passed explicitly to Tensor.eval().
print(result.eval(session=sess))
sess.close()
[ 3. 5.]
# Two independent graphs can each hold a variable with the same name 'v'.
g1=tf.Graph()
with g1.as_default():
    # Note: tf.zeros_initializer must be *called* (trailing '()') to obtain
    # an initializer object before applying it to a shape.
    v=tf.get_variable('v',initializer=tf.zeros_initializer()(shape=[4]))
g2=tf.Graph()
with g2.as_default():
    v=tf.get_variable('v',initializer=tf.ones_initializer()(shape=[1]))
with tf.Session(graph=g1) as sess:
    # Fixed: the bodies of these `with` blocks had lost their indentation.
    tf.global_variables_initializer().run()
    with tf.variable_scope('',reuse=True):
        print(sess.run(tf.get_variable('v')))  # g1's 'v': four zeros
[ 0. 0. 0. 0.]
sess=tf.InteractiveSession()  # registers itself as the default session
# Fixed two typos: ConfigProto lives in the tf namespace, and the option is
# spelled allow_soft_placement (cf. the identical, correct call further below).
config=tf.ConfigProto(allow_soft_placement=True,log_device_placement=True)
# A two-layer feed-forward pass: x(1x2) @ w1(2x3) @ w2(3x1) -> y(1x1).
g1=tf.Graph()
with g1.as_default():
    # Fixed: the bodies of these `with` blocks had lost their indentation.
    w1=tf.Variable(tf.random_normal([2,3],stddev=1))
    w2=tf.Variable(tf.random_normal([3,1],stddev=1))
    x=tf.placeholder(tf.float32,shape=[1,2],name='input')
    a=tf.matmul(x,w1)
    y=tf.matmul(a,w2)
with tf.Session(graph=g1) as sess:
    tf.global_variables_initializer().run()
    # A placeholder must be fed a concrete value at run time.
    print(sess.run(y,feed_dict={x:[[0.7,0.9]]}))
[[ 0.49852759]]
tf.__version__  # notebook-style cell: displays the TensorFlow version ('1.4.0' below)
'1.4.0'
# Element-wise comparison: tf.greater returns a boolean tensor.
v1=tf.constant([1.0,2.0,3.0,4.0])
v2=tf.constant([4.0,3.0,2.0,1.0])
sess=tf.InteractiveSession()
a=sess.run(tf.greater(v1,v2))  # evaluates to a NumPy array of booleans
sess.close()
print(a)
Output:
variables
[False False True True]
# Session configuration (thread-pool size, GPU placement policy, op timeout, ...).
config=tf.ConfigProto(allow_soft_placement=True,log_device_placement=True)
sess1=tf.Session(config=config)
# allow_soft_placement is usually set to True on GPU machines, so ops without
# a GPU kernel fall back to the CPU instead of failing.
# log_device_placement=True records in the log which device each node is placed
# on (handy for debugging); set it to False in production to reduce log volume.
def get_weight(shape,lamda):
    """Create a weight Variable and register its L2 penalty in the 'losses' collection.

    Args:
        shape: shape of the weight matrix, e.g. [in_dim, out_dim].
        lamda: L2 regularization coefficient.

    Returns:
        The newly created tf.Variable.
    """
    var=tf.Variable(tf.random_normal(shape),dtype=tf.float32)
    # Bug fix: the regularization strength was hard-coded to 0.1, silently
    # ignoring the lamda argument (the caller below passes 0.001).
    tf.add_to_collection('losses',
                         tf.contrib.layers.l2_regularizer(lamda)(var))
    return var
# Build a simple fully-connected ReLU network, with weights initialized
# (and L2-regularized) via get_weight above.
# Number of nodes in each layer: input(2) -> three hidden layers(10) -> output(1).
layer_dimension=[2,10,10,10,1]
# Total number of layers in the network.
n_layers=len(layer_dimension)
x=tf.placeholder(tf.float32,shape=(None,2))
y_=tf.placeholder(tf.float32,shape=(None,1))
batch_size=8
cur_layer=x
in_dimension=layer_dimension[0]
for i in range(1,n_layers):
    # Fixed: the loop body had lost its indentation in these notes.
    out_dimension=layer_dimension[i]
    weight=get_weight([in_dimension,out_dimension],0.001)
    bias=tf.Variable(tf.constant(0.1,shape=[out_dimension]))
    cur_layer=tf.nn.relu(tf.matmul(cur_layer,weight)+bias)
    in_dimension=layer_dimension[i]
# Total loss = mean-squared error on the final layer plus the L2 penalty
# terms that get_weight accumulated in the 'losses' collection.
mse_loss=tf.reduce_mean(tf.square(y_-cur_layer))
tf.add_to_collection('losses',mse_loss)
loss=tf.add_n(tf.get_collection('losses'))