import tensorflow as tf

# Build a tiny graph: y = w @ x (a 1x2 row vector times a 2x1 column vector).
w = tf.Variable([[0.5, 1.0]])    # row vector wrapped as a TF variable
x = tf.Variable([[2.0], [1.0]])  # column vector
y = tf.matmul(w, x)              # symbolic matrix-multiply node, not yet evaluated
print(w)  # prints the Variable object itself, not its value

# Variables only hold values after being initialized inside a session.
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init_op)
    print(y.eval())  # evaluates the graph -> [[2.]]
    print(y)         # still just the symbolic tensor
<tf.Variable 'Variable_18:0' shape=(1, 2) dtype=float32_ref>
[[ 2.]]
# Compare with numpy if you like; prefer float32 throughout for TF ops.
norm = tf.random_normal([2, 3], mean=-1, stddev=4)  # random draws each run
# random_shuffle permutes along the first dimension only
c = tf.constant([[1, 2], [3, 4], [5, 6]])
shuff = tf.random_shuffle(c)
# Any op must be executed inside a session. Use a context manager so the
# Session is always closed -- the original created one and leaked it.
with tf.Session() as sess:
    print(sess.run(norm))
    print(sess.run(shuff))
[[ 0.51443636 -2.00240898 -1.08036768]
[-0.37550694 -0.03160679 1.06559968]]
[[3 4]
[1 2]
[5 6]]
# A self-incrementing counter. It looks verbose, but in TF complex and
# simple operations take roughly the same amount of code.
state = tf.Variable(0)                 # integer counter, starts at 0
one = tf.constant(1)
new_value = tf.add(state, one)         # state + 1
update = tf.assign(state, new_value)   # write the incremented value back

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(state))             # initial value: 0
    for _ in range(3):
        sess.run(update)               # increment ...
        print(sess.run(state))         # ... then read back 1, 2, 3
0
1
2
3
Reference notes on TF tensor-creation ops:
  tf.zeros([3, 4], int32)  ==> [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
  # with 'tensor' = [[1, 2, 3], [4, 5, 6]]:
  tf.zeros_like(tensor)    ==> [[0, 0, 0], [0, 0, 0]]
  tf.ones([2, 3], int32)   ==> [[1, 1, 1], [1, 1, 1]]
  tf.ones_like(tensor)     ==> [[1, 1, 1], [1, 1, 1]]
  # constant 1-D tensor populated from a value list:
  tf.constant([1, 2, 3, 4, 5, 6, 7])          ==> [1 2 3 4 5 6 7]
  # constant 2-D tensor populated with scalar value -1:
  tf.constant(-1.0, shape=[2, 3])             ==> [[-1. -1. -1.] [-1. -1. -1.]]
  tf.linspace(10.0, 12.0, 3, name="linspace") ==> [10.0 11.0 12.0]
  # with start=3, limit=18, delta=3:
  tf.range(start, limit, delta)               ==> [3, 6, 9, 12, 15]
import numpy as np

# A numpy array can be converted to a tensor explicitly, although the
# conversion is normally done implicitly when running ops in a session.
a = np.zeros((3, 3))
ta = tf.convert_to_tensor(a)
with tf.Session() as sess:
    print(sess.run(ta))
[[ 0. 0. 0.]
[ 0. 0. 0.]
[ 0. 0. 0.]]
# Basics: http://blog.csdn.net/xxzhangx/article/details/54606040
# Placeholders reserve graph inputs (like declaring float32 variables whose
# values are supplied later through feed_dict).
input1 = tf.placeholder(tf.float32)
input2 = tf.placeholder(tf.float32)
# BUG FIX: tf.mul was removed in TensorFlow 1.0 -- the op is tf.multiply.
output = tf.multiply(input1, input2)
with tf.Session() as sess:
    # the placeholders are bound at run time via feed_dict
    print(sess.run([output], feed_dict={input1: [7.], input2: [2.]}))
[array([14.], dtype=float32)]
a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)
y = tf.subtract(a, b)
# Use a context manager so the Session is closed (the original leaked it).
with tf.Session() as sess:
    print(sess.run(y, feed_dict={a: 3, b: 3}))  # 3 - 3 -> 0.0
0.0
import tensorflow as tf

a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)
# tf.div is legacy; tf.divide is the recommended element-wise division op
# and produces the same result for float inputs.
y = tf.divide(a, b)
# Context manager closes the Session (the original leaked it).
with tf.Session() as sess:
    print(sess.run(y, feed_dict={a: 3, b: 3}))  # 3 / 3 -> 1.0
1.0
# Modulo
import tensorflow as tf

a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)
y = tf.mod(a, b)  # element-wise remainder
# Context manager closes the Session (the original leaked it).
with tf.Session() as sess:
    print(sess.run(y, feed_dict={a: 3, b: 3}))  # 3 % 3 -> 0.0
0.0
import tensorflow as tf

# BUG FIX: the original passed name='v1' / name='v2' to tf.random_normal
# instead of tf.Variable, so the variables were stored in the checkpoint
# under auto-generated keys ("Variable", "Variable_1"). That is why the
# restore script below fails with "Key ... not found in checkpoint".
v1 = tf.Variable(tf.random_normal([1, 2]), name='v1')
v2 = tf.Variable(tf.random_normal([2, 3]), name='v2')
init_op = tf.global_variables_initializer()
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(init_op)
    print('V1:', sess.run(v1))
    print('v2:', sess.run(v2))
    # writes the checkpoint files under save/
    saver_path = saver.save(sess, 'save/model.ckpt')
    print('model saved in file:', saver_path)
V1: [[-1.2444832 1.706175 ]]
v2: [[ 2.5250704 0.40654743 -0.9346154 ]
[ 0.23543198 1.6982827 1.1330606 ]]
model saved in file: save/model.ckpt
Problem when loading the variables back -- how to fix it? (Root cause: the save script above passed name= to tf.random_normal rather than tf.Variable, so the checkpoint keys do not match the names "v1"/"v2" used at restore time.)
import tensorflow as tf

# FIX: reset the default graph so re-running this cell does not pile up
# duplicate variables (v1_1, ..., v1_5) whose names are absent from the
# checkpoint -- that is exactly the "Key v1_5 not found in checkpoint"
# error shown below.
tf.reset_default_graph()
v1 = tf.Variable(tf.random_normal([1, 2]), name="v1")
v2 = tf.Variable(tf.random_normal([2, 3]), name="v2")
saver = tf.train.Saver()
with tf.Session() as sess:
    # no initializer needed: restore() loads the saved values directly
    model_file = tf.train.latest_checkpoint("save/")
    saver.restore(sess, model_file)
    print(sess.run(v1))
    print(sess.run(v2))
    print("model restored!!!")
NotFoundError (see above for traceback): Key v1_5 not found in checkpoint
[[Node: save_5/RestoreV2_5 = RestoreV2[dtypes=[DT_FLOAT], _device="/job:localhost/replica:0/task:0/device:CPU:0"](_arg_save_5/Const_0_0, save_5/RestoreV2_5/tensor_names, save_5/RestoreV2_5/shape_and_slices)]]
Or: the values before and after restoring differ??? Still unresolved. (This is expected when the graph is rebuilt with fresh random initializers instead of actually restoring from the checkpoint.)
Usage of tf.variable_scope() and tf.get_variable()
# Define the RNN cell's weight parameters inside a variable scope.
# (The original used bare string statements as comments; they are evaluated
# and discarded, so replacing them with real comments changes nothing.)
with tf.variable_scope('rnn_cell'):
    # tf.Variable() always creates a brand-new object, so reuse=True has no
    # effect on it. tf.get_variable(), by contrast, returns the existing
    # variable if one with that name was already created in this scope and
    # creates a new one otherwise.
    # NOTE(review): num_classes and state_size must be defined earlier in
    # the notebook -- they are not visible in this chunk; confirm.
    W = tf.get_variable('W', [num_classes + state_size, state_size])
    b = tf.get_variable('b', [state_size],
                        initializer=tf.constant_initializer(0.0))
# Re-open the same scope in reuse mode so every call shares one W and b.
def rnn_cell(rnn_input, state):
    """One step of a vanilla RNN (not an LSTM): tanh([input, state] @ W + b)."""
    with tf.variable_scope('rnn_cell', reuse=True):
        W = tf.get_variable('W', [num_classes + state_size, state_size])
        b = tf.get_variable('b', [state_size],
                            initializer=tf.constant_initializer(0.0))
    # BUG FIX: the original called tf.amtmul (a typo) -- the op is tf.matmul.
    return tf.tanh(tf.matmul(tf.concat([rnn_input, state], axis=1), W) + b)