In [8]:
# Notes from the DataGuru course "TensorFlow Neural Network Framework, 12 weeks (2017 edition)".
import tensorflow as tf

# Build two constant ops -- a 1x2 row vector and a 2x1 column vector -- and a
# matmul op combining them. In TF 1.x this only constructs the static graph;
# nothing is computed until the op is run inside a Session (next cell).
row_vec = tf.constant([[3, 3]])
col_vec = tf.constant([[2], [3]])
product = tf.matmul(row_vec, col_vec)  # 1x1 tensor; evaluated later via sess.run

# Printing the op shows only metadata (name, shape, dtype), not the value.
print(product)
Output:
Tensor("MatMul_1:0", shape=(1, 1), dtype=int32)
In [4]:
# The `with` block closes the Session automatically on exit,
# so no explicit sess.close() is needed.
with tf.Session() as sess:
    # run() actually executes the graph up to the `product` op
    # (defined in the previous cell) and returns a numpy value.
    result = sess.run(product)
    print(result)
Output:
[[15]]
In [6]:
x = tf.Variable([1, 2])
a = tf.constant([3, 3])
sub = tf.subtract(x, a)  # x - a  -> [-2, -1]
add = tf.add(x, sub)     # x + (x - a) -> [-1, 1]

# In TF 1.x, variables must be explicitly initialized before use.
init = tf.global_variables_initializer()

with tf.Session() as sess2:
    sess2.run(init)
    result_sub = sess2.run(sub)
    result_add = sess2.run(add)
    print(result_sub, result_add)
Output:
[-2 -1] [-1 1]
In [8]:
cons1 = tf.constant(2.0)
cons2 = tf.constant(3.0)
cons3 = tf.constant(4.0)
add = tf.add(cons1, cons2)     # 2.0 + 3.0
mul = tf.multiply(add, cons3)  # (2.0 + 3.0) * 4.0

with tf.Session() as sess3:
    # "Fetch": pass a list of ops to run() to evaluate several
    # nodes in a single graph execution.
    result = sess3.run([add, mul])
    print(result)
Output:
[5.0, 20.0]
In [4]:
p1 = tf.placeholder(tf.float32)
p2 = tf.placeholder(tf.float32)
out4 = tf.multiply(p1, p2)

with tf.Session() as sess4:
    # "Feed": placeholders are filled at run time with a
    # feed_dict mapping placeholder -> value.
    print(sess4.run(out4, feed_dict={p1: [7.], p2: [3.]}))
Output:
[21.]
In [7]:
import tensorflow as tf
import numpy as np

# Synthetic noiseless data: y = 2.3 * x + 5.
# NOTE(review): no random seed is set, so x_data (and hence the printed
# intermediate values) differ between runs -- only the converged values
# are reproducible.
x_data = np.random.rand(100)
y_data = x_data * 2.3 + 5

# NOTE(review): the names are swapped relative to the usual y = k*x + b
# convention -- here `b` multiplies x (slope) and `k` is the intercept,
# so training drives b -> 2.3 and k -> 5, matching the recorded output
# of print([k, b]). Kept as-is to preserve the recorded results.
b = tf.Variable(0.)
k = tf.Variable(0.)
y = b * x_data + k

# Mean squared error, minimized with plain gradient descent (lr = 0.2).
loss = tf.reduce_mean(tf.square(y_data - y))
optm = tf.train.GradientDescentOptimizer(0.2)
train = optm.minimize(loss)

init = tf.global_variables_initializer()
with tf.Session() as sess5:
    sess5.run(init)
    for step in range(201):
        sess5.run(train)
        # Report fitted [intercept, slope] every 20 steps.
        if step % 20 == 0:
            print(sess5.run([k, b]))
Output:
[2.4711082, 1.3275841]
[4.892301, 2.499181]
[4.929808, 2.4298172]
[4.9542522, 2.3846083]
[4.970184, 2.3551435]
[4.9805675, 2.33594]
[4.987335, 2.3234234]
[4.9917455, 2.3152661]
[4.9946203, 2.3099494]
[4.996494, 2.3064845]
[4.997715, 2.3042264]