创建一个constant张量
关于张量:
多维数组可以称为张量，和 numpy 中的数组一样，张量也有类型和形状。
例1
# Example 1: build a TF1 graph of constants and arithmetic ops, then evaluate it.
import os

# Silence TensorFlow's C++ logging (3 = errors only). This must be set
# BEFORE tensorflow is imported, otherwise it has no effect.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

import tensorflow as tf

a = tf.constant(5, name='input_a')
b = tf.constant(3, name='input_b')
c = tf.multiply(a, b, name='mul_c')  # 5 * 3 = 15
d = tf.add(a, b, name='add_d')       # 5 + 3 = 8
e = tf.add(c, d, name='add_e')       # 15 + 8 = 23

# Evaluate each node in a session (TF1 uses deferred graph execution);
# the context manager closes the session automatically.
with tf.Session() as sess:
    print(sess.run(a))
    print(sess.run(b))
    print(sess.run(c))
    print(sess.run(d))
    print(sess.run(e))
结果:
5
3
15
8
23
例2
# Example 2: reduction ops over a 1-D constant tensor.
import os

# Must be set before the tensorflow import to actually suppress the C++ logs.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

import tensorflow as tf

a = tf.constant([5, 3], name='input_a')
b = tf.reduce_prod(a, name='pro_b')  # product of elements: 5 * 3 = 15
c = tf.reduce_sum(a, name='sum_c')   # sum of elements:     5 + 3 = 8
d = tf.add(b, c, name='add_d')       # 15 + 8 = 23

# Context manager closes the session automatically.
with tf.Session() as sess:
    print(sess.run(a))
    print(sess.run(b))
    print(sess.run(c))
    print(sess.run(d))
例3
# Example 3: matrix multiplication, and two ways of running a session.
import os

# Must be set before the tensorflow import to take effect.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

import tensorflow as tf

# A 1x2 tensor.
m1 = tf.constant([[2, 2]])
# A 2x1 tensor.
m2 = tf.constant([[3], [3]])
dot_operation = tf.matmul(m1, m2)  # (1x2) @ (2x1) -> 1x1, value [[12]]

# Method 1: create the session explicitly (remember to close it).
sess = tf.Session()
result = sess.run(dot_operation)
print(result)
sess.close()

# Method 2: context manager closes the session automatically.
with tf.Session() as sess:
    result = sess.run(dot_operation)
    print(result)
例4
# Example 4: placeholders — tensors whose values are fed in at run time.
import os

# Must be set before the tensorflow import to take effect.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

import tensorflow as tf

# Placeholders with unconstrained shape.
x1 = tf.placeholder(dtype=tf.float32, shape=None)
y1 = tf.placeholder(dtype=tf.float32, shape=None)
z1 = x1 + y1

# Placeholders with fixed shapes: (2,1) @ (1,2) -> (2,2).
x2 = tf.placeholder(dtype=tf.float32, shape=[2, 1])
y2 = tf.placeholder(dtype=tf.float32, shape=[1, 2])
z2 = tf.matmul(x2, y2)

with tf.Session() as sess:
    # Run one op at a time:
    # z1_value = sess.run(z1, feed_dict={x1: 1, y1: 2})
    # Or run both ops in a single call:
    z1_value, z2_value = sess.run([z1, z2], feed_dict={
        x1: 1, y1: 2, x2: [[2], [2]], y2: [[3, 3]]
    })
    print(z1_value)
    print(z2_value)
结果:
3.0
[[6. 6.]
[6. 6.]]
例5
# Example 5: fit the line y = 0.1*x + 0.3 with gradient descent.
import os

# Must be set before the tensorflow import to take effect
# (suppresses TensorFlow's C++ log output; 3 = errors only).
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

# numpy: scientific-computing module, used here to generate training data.
import numpy as np
import tensorflow as tf

# Create training data that the network will learn from.
x_data = np.random.rand(100).astype(np.float32)
y_data = x_data * 0.1 + 0.3

# Build the model: one weight and one bias to be learned.
Weights = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
biases = tf.Variable(tf.zeros([1]))
y = Weights * x_data + biases

# Mean squared error, minimized with plain gradient descent (lr = 0.5).
loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)
init = tf.global_variables_initializer()
# -----------------------------------------------
# Context manager closes the session automatically.
with tf.Session() as sess:
    sess.run(init)
    for step in range(201):
        sess.run(train)
        if step % 20 == 0:
            # Weights should converge toward 0.1 and biases toward 0.3.
            print(step, sess.run(Weights), sess.run(biases))