Deep learning theory series:
Deep Learning 01
Deep Learning 02: CNN
Deep Learning 03: Tips for Deep Learning
Deep Learning 04: RNN
Deep learning TensorFlow series:
tensorflow01: Basic concepts
1 Graphs
TensorFlow represents computations as graphs. A default graph is created for you, so you only need to add your own ops (nodes) to it. The snippets below build two such graphs; a graph has to be run inside a session.
Running TensorFlow 1.x code under TensorFlow 2.0:
import tensorflow.compat.v1 as tf
# disable eager execution so the 1.x graph/session API can be used
if tf.executing_eagerly():
    tf.disable_eager_execution()
# create two constant ops
m1 = tf.constant([[3, 3]])
m2 = tf.constant([[2], [3]])
# matrix multiplication op
pro = tf.matmul(m1, m2)
with tf.Session() as sess:
    result = sess.run(pro)
    print(result)
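The ops above land in the default graph automatically. As a minimal sketch for contrast (not part of the original notes), the same computation can be built inside an explicitly created tf.Graph and the session bound to that graph, which makes the "a graph is run inside a session" relationship explicit:
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()
# build the ops inside an explicitly created graph instead of the default one
g = tf.Graph()
with g.as_default():
    m1 = tf.constant([[3, 3]])
    m2 = tf.constant([[2], [3]])
    pro = tf.matmul(m1, m2)
# a session runs exactly one graph; pass the explicit graph here
with tf.Session(graph=g) as sess:
    print(sess.run(pro))  # [[15]]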
x = tf.constant([1, 2])
a = tf.constant([3, 3])
# add a subtraction op
sub = tf.subtract(x, a)
# add an addition op
add = tf.add(x, sub)
# no variables here, so no initializer is needed
with tf.Session() as sess:
    print(sess.run(sub))
    print(sess.run(add))
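For comparison, here is a minimal sketch of the same subtract/add computation written against the native TensorFlow 2.x eager API, where no graph or session is needed (an added aside; run it in a fresh process without disable_eager_execution()):
import tensorflow as tf  # TF 2.x, eager execution on by default
x = tf.constant([1, 2])
a = tf.constant([3, 3])
sub = tf.subtract(x, a)  # evaluated immediately: [-2 -1]
add = tf.add(x, sub)     # [-1  1]
print(sub.numpy(), add.numpy())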
2 Variables
# create a variable initialized to 0
state = tf.Variable(0, name='counter')
# create an op that adds 1 to state
new_value = tf.add(state, 1)
# assignment op: assign new_value back to state
update = tf.assign(state, new_value)
# variable initializer
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    print(sess.run(state))
    for i in range(5):
        sess.run(update)
        print(sess.run(state))
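The add op plus the assign op can be collapsed into a single update op. A minimal sketch of the same counter using tf.assign_add (an alternative shown for illustration, not from the original notes):
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()
state = tf.Variable(0, name='counter')
# assign_add does the addition and the assignment in one op
update = tf.assign_add(state, 1)
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for i in range(5):
        print(sess.run(update))  # prints 1, 2, 3, 4, 5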
3 Fetch and feed
# Fetch: run several ops in a single session call
input1 = tf.constant(3.0)
input2 = tf.constant(2.0)
input3 = tf.constant(5.0)
add = tf.add(input2, input3)
mul = tf.multiply(input1, add)
with tf.Session() as sess:
    result = sess.run([mul, add])
    print(result)
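The fetches passed to sess.run do not have to be a list; any nested structure works, including a dict, which keeps the results labeled. A small sketch added for illustration:
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()
input1 = tf.constant(3.0)
input2 = tf.constant(2.0)
input3 = tf.constant(5.0)
add = tf.add(input2, input3)
mul = tf.multiply(input1, add)
with tf.Session() as sess:
    # fetch a dict: the result keeps the same keys
    result = sess.run({'add': add, 'mul': mul})
    print(result)  # {'add': 7.0, 'mul': 21.0}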
# Feed
# create placeholders; their values are supplied at run time through feed_dict
input1 = tf.placeholder(tf.float32)
input2 = tf.placeholder(tf.float32)
output = tf.multiply(input1, input2)
with tf.Session() as sess:
    result = sess.run(output, feed_dict={input1: [4.0], input2: [2.0]})
    print(result)
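A placeholder can also declare a shape so that the graph checks whatever is fed in. A minimal sketch feeding a small batch (the shape [None, 2] is just an illustrative choice, not from the original notes):
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()
# None in the shape means any batch size; the second dimension must be 2
input1 = tf.placeholder(tf.float32, shape=[None, 2])
input2 = tf.placeholder(tf.float32, shape=[None, 2])
output = tf.multiply(input1, input2)
with tf.Session() as sess:
    result = sess.run(output, feed_dict={input1: [[4.0, 1.0]], input2: [[2.0, 3.0]]})
    print(result)  # [[8. 3.]]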
4 A small example (linear regression)
import numpy as np
# use numpy to create 100 random points; the true parameters are k=0.1, b=0.2
x_data = np.random.rand(100)
y_data = x_data * 0.1 + 0.2
# build the model
b = tf.Variable(0.)
k = tf.Variable(0.)
y = k * x_data + b
# quadratic cost function (mean squared error)
loss = tf.reduce_mean(tf.square(y_data - y))
# define a gradient descent optimizer with learning rate 0.2
optimizer = tf.train.GradientDescentOptimizer(0.2)
# minimize the cost function
train = optimizer.minimize(loss)
# initialize the variables
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for step in range(201):
        sess.run(train)
        if step % 20 == 0:
            print(step, sess.run([k, b]))
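Since y_data was generated with k=0.1 and b=0.2, the printed values of [k, b] should approach those numbers. To watch convergence more directly, the loss can be fetched in the same run call as the training op; a minimal sketch of the same loop with the loss printed (an added variant, not part of the original notes):
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()
x_data = np.random.rand(100).astype(np.float32)
y_data = x_data * 0.1 + 0.2
b = tf.Variable(0.)
k = tf.Variable(0.)
y = k * x_data + b
loss = tf.reduce_mean(tf.square(y_data - y))
train = tf.train.GradientDescentOptimizer(0.2).minimize(loss)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(201):
        # fetch the train op and the loss together (Fetch, as in section 3)
        _, l = sess.run([train, loss])
        if step % 20 == 0:
            print(step, 'loss =', l, 'k, b =', sess.run([k, b]))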