1. Create a graph and launch it in a session
import tensorflow as tf
m1=tf.constant([[3,3]])  # define two constants
m2=tf.constant([[3],[2]])
product=tf.matmul(m1,m2)  # create a matrix multiplication op
print(product)  # prints the Tensor object, not the result; the graph has not been run yet
# First way to use a Session
sess=tf.Session()
result=sess.run(product)
print(result)
sess.close()
# Second way: use the Session as a context manager (it is closed automatically)
with tf.Session() as sess:
    result=sess.run(product)
    print(result)
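With these constants the product is a 1x1 matrix, 3*3 + 3*2 = [[15]], while print(product) only shows the Tensor description. For quick experiments there is also tf.InteractiveSession, which installs itself as the default session so eval() can be called on tensors directly; a minimal sketch reusing the product op above:
# A third option: InteractiveSession registers itself as the default session
sess=tf.InteractiveSession()
print(product.eval())  # [[15]]; eval() runs the op in the default session
sess.close()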
2. Variables
import tensorflow as tf
x=tf.Variable([1,2])  # define a variable x
a=tf.constant([3,3])
sub=tf.subtract(x,a)  # subtraction op
add=tf.add(a,sub)     # addition op
init=tf.global_variables_initializer()  # op that initializes all variables
with tf.Session() as sess:
    sess.run(init)  # the initializer must be run before the variables are used
    print(sess.run(sub))
    print(sess.run(add))
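With x=[1,2] and a=[3,3] the two prints should show [-2 -1] and [1 2]. Skipping sess.run(init) would raise an error because x has no value yet; a single variable can also be initialized on its own, a small sketch using the same x and sub:
with tf.Session() as sess:
    sess.run(x.initializer)   # initializes only x instead of all variables
    print(sess.run(sub))      # [-2 -1]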
state=tf.Variable(0,name='counter')  # define a variable named 'counter', initialized to 0
new_value=tf.add(state,1)            # op that computes state+1
update=tf.assign(state,new_value)    # assignment op: running update copies new_value into state
init=tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    print(sess.run(state))
    for i in range(5):
        sess.run(update)
        print(sess.run(state))
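The add/assign pair can be written more compactly with tf.assign_add, which increments a variable in place; a minimal sketch (counter2 is a hypothetical second variable, used only for this illustration):
counter2=tf.Variable(0,name='counter2')   # hypothetical variable for this sketch
update2=tf.assign_add(counter2,1)         # adds 1 to counter2 and returns the updated value
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(5):
        print(sess.run(update2))          # 1 2 3 4 5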
3. Fetch and feed
import tensorflow as tf
input1=tf.constant(3.0)
input2=tf.constant(2.0)
input3=tf.constant(5.0)
add=tf.add(input2,input3)
mul=tf.multiply(input1,add)
with tf.Session() as sess:
    result=sess.run([mul,add])  # fetch: run several ops in one call and get all their values
    print(result)
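Passing a list of ops is the fetch mechanism: one sess.run call evaluates several nodes and returns their values in the same order, here [21.0, 7.0] since add=2.0+5.0 and mul=3.0*7.0. A single tensor can also be fetched directly:
with tf.Session() as sess:
    print(sess.run(mul))   # 21.0; fetching a single tensor returns just that value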
input1=tf.placeholder(tf.float32)
input2=tf.placeholder(tf.float32)
output=tf.multiply(input1,input2)
with tf.Session() as sess:
    print(sess.run(output,feed_dict={input1:[8.],input2:[2.]}))  # feed: supply placeholder values at run time
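Because the placeholders were created without a fixed shape, the same output op accepts other shapes at run time; a small sketch reusing input1, input2 and output from above:
with tf.Session() as sess:
    print(sess.run(output, feed_dict={input1:3., input2:4.}))            # scalars: 12.0
    print(sess.run(output, feed_dict={input1:[1.,2.], input2:[3.,4.]}))  # elementwise: [3. 8.]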
4. A first simple example: fitting a line
import tensorflow as tf
import numpy as np
# use numpy to generate 100 random points
x_data=np.random.rand(100)
y_data=x_data*0.1+0.2  # the target line; training should drive k and b from 0 toward 0.1 and 0.2
# define two variables and build a linear model
b=tf.Variable(0.)
k=tf.Variable(0.)
y=k*x_data+b
# quadratic cost function (mean squared error)
loss=tf.reduce_mean(tf.square(y_data-y))  # mean of the squared differences between data and model
# gradient descent optimizer; TensorFlow provides it ready to use
optimizer=tf.train.GradientDescentOptimizer(0.2)  # 0.2 is the learning rate
train=optimizer.minimize(loss)
init=tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for step in range(201):
        sess.run(train)
        if step%20==0:
            print(step,sess.run([k,b]))
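Each call to sess.run(train) performs one gradient descent update, roughly k <- k - 0.2*dloss/dk and b <- b - 0.2*dloss/db, so the printed pairs should move toward [0.1, 0.2]. A numpy-only sketch of the same update rule, reusing np, x_data and y_data from above (an illustration of what minimize(loss) does, not the TensorFlow implementation):
k,b,lr=0.0,0.0,0.2
for step in range(201):
    y_pred=k*x_data+b
    grad_k=np.mean(-2*(y_data-y_pred)*x_data)   # d/dk of mean((y_data-y_pred)^2)
    grad_b=np.mean(-2*(y_data-y_pred))          # d/db of mean((y_data-y_pred)^2)
    k-=lr*grad_k
    b-=lr*grad_b
print(k,b)   # both values should end up close to 0.1 and 0.2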
5. Nonlinear regression
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Generate sample data (assumed here, since the original omits it): a noisy quadratic, shape [200,1]
x_data = np.linspace(-0.5, 0.5, 200)[:, np.newaxis]
noise = np.random.normal(0, 0.02, x_data.shape)
y_data = np.square(x_data) + noise
# define two placeholders (any number of rows, one column)
x = tf.placeholder(tf.float32, [None,1])
y = tf.placeholder(tf.float32, [None,1])
# define the hidden layer (1 input -> 10 hidden units)
Weights_L1 = tf.Variable(tf.random_normal([1,10]))
bias_L1 = tf.Variable(tf.zeros([1,10]))
Wx_plus_b_L1 = tf.matmul(x, Weights_L1) + bias_L1
# activation function
L1 = tf.nn.tanh(Wx_plus_b_L1)
# define the output layer (10 hidden units -> 1 output)
Weights_L2 = tf.Variable(tf.random_normal([10,1]))
bias_L2 = tf.Variable(tf.zeros([1,1]))
Wx_plus_b_L2 = tf.matmul(L1,Weights_L2) + bias_L2
prediction = tf.nn.tanh(Wx_plus_b_L2)
# quadratic cost (loss) function
loss = tf.reduce_mean(tf.square(y-prediction))
# gradient descent
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
with tf.Session() as sess:
    # initialize the variables
    sess.run(tf.global_variables_initializer())
    for _ in range(2000):
        sess.run(train_step, feed_dict={x:x_data, y:y_data})
    # get the predictions for the training inputs
    prediction_value = sess.run(prediction,feed_dict={x:x_data})
    plt.figure()
    plt.scatter(x_data, y_data)
    plt.plot(x_data, prediction_value,'r-', lw=5)
    plt.show()
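The two hand-built layers could also be written with the higher-level tf.layers API; a minimal sketch of the same 1-10-1 architecture (reusing the x and y placeholders above; the _alt names are just for illustration):
L1_alt = tf.layers.dense(x, 10, activation=tf.nn.tanh)               # hidden layer: 1 -> 10, tanh
prediction_alt = tf.layers.dense(L1_alt, 1, activation=tf.nn.tanh)   # output layer: 10 -> 1, tanh
loss_alt = tf.reduce_mean(tf.square(y - prediction_alt))
train_step_alt = tf.train.GradientDescentOptimizer(0.1).minimize(loss_alt)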