Chapter 4 TensorFlow Programming Basics

4.1 Writing "hello world" with a Session
Program:

import tensorflow as tf
hello = tf.constant('hello tensorflow')
sess = tf.Session()
print(sess.run(hello))
sess.close()

Result:

b'hello tensorflow'
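
All of the examples in this chapter use the TensorFlow 1.x graph-and-session API. If only TensorFlow 2.x is installed, the same code can usually still be run through the compatibility layer; a minimal sketch, assuming TensorFlow 2.x with its tf.compat.v1 module:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()  # restore 1.x graph/session semantics

hello = tf.constant('hello tensorflow')
with tf.Session() as sess:
    print(sess.run(hello))  # b'hello tensorflow'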

4.2 Using a Session with the with statement
This program shows one of the ways of invoking a session.
Program:

import tensorflow as tf
a = tf.constant(3)
b = tf.constant(4)
with tf.Session() as sess:
    print("xiangjia %i" % sess.run(a+b))
    print("xiangjain %i" % sess.run(a*b))

Result:

xiangjia 7
xiangcheng 12

4.3 Demonstrating the feed mechanism
Placeholders are defined, the concrete values 3 and 4 are passed into the session through the placeholders via the feed mechanism, and the session then adds and multiplies them.
Program:

# Use the feed mechanism to inject the concrete arguments into the corresponding placeholders: feed_dict={a:3,b:4}

import tensorflow as tf
a = tf.placeholder(tf.int16)
b = tf.placeholder(tf.int16)
add = tf.add(a,b)
mul = tf.multiply(a,b)
with tf.Session() as sess:
    print("xiangjia: %i" % sess.run(add,feed_dict={a:3,b:4}))
    print("xiangcheng: %i" % sess.run(mul, feed_dict={a: 3, b: 4}))
    print(sess.run([mul,add], feed_dict={a: 3, b: 4})) # fetch several results in a single run

Result:

xiangjia: 7
xiangcheng: 12
[12, 7]
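
The placeholders above were created without a fixed shape, so the same graph also accepts array feeds, not just scalars. A minimal sketch, assuming NumPy is available:

import numpy as np

with tf.Session() as sess:
    # feed two vectors instead of two scalars
    print(sess.run(add, feed_dict={a: np.array([1, 2], dtype=np.int16),
                                   b: np.array([3, 4], dtype=np.int16)}))  # [4 6]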

4.4 Saving and loading a linear regression model
Program:

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

plotdata = {"batchsize": [], "loss": []}


def moving_average(a, w=10):
    # smooth the curve with a sliding-window average of width w
    if len(a) < w:
        return a[:]
    return [val if idx < w else sum(a[(idx - w):idx]) / w for idx, val in enumerate(a)]


# Generate simulated data
train_X = np.linspace(-1, 1, 100)
train_Y = 2 * train_X + np.random.randn(*train_X.shape) * 0.3  # y = 2x, plus noise
# Plot the data
plt.plot(train_X, train_Y, 'ro', label='Original data')
plt.legend()
plt.show()

tf.reset_default_graph()

# Create the model
# Placeholders
X = tf.placeholder("float")
Y = tf.placeholder("float")
# Model parameters
W = tf.Variable(tf.random_normal([1]), name="weight")
b = tf.Variable(tf.zeros([1]), name="bias")
# Forward structure
z = tf.multiply(X, W) + b

# Backward optimization
cost = tf.reduce_mean(tf.square(Y - z))
learning_rate = 0.01
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)  # Gradient descent

# Initialize variables
init = tf.global_variables_initializer()
# Training parameters
training_epochs = 20
display_step = 2
saver = tf.train.Saver()  # create the saver
savedir = "F:/shendu/000"  # 生成模型的路径
# Launch the session
with tf.Session() as sess:
    sess.run(init)

    # Training loop: fit all the training data
    for epoch in range(training_epochs):
        for (x, y) in zip(train_X, train_Y):
            sess.run(optimizer, feed_dict={X: x, Y: y})

        # Display details of the training progress
        if epoch % display_step == 0:
            loss = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
            print("Epoch:", epoch + 1, "cost=", loss, "W=", sess.run(W), "b=", sess.run(b))
            if not (loss == "NA"):
                plotdata["batchsize"].append(epoch)
                plotdata["loss"].append(loss)

    print(" Finished!")
    saver.save(sess, savedir + "linermodel.cpkt")  # save the model
    print("cost=", sess.run(cost, feed_dict={X: train_X, Y: train_Y}), "W=", sess.run(W), "b=", sess.run(b))
    # print ("cost:",cost.eval({X: train_X, Y: train_Y}))

    # Plot the fitted model
    plt.plot(train_X, train_Y, 'ro', label='Original data')
    plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
    plt.legend()
    plt.show()

    plotdata["avgloss"] = moving_average(plotdata["loss"])
    plt.figure(1)
    plt.subplot(211)
    plt.plot(plotdata["batchsize"], plotdata["avgloss"], 'b--')
    plt.xlabel('Minibatch number')
    plt.ylabel('Loss')
    plt.title('Minibatch run vs. Training loss')

    plt.show()

# Start a second session and load the saved model

with tf.Session() as sess2:
    sess2.run(tf.global_variables_initializer())  # initialize variables (restore then overwrites them)
    saver.restore(sess2, savedir + "linermodel.cpkt")
    print("x=0.2,z=", sess2.run(z, feed_dict={X: 0.2}))
    

Result:

cost= 0.095335424 W= [1.935207] b= [0.03402096]
x=0.2,z= [0.42106238]

4.6 Saving checkpoints
Program:

import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np

# Helper for visualizing the loss
plotdata = {"batchsize": [], "loss": []}
def moving_average(a, w=10):
    # smooth the curve with a sliding-window average of width w
    if len(a) < w:
        return a[:]
    return [val if idx < w else sum(a[(idx - w):idx]) / w for idx, val in enumerate(a)]

# Generate simulated data
train_X = np.linspace(-1, 1, 100)
train_Y = 2 * train_X + np.random.randn(*train_X.shape) * 0.3
# Plot the data
plt.plot(train_X, train_Y, 'ro', label='original data')
plt.legend()
plt.show()
# Reset the graph
tf.reset_default_graph()

# Create the model
X = tf.placeholder('float')
Y = tf.placeholder('float')
# Model parameters
W = tf.Variable(tf.random_normal([1]), name='weight')
b = tf.Variable(tf.zeros([1]), name='bias')
# Forward structure
z = tf.multiply(W, X) + b   # multiply performs element-wise multiplication

# Backward optimization
cost = tf.reduce_mean(tf.square(Y - z))
learning_rate = 0.01
# Gradient descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

# Initialize all variables
init = tf.global_variables_initializer()
# Training parameters
training_epochs = 20
display_step = 2

saver = tf.train.Saver()
savedir = 'log/'

# Launch the session
with tf.Session() as sess:
    sess.run(init)
    # plotdata = {'batchsize':[],'loss':[]}             # holds batch indices and loss values
    # Feed the training data into the model
    for epoch in range(training_epochs):
        for (x,y) in zip(train_X,train_Y):
            sess.run(optimizer,feed_dict={X:x,Y:y})

        # Display details of the training progress
        if epoch % display_step ==0:
            loss = sess.run(cost,feed_dict = {X:train_X,Y:train_Y})
            print("Epoch:",epoch+1,"cost=",loss,"W=",sess.run(W),"b=",sess.run(b))
            if not (loss == "NA"):
                plotdata["batchsize"].append(epoch)
                plotdata["loss"].append(loss)
                saver.save(sess, savedir + "linermodel.cpkt", global_step=epoch)  # save a checkpoint, tagged with the epoch number

    print("Finished")

    print("cost=",sess.run(cost,feed_dict={X:train_X,Y:train_Y}),
          "W=",sess.run(W),"b=",sess.run(b))



    # Display the fitted model
    plt.plot(train_X, train_Y, 'ro', label='Original data')
    plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
    plt.legend()
    plt.show()

    plotdata["avgloss"] = moving_average(plotdata["loss"])
    plt.figure(1)
    plt.subplot(211)
    plt.plot(plotdata["batchsize"],plotdata["avgloss"],'b--')
    plt.xlabel("Minibatch number")
    plt.ylabel("Loss")
    plt.title("Minibatch run vs. Training loss")
    plt.show()


# Start a second session and load a checkpoint
load_epoch = 18
with tf.Session() as sess2:
    sess2.run(tf.global_variables_initializer())
    saver.restore(sess2,savedir+"linermodel.cpkt-"+str(load_epoch))
    print("x = 0.2,z=",sess2.run(z,feed_dict={X:0.2}))

Result:

Epoch: 1 cost= 2.7615378 W= [-0.4050443] b= [0.7777104]
Epoch: 3 cost= 0.3190305 W= [1.4279077] b= [0.292238]
Epoch: 5 cost= 0.12164681 W= [1.9255583] b= [0.10493416]
Epoch: 7 cost= 0.10939674 W= [2.0546331] b= [0.0554641]
Epoch: 9 cost= 0.10886942 W= [2.0880146] b= [0.04265514]
Epoch: 11 cost= 0.10890977 W= [2.0966456] b= [0.03934294]
Epoch: 13 cost= 0.10893204 W= [2.0988786] b= [0.0384862]
Epoch: 15 cost= 0.10893858 W= [2.0994556] b= [0.03826475]
Epoch: 17 cost= 0.10894032 W= [2.0996044] b= [0.03820764]
Epoch: 19 cost= 0.10894078 W= [2.0996432] b= [0.03819278]
Finished
cost= 0.10894085 W= [2.0996497] b= [0.03819029]

x = 0.2,z= [0.45812142]
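
Rather than hard-coding load_epoch, the newest checkpoint in the directory can be located with tf.train.latest_checkpoint. A minimal sketch, reusing the savedir, saver, and graph defined above:

# Find the most recent checkpoint file under savedir (returns None if there is none)
kpt = tf.train.latest_checkpoint(savedir)
print(kpt)

with tf.Session() as sess3:
    sess3.run(tf.global_variables_initializer())
    if kpt is not None:
        saver.restore(sess3, kpt)
    print("x = 0.2,z=", sess3.run(z, feed_dict={X: 0.2}))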

4.7 Using MonitoredTrainingSession to save checkpoints on a time schedule
Program:

# tf.train.MonitoredTrainingSession saves checkpoints based on elapsed training time.
# The save_checkpoint_secs parameter sets how many seconds of training pass between
# two checkpoints; if it is not set, the default save interval is 10 minutes.

import tensorflow as tf
tf.reset_default_graph()
global_step = tf.train.get_or_create_global_step()
step = tf.assign_add(global_step,1)
# Set the checkpoint path to log/checkpoints
with tf.train.MonitoredTrainingSession(checkpoint_dir="log/checkpoints",
                                       save_checkpoint_secs=2) as sess:
    print(sess.run([global_step]))
    while not sess.should_stop():
        i = sess.run(step)
        print(i)
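
As written, this loop only ends when the process is interrupted, because nothing ever flips should_stop(). A minimal variant, assuming you want the session to stop itself after a fixed number of steps, attaches a tf.train.StopAtStepHook:

import tensorflow as tf

tf.reset_default_graph()
global_step = tf.train.get_or_create_global_step()
step = tf.assign_add(global_step, 1)
# The hook flips should_stop() once global_step reaches last_step (100 is an arbitrary choice)
with tf.train.MonitoredTrainingSession(checkpoint_dir="log/checkpoints",
                                       save_checkpoint_secs=2,
                                       hooks=[tf.train.StopAtStepHook(last_step=100)]) as sess:
    while not sess.should_stop():
        i = sess.run(step)
        print(i)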

4.9 The difference between get_variable and Variable
Program:

# get_variable is generally used together with variable_scope (variable scopes) to implement shared variables.
'''
get_variable(<name>,<shape>,<initializer>)
'''
import tensorflow as tf
var1 = tf.Variable(1.0,name="firstvar")
print("var1:",var1.name)
var1 = tf.Variable(2.0,name="firstvar")
print("var1:",var1.name)
var2 = tf.Variable(3.0)
print("var2:",var2.name)
var2 = tf.Variable(4.0)
print("var2:",var2.name)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print("var1=",var1.eval())
    print("var2=",var2.eval())
get_var1 = tf.get_variable("firstvar",[1],initializer=tf.constant_initializer(0.3))
print("get_var1:",get_var1.name)

# The program crashes when a second get_variable with the same name is defined,
# which shows that get_variable can only define a variable with a given name once.
# get_var1 = tf.get_variable("firstvar",[1],initializer = tf.constant_initializer(0.4))
# print("get_var1:",get_var1.name)

get_var1 = tf.get_variable("firstvar1",[1],initializer = tf.constant_initializer(0.4))
print("get_var1:",get_var1.name)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print("get_var1=",get_var1.eval())

Result:

var1: firstvar:0
var1: firstvar_1:0
var2: Variable:0
var2: Variable_1:0
var1= 2.0
var2= 4.0
get_var1: firstvar_2:0
get_var1: firstvar1:0
get_var1= [0.4]
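
To observe the failure mode directly without crashing the script, the duplicate get_variable call can be wrapped in try/except; a minimal sketch:

import tensorflow as tf

tf.reset_default_graph()
v = tf.get_variable("firstvar", [1], initializer=tf.constant_initializer(0.3))
try:
    # same name, no reuse: get_variable refuses to define it a second time
    v2 = tf.get_variable("firstvar", [1], initializer=tf.constant_initializer(0.4))
except ValueError as e:
    print("duplicate name rejected:", e)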

4.10 Getting variables under a specific scope
Program:

# Creating two variables with the same name via get_variable does not work; they can be kept apart by placing them in different variable_scopes.


import tensorflow as tf
with tf.variable_scope("test1",):
    var1 = tf.get_variable("firstvar",shape = [2],dtype=tf.float32)
    with tf.variable_scope("test2"):   # scope可以进行嵌套,将此行以及下一行进行缩进。
        var2 = tf.get_variable("firstvar",shape=[2],dtype = tf.float32)
print("var1:",var1.name)
print("var2:",var2.name)
# Variable sharing is achieved through the scope's reuse parameter: variable_scope has a
# reuse=True attribute, which means get_variable will return variables that were already defined
with tf.variable_scope("test1", reuse=True):
    var3 = tf.get_variable("firstvar", shape=[2], dtype=tf.float32)
    with tf.variable_scope("test2"):   # nested scope, as above
        var4 = tf.get_variable("firstvar", shape=[2], dtype=tf.float32)
print("var3:",var3.name)
print("var4:",var4.name)
# The output shows that var1 and var3 are the same variable, and var2 and var4 are the same variable.

Result:

var1: test1/firstvar:0
var2: test1/test2/firstvar:0
var3: test1/firstvar:0
var4: test1/test2/firstvar:0
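
As a side note, TensorFlow 1.4 and later also accept reuse=tf.AUTO_REUSE, which creates the variable on the first call and reuses it on later calls, so the same scope code serves both definition and sharing. A minimal sketch:

import tensorflow as tf

tf.reset_default_graph()

def get_w():
    # AUTO_REUSE: create "w" on the first call, return the existing variable afterwards
    with tf.variable_scope("shared", reuse=tf.AUTO_REUSE):
        return tf.get_variable("w", shape=[2], dtype=tf.float32)

w1 = get_w()
w2 = get_w()
print(w1 is w2)           # True: both calls return the same variable object
print(w1.name, w2.name)   # shared/w:0 shared/w:0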

4.13 Scope restrictions for variables and operators
Demonstrates the "as" usage of variable_scope and the corresponding scopes.
Program:

import tensorflow as tf

with tf.variable_scope("scope1") as sp:
    var1 = tf.get_variable("v",[1])
print("sp:",sp.name)
print("var1:",var1.name)
with tf.variable_scope("scope2"):
    var2 = tf.get_variable("v",[1])
    with tf.variable_scope(sp) as sp1:
        # A scope opened from a captured scope object (the "as" form) is no longer constrained by the enclosing scope.
        var3 = tf.get_variable("v3",[1])

print("sp1:",sp1.name)
print("var2:",var2.name)
print("var3:",var3.name)

with tf.variable_scope("scope"):
    with tf.name_scope("bar"):   # v的命名只受到scope的限制,name_scope只能限制op,
                                 # 不能限制变量的命名
        v = tf.get_variable("v",[1])
        x = 1.0 + v
print("v:",v.name)
print("x.op:",x.op.name)

with tf.variable_scope("scope2"):
    var2 = tf.get_variable("v",[1])

    with tf.variable_scope(sp) as sp1:
        var3 = tf.get_variable("v3",[1])
        with tf.variable_scope(""):
            var4 = tf.get_variable("V4",[1])

with tf.variable_scope("scope"):
    with tf.name_scope("bar"):
        v = tf.get_variable("v",[1])
        x = 1.0 + v
        with tf.name_scope(""):
            y = 1.0 + v

Result:

sp: scope1
var1: scope1/v:0
sp1: scope1
var2: scope2/v:0
var3: scope1/v3:0
v: scope/v:0
x.op: scope/bar/add
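
The effect of tf.name_scope("") can be checked directly: an empty string resets op naming to the top level, while the variable scope is unaffected. A minimal self-contained sketch, run in a fresh graph:

import tensorflow as tf

tf.reset_default_graph()
with tf.variable_scope("scope"):
    with tf.name_scope("bar"):
        v = tf.get_variable("v", [1])
        x = 1.0 + v              # op created as scope/bar/add
        with tf.name_scope(""):  # back to the top-level name scope
            y = 1.0 + v          # op created as add
print("x.op:", x.op.name)  # scope/bar/add
print("y.op:", y.op.name)  # add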

4.14 Basic graph operations
Program:

# Build graphs
import tensorflow as tf
c = tf.constant(0.0)  # created in the default graph
g = tf.Graph()  # build a new graph
with g.as_default():
    c1 = tf.constant(0.0)
    print(c1.graph)
    print(g)
    print(c.graph)
g2 = tf.get_default_graph()  # get the default graph
print(g2)
tf.reset_default_graph()   # reset the default graph; all resources of the current graph must already be released
g3 = tf.get_default_graph()
print(g3)

# Get a tensor by name
print(c1.name)
t = g.get_tensor_by_name(name="Const:0")   # t is the tensor c1 defined above
print(t)

print("**********************************************")
# Get a node operation
a = tf.constant([[1.0,2.0]])
b = tf.constant([[1.0],[3.0]])
tensor1 = tf.matmul(a,b,name="exampleop")
print(tensor1.name,tensor1)
test = g3.get_tensor_by_name("exampleop:0")  # fetch the op's output tensor by its name
print(test)
print("00 ",tensor1.op.name)
testop = g3.get_operation_by_name("exampleop")  # fetch the operation itself by its name
print(testop)

with tf.Session() as sess:
    test = sess.run(test)
    print(test)
    test = tf.get_default_graph().get_tensor_by_name("exampleop:0")
    print(test)

# List the operations in the graph
tt2 = g.get_operations()   # g contains only one constant, so only one item is printed
print(tt2)

# Get an element as a graph object
tt3 = g.as_graph_element(c1)  # tt3 refers to the tensor named "Const:0"
print(tt3)
