2.线性回归(Tensorflow)

1.代码是基于上一节做的一些拓展,加上了:(1) 数据模型的保存与重新加载;(2) TensorBoard 可视化。

import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np

# Accumulates training history for the final loss plot:
# "batchsize" collects the epoch index at each checkpoint,
# "loss" collects the full-dataset loss measured at that epoch.
plotdata = {"batchsize":[],"loss":[]}

# Trailing-window smoother used for the loss curve.
def moving_average(a, W=10):
    """Smooth the sequence *a* with a trailing window of size *W*.

    The first W entries are passed through unchanged; from index W on,
    each entry is replaced by the mean of the W values that precede it
    (the current value itself is excluded from the window).
    """
    smoothed = []
    for idx, value in enumerate(a):
        if idx < W:
            smoothed.append(value)
        else:
            smoothed.append(sum(a[idx - W:idx]) / W)
    return smoothed

# Placeholders for the input feature X and the target Y
# (fed either one sample at a time or as the whole batch).
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)

# Synthetic training set: y = 2x plus Gaussian noise (sigma = 0.3).
train_x = np.linspace(-1, 1, 100)
train_y = 2 * train_x + np.random.randn(*train_x.shape) * 0.3
# plt.plot(train_x, train_y, "ro", label="original data")
# plt.legend()
# plt.show()

# Forward graph: y = w * X + b.
# BUG FIX: the original passed tf.float32 as tf.Variable's second positional
# argument, which is `trainable`, not `dtype`; pass the dtype by keyword.
w = tf.Variable(tf.truncated_normal([1]), dtype=tf.float32)
b = tf.Variable(tf.zeros([1]))
y = tf.multiply(w, X) + b
# Record the prediction distribution for TensorBoard.
tf.summary.histogram("y", y)
# Backward pass: mean-squared-error loss minimized with plain SGD.
loss = tf.reduce_mean(tf.square(y - Y))
tf.summary.scalar("loss_function", loss)
learn_rate = 0.01
optimizer = tf.train.GradientDescentOptimizer(learn_rate).minimize(loss)


if __name__ == "__main__":
    # Initialize variables, train for 20 epochs, and checkpoint every other epoch.
    init = tf.global_variables_initializer()
    saver = tf.train.Saver(max_to_keep=2)  # keep only the two newest checkpoints
    save_dir = "./model/"
    with tf.Session() as sess:
        sess.run(init)
        merged_summary_op = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter("./model/mnist_with", sess.graph)
        step = 0  # one tick per (epoch, sample) pair so summary records do not collide
        for i in range(20):
            for (x_, y_) in zip(train_x, train_y):
                # BUG FIX: the original ran the optimizer and the summary op in
                # two separate sess.run calls, executing the graph twice per
                # sample (and logging the state *after* the update). Fetching
                # both in a single run avoids the extra pass.
                _, summary_str = sess.run([optimizer, merged_summary_op],
                                          feed_dict={X: x_, Y: y_})
                # BUG FIX: summaries were all written with the epoch index, so
                # the 100 per-sample records of one epoch shared a single step.
                summary_writer.add_summary(summary_str, step)
                step += 1
            if i % 2 == 0:
                saver.save(sess, save_dir + "model.ckpt", global_step=i)
                # Loss over the full training set at this checkpoint.
                cost = sess.run(loss, feed_dict={X: train_x, Y: train_y})
                print("It's {} epoch loss is {}".format((i + 1), cost))
                plotdata["loss"].append(cost)
                plotdata["batchsize"].append(i)

        # Spot-check the trained model on a single input.
        print("x =0.2 y = {}".format(sess.run(y, feed_dict={X: 0.2})))

        # Scatter of the raw data against the fitted line.
        plt.plot(train_x, train_y, "ro", label="original data")
        plt.plot(train_x, sess.run(w) * train_x + sess.run(b), label="train data")
        plt.legend()
        plt.show()

        # Smoothed loss curve over the recorded checkpoints.
        plotdata["avgloss"] = moving_average(plotdata["loss"])
        print(plotdata["avgloss"])
        plt.figure(1)
        plt.subplot(211)
        plt.plot(plotdata["batchsize"], plotdata["avgloss"], "b--")
        plt.xlabel("batch")
        plt.ylabel("Loss")
        plt.title("batch and Loss")
        plt.show()

  1. 模型重载
import tensorflow as tf
import linear_04

if __name__ == "__main__":
    # Importing linear_04 rebuilds the graph (its module-level code defines
    # w, b, X and y); we only need to restore the trained weights into it.
    saver = tf.train.Saver()
    save_dir = "./model/"
    with tf.Session() as sess:
        # BUG FIX: the original ran global_variables_initializer() before
        # restoring. restore() itself assigns every saved variable, and the
        # extra init could silently mask a failed restore with random weights.
        saver.restore(sess, save_dir + "model.ckpt-" + str(18))
        result = sess.run(linear_04.y, feed_dict={linear_04.X: 1})
        print("The result is {}".format(result))


  • 1
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值