import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
#图形显示
def moving_average(a, w=10):
    """Return the w-point trailing moving average of sequence ``a``.

    The first ``w`` elements are returned unchanged (not enough history to
    average); element ``idx >= w`` becomes ``mean(a[idx-w:idx])``.  If ``a``
    is shorter than the window, a shallow copy of ``a`` is returned.
    """
    if len(a) < w:
        return a[:]
    # For each position, keep the raw value until a full window of w
    # predecessors exists, then average that trailing window.
    return [val if idx < w else sum(a[(idx - w):idx]) / w
            for idx, val in enumerate(a)]
tf.reset_default_graph()
# Placeholders for the per-step training input and label (scalar floats).
X = tf.placeholder("float")
Y = tf.placeholder("float")
# Dict-style placeholders.  NOTE(review): never used anywhere below, and the
# name looks like a typo for "inputdict" — candidate for removal; verify no
# other file references it before deleting.
inputsict = {
'x': tf.placeholder("float"),
'y': tf.placeholder("float")
}
# Generate simulated training data: y = 2x plus small Gaussian noise.
train_X = np.float32(np.linspace(-1, 1, 100))
train_Y=2*train_X+np.random.randn(*train_X.shape)*0.1 #y=2x
plt.plot(train_X,train_Y,'ro',label='data')
plt.show()
# Reset the graph (left disabled — the graph was already reset above).
#tf.reset_default_graph()
# Model parameters defined directly.  NOTE(review): these are never used by
# the model below (which reads paradict instead), but they are still
# variables in the graph and therefore end up in the saved checkpoint.
W=tf.Variable(tf.random_normal([1]),name="weight")
b=tf.Variable(tf.zeros([1]),name="bias")
# Learnable parameters kept in a dict — these are the ones actually trained.
paradict={
'W':tf.Variable(tf.random_normal([1])),
'b':tf.Variable(tf.zeros([1]))
}
# Forward structure: Z = W * X + b
Z=tf.multiply(X,paradict['W'])+paradict['b']
# Backward pass / optimization: mean-squared-error loss, plain SGD.
cost=tf.reduce_mean(tf.square(Y-Z))
learning_rate=0.01
optimizer=tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# Op that initializes all variables.
init =tf.global_variables_initializer()
# Training hyper-parameters.
training_epochs=20
display_step=2
saver=tf.train.Saver()
savedir="D:/data_savelog/"
# Launch a session, train the model, then save a checkpoint.
with tf.Session() as sess:
    sess.run(init)
    # Collected training curve (epoch index -> loss) for later plotting.
    plotdata = {"batchsize": [], "loss": []}
    # Feed the data into the model one sample at a time (stochastic GD).
    for epoch in range(training_epochs):
        for (x, y) in zip(train_X, train_Y):
            sess.run(optimizer, feed_dict={X: x, Y: y})
        # Periodically report detailed training progress.
        if epoch % display_step == 0:
            loss = sess.run(cost, feed_dict={X: train_X, Y: train_Y})
            print("Epoch:", epoch + 1, "cost=", loss, "W=", sess.run(paradict['W']),
                  "b=", sess.run(paradict['b']))
            # BUGFIX: the original guard was `loss == "NA"`, which compares a
            # float to a string and is always False.  Use a real NaN check so
            # diverged losses are actually skipped.
            if not np.isnan(loss):
                plotdata["batchsize"].append(epoch)
                plotdata["loss"].append(loss)
    print("Finished!")
    # BUGFIX: drop the extra "/" (savedir already ends with one) so the save
    # path is identical to the restore path used below.
    saver.save(sess, savedir + "linermodel.cpkt")
    print("cost=", sess.run(cost, feed_dict={X: train_X, Y: train_Y}),
          "W=", sess.run(paradict['W']), "b=", sess.run(paradict['b']))
    # Show the final bias value.
    print(sess.run(paradict['b']))
# Restore the trained model in a fresh session and run a single prediction.
with tf.Session() as sess2:
    # Initialization is immediately overwritten by the restore; kept for
    # safety in case the checkpoint is partial.
    sess2.run(tf.global_variables_initializer())
    saver.restore(sess2, savedir + "linermodel.cpkt")
    print("x=0.2,Z=", sess2.run(Z, feed_dict={X: 0.2}))