# -*- coding: utf-8 -*-
"""
This is a practise of tensorflow
TIME: 2017/5/2
Author: Eric Lv
E-mail: Eric2014_Lv@sjtu.edu.cn
"""
import tensorflow as tf
import numpy as np
# 创建100个假点x y其中满足 y = x * 0.1 + 0.3
# Create 100 synthetic (x, y) points satisfying y = x * 0.1 + 0.3.
x_data = np.random.rand(100).astype(np.float32)
y_data = x_data * 0.1 + 0.3

# Try to find W and b such that y_data = W * x_data + b.
# We already know the true answer is W = 0.1, b = 0.3; here we let
# TensorFlow recover them via gradient descent.
W = tf.Variable(tf.random_uniform([1], -1.0, 1.0))  # random initial slope in [-1, 1)
b = tf.Variable(tf.zeros([1]))                      # zero initial intercept
y = W * x_data + b

# Minimize the mean squared error between the prediction and y_data.
loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)  # learning rate 0.5
train = optimizer.minimize(loss)

# Variables must be initialized before the graph is run.
# (tf.global_variables_initializer replaces the deprecated
# tf.initialize_all_variables.)
init = tf.global_variables_initializer()

# Launch the graph.
sess = tf.Session()
sess.run(init)  # very important as a start

# Run 2000 gradient-descent steps, reporting progress every 100 steps.
for step in range(2000):
    sess.run(train)
    if step % 100 == 0:
        print(step, ": W is ", sess.run(W), "b is ", sess.run(b))
# 自己尝试手写的 (my own hand-written attempt below):
# -*- coding: utf-8 -*-
"""
This is a practise of tensorflow
TIME: 2017/5/2
Author: Eric Lv
E-mail: Eric2014_Lv@sjtu.edu.cn
"""
import tensorflow as tf
import numpy as np
# creat x_data y_data
# Create synthetic training data satisfying y = 0.1 * x + 0.3.
x_data = np.random.rand(100).astype(np.float32)
y_data = 0.1 * x_data + 0.3

## Start of the TensorFlow graph structure.
weight = tf.Variable(tf.random_uniform([1], -2.0, 2.0))  # random initial slope in [-2, 2)
bias = tf.Variable(tf.zeros([1]))                        # zero initial intercept
y = weight * x_data + bias
loss = tf.reduce_mean(tf.square(y - y_data))             # mean squared error
optimizer = tf.train.GradientDescentOptimizer(0.8)       # create the optimizer (learning rate 0.8)
train = optimizer.minimize(loss)                         # op that applies one descent step
## End of the TensorFlow graph structure.

# Initialize all variables before running the graph.
init = tf.global_variables_initializer()

# Start the session and train for 2001 steps, logging every 100 steps.
sess = tf.Session()
sess.run(init)
for i in range(2001):
    sess.run(train)
    if i % 100 == 0:
        print(i, "W is ", sess.run(weight), "b is ", sess.run(bias))