#!/usr/bin/env python
"""Simple linear regression with (pre-1.0) TensorFlow.

Fits a single scalar weight w so that y ~= X * w by per-sample stochastic
gradient descent on squared error; the fitted w should come out close to 2.

NOTE(review): the original file was garbled — it contained a duplicated,
reordered fragment of this script, used C-style ``//`` comments (a
SyntaxError in Python), and had dropped several lines (the numpy import,
the ``X`` placeholder, and the ``trY`` target definition).  This version
reconstructs the single intended script; reconstructed lines are marked
below and should be confirmed against the original tutorial source.
"""
import tensorflow as tf
# numpy for the training data arrays
import numpy as np

# 101 evenly spaced inputs in [-1, 1], endpoints included.
trX = np.linspace(-1, 1, 101)
# Targets: y = 2x plus small Gaussian noise, so the learned weight is ~2.
# NOTE(review): this line was missing from the garbled source; reconstructed
# from the surrounding comments (randn / "should be something around 2").
trY = 2 * trX + np.random.randn(*trX.shape) * 0.33

# Symbolic scalar inputs, fed one (x, y) sample at a time during training.
# NOTE(review): the X placeholder was missing from the garbled source but is
# required by model()/feed_dict below; reconstructed.
X = tf.placeholder("float")
Y = tf.placeholder("float")


def model(X, w):
    """Linear model: the prediction is just X * w."""
    # tf.mul is the pre-1.0 name of tf.multiply.
    return tf.mul(X, w)


# Trainable scalar weight (analogous to theano.shared).
w = tf.Variable(0.0, name="weights")
y_model = model(X, w)

# Squared-error cost for one sample.
cost = tf.square(Y - y_model)

# Gradient descent with learning rate 0.01, minimizing the squared error.
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(cost)

# Launch the graph in a session.
with tf.Session() as sess:
    # Initialize variables (just w here); pre-1.0 API name.
    tf.initialize_all_variables().run()
    for i in range(100):
        # One SGD step per (x, y) training pair.
        for (x, y) in zip(trX, trY):
            sess.run(train_op, feed_dict={X: x, Y: y})
    print(sess.run(w))  # It should be something around 2