喝杯82年的枸杞水,做做笔记,方便查看
import tensorflow as tf
import numpy as np
x_data = np.float32(np.random.rand(2 , 100 ))
y_data = np.dot([0.100 , 0.200 ], x_data) + 0.300
b = tf.Variable(tf.zeros([1 ]))
W = tf.Variable(tf.random_uniform([1 , 2 ], -1.0 , 1.0 ))
y = tf.matmul(W, x_data) + b
loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5 )
train = optimizer.minimize(loss)
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
for step in xrange(0 , 201 ):
sess.run(train)
if step % 20 == 0 :
print step, sess.run(W), sess.run(b)
WARNING:tensorflow:From /home/weigeng/anaconda2/lib/python2.7/site-packages/tensorflow/python/util/tf_should_use.py:107: initialize_all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.
Instructions for updating:
Use `tf.global_variables_initializer` instead.
0 [[-0.00221175 0.8407715 ]] [0.0390307]
20 [[0.11183201 0.34245312]] [0.22016391]
40 [[0.1092842 0.234086 ]] [0.2773372]
60 [[0.10342202 0.20855658]] [0.29370892]
80 [[0.10106204 0.20221566]] [0.29827392]
100 [[0.10030775 0.20058464]] [0.29952937]
120 [[0.10008632 0.20015599]] [0.2998721]
140 [[0.10002381 0.20004189]] [0.29996532]
160 [[0.10000651 0.20001128]] [0.2999906]
180 [[0.10000179 0.20000306]] [0.29999745]
200 [[0.1000005 0.20000085]] [0.2999993]
test =tf.random_uniform([4 ,4 ],-2 ,2 )
test2 = tf.random_normal([4 ,4 ],0 ,1 )
with tf.Session() as sess:
print sess.run(test)
print '\n'
print sess.run(test2)
[[-1.483726 -1.8030691 -0.88796806 1.350472 ]
[ 0.52741146 0.86454153 -1.935019 -0.31493664]
[ 1.610815 0.30505228 0.17704058 0.8154669 ]
[ 1.7214847 -0.8812132 1.031498 -1.1097679 ]]
[[-0.98970735 0.8842772 0.39722115 0.4981374 ]
[-0.15018556 0.5516848 -1.375233 -0.86003536]
[ 0.12483446 0.48834983 0.9083735 0.7975909 ]
[-0.21340893 -0.05240062 -0.47427958 0.39051178]]
笔记
class GradientDescentOptimizer(tensorflow.python.training.optimizer.Optimizer)
| Optimizer that implements the gradient descent algorithm.
|
| Method resolution order:
| GradientDescentOptimizer
| tensorflow.python.training.optimizer.Optimizer
| __builtin__.object
|
| Methods defined here:
|
| __init__(self, learning_rate, use_locking=False, name='GradientDescent')
| Construct a new gradient descent optimizer.
|
| Args:
| learning_rate: A Tensor or a floating point value. The learning
| rate to use.
| use_locking: If True use locks for update operations.
| name: Optional name prefix for the operations created when applying
| gradients. Defaults to "GradientDescent".
例子：tf.reduce_mean() 及其他 tf.reduce_* 归约函数
# Worked example of the TensorFlow reduction ops (tf.reduce_mean / tf.reduce_max).
# NOTE(review): this is a REPL transcript — the bare lines between the
# sees.run(...) calls are the printed results, not executable code.
x= tf.constant([1 ,2 ,3 ,4 ])  # rank-1 int32 tensor
x1 =tf.constant([[1 ,2 ,3 ],[4 ,5 ,6 ]])  # 2x3 int32 matrix
# Mean over ALL elements of x: (1+2+3+4)/4 -> 2 (int32 dtype, so the mean is truncated).
a=tf.reduce_mean(x)
# Maximum over all elements of x -> 4.
b= tf.reduce_max(x)
# Mean along axis 0 (down the columns of x1) -> [2, 3, 4].
c= tf.reduce_mean(x1,0 )
# Mean along axis 1 (across each row): [(1+2+3)/3, (4+5+6)/3] -> [2, 5].
d= tf.reduce_mean(x1,1 )
# Column-wise maximum (axis 0) -> [4, 5, 6].
e=tf.reduce_max(x1,0 )
sees = tf.Session()  # NOTE(review): 'sees' is presumably a typo for 'sess'; kept as in the transcript
sees.run(a)
2
sees.run(b)
4
sees.run(c)
array([2, 3, 4], dtype=int32)
sees.run(d)
array([2, 5], dtype=int32)
sees.run(e)
array([4, 5, 6], dtype=int32)
sees.close()