TensorFlow Official Documentation Study Notes (1)

Sipping a cup of 1982-vintage goji berry water while taking notes, for easy reference later.

import tensorflow as tf
import numpy as np

# Generate phony data with NumPy: 100 points in total.
x_data = np.float32(np.random.rand(2, 100))  # random inputs
y_data = np.dot([0.100, 0.200], x_data) + 0.300

# Build a linear model: y = W x + b
b = tf.Variable(tf.zeros([1]))
W = tf.Variable(tf.random_uniform([1, 2], -1.0, 1.0))
y = tf.matmul(W, x_data) + b

# Minimize the mean squared error
loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)

# Initialize the variables
init = tf.initialize_all_variables()

# Launch the graph
sess = tf.Session()
sess.run(init)

# Fit the plane
for step in xrange(0, 201):
    sess.run(train)
    if step % 20 == 0:
        print step, sess.run(W), sess.run(b)

WARNING:tensorflow:From /home/weigeng/anaconda2/lib/python2.7/site-packages/tensorflow/python/util/tf_should_use.py:107: initialize_all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.
Instructions for updating:
Use `tf.global_variables_initializer` instead.
0 [[-0.00221175  0.8407715 ]] [0.0390307]
20 [[0.11183201 0.34245312]] [0.22016391]
40 [[0.1092842 0.234086 ]] [0.2773372]
60 [[0.10342202 0.20855658]] [0.29370892]
80 [[0.10106204 0.20221566]] [0.29827392]
100 [[0.10030775 0.20058464]] [0.29952937]
120 [[0.10008632 0.20015599]] [0.2998721]
140 [[0.10002381 0.20004189]] [0.29996532]
160 [[0.10000651 0.20001128]] [0.2999906]
180 [[0.10000179 0.20000306]] [0.29999745]
200 [[0.1000005  0.20000085]] [0.2999993]
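After 200 steps W has converged to roughly [0.1, 0.2] and b to 0.3, which are exactly the coefficients used to generate y_data. Also, as the warning above says, tf.initialize_all_variables is deprecated in this TF 1.x build; the script runs the same with the replacement op. A minimal sketch (only the initializer line changes):

# Deprecation fix suggested by the warning (TF 1.x API)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)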
# Uniform samples in [-2, 2) and standard-normal samples, both 4x4
test = tf.random_uniform([4, 4], -2, 2)
test2 = tf.random_normal([4, 4], 0, 1)
with tf.Session() as sess:
    print sess.run(test)
    print '\n'
    print sess.run(test2)
[[-1.483726   -1.8030691  -0.88796806  1.350472  ]
 [ 0.52741146  0.86454153 -1.935019   -0.31493664]
 [ 1.610815    0.30505228  0.17704058  0.8154669 ]
 [ 1.7214847  -0.8812132   1.031498   -1.1097679 ]]


[[-0.98970735  0.8842772   0.39722115  0.4981374 ]
 [-0.15018556  0.5516848  -1.375233   -0.86003536]
 [ 0.12483446  0.48834983  0.9083735   0.7975909 ]
 [-0.21340893 -0.05240062 -0.47427958  0.39051178]]
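Each run draws fresh samples, so the matrices above will differ from run to run. To make them repeatable you can fix the seeds; a minimal sketch, assuming the TF 1.x seeding API (tf.set_random_seed plus the op-level seed argument):

tf.set_random_seed(42)                           # graph-level seed
test = tf.random_uniform([4, 4], -2, 2, seed=1)  # op-level seed
with tf.Session() as sess:
    print sess.run(test)  # identical values on every run of the script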

Notes: output of help(tf.train.GradientDescentOptimizer)

class GradientDescentOptimizer(tensorflow.python.training.optimizer.Optimizer)
 |  Optimizer that implements the gradient descent algorithm.
 |  
 |  Method resolution order:
 |      GradientDescentOptimizer
 |      tensorflow.python.training.optimizer.Optimizer
 |      __builtin__.object
 |  
 |  Methods defined here:
 |  
 |  __init__(self, learning_rate, use_locking=False, name='GradientDescent')
 |      Construct a new gradient descent optimizer.
 |      
 |      Args:
 |        learning_rate: A Tensor or a floating point value.  The learning
 |          rate to use.
 |        use_locking: If True use locks for update operations.
 |        name: Optional name prefix for the operations created when applying
 |          gradients. Defaults to "GradientDescent".
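Per the signature above, only learning_rate is required and the other arguments have defaults. A minimal sketch passing all three explicitly (loss is the tensor defined in the model above):

optimizer = tf.train.GradientDescentOptimizer(
    learning_rate=0.5,       # a Tensor or a floating point value
    use_locking=False,       # use locks for update operations if True
    name='GradientDescent')  # name prefix for the ops it creates
train = optimizer.minimize(loss)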

Example: tf.reduce_mean() and the other tf.reduce_* ops

x = tf.constant([1, 2, 3, 4])
x1 = tf.constant([[1, 2, 3], [4, 5, 6]])
a = tf.reduce_mean(x)      # mean over all elements
b = tf.reduce_max(x)       # max over all elements
c = tf.reduce_mean(x1, 0)  # mean along axis 0 (down the columns)
d = tf.reduce_mean(x1, 1)  # mean along axis 1 (across the rows)
e = tf.reduce_max(x1, 0)   # max along axis 0
sess = tf.Session()
sess.run(a)
2
sess.run(b)
4
sess.run(c)
array([2, 3, 4], dtype=int32)
sess.run(d)
array([2, 5], dtype=int32)
sess.run(e)
array([4, 5, 6], dtype=int32)
sess.close()
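Note that tf.reduce_mean keeps the input dtype: the mean of the int32 tensor [1, 2, 3, 4] comes out as 2 rather than 2.5 because the division is done in integer arithmetic. Casting to float first gives the exact mean; a quick sketch:

x = tf.constant([1, 2, 3, 4])
mean_f = tf.reduce_mean(tf.cast(x, tf.float32))  # cast before averaging
sess = tf.Session()
print sess.run(mean_f)  # 2.5 instead of the truncated 2
sess.close()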