tensorflow-basic --- Linear Regression

"""
Created on Sat Jul  7 17:30:15 2018

@author: 

"""

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt

#Randomly generate 1000 points scattered around the line y=0.1x+0.3
num_points=1000
vectors_set=[]
for i in range(num_points):
    x1=np.random.normal(0.0,0.55)#draw from a Gaussian with mean 0.0 and standard deviation 0.55
    y1=x1*0.1+0.3+np.random.normal(0.0,0.03)#y = w*x + b, plus a little Gaussian noise
    vectors_set.append([x1,y1])


#Pull the x and y samples out of the generated points
x_data=[v[0] for v in vectors_set]
y_data=[v[1] for v in vectors_set]

plt.scatter(x_data,y_data,c='r')
plt.show()
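
As an aside, the same data could be generated without the Python loop, using vectorized NumPy calls. A minimal sketch (an equivalent replacement for the loop and the two list comprehensions above, not part of the original post; it produces arrays instead of lists):

#Vectorized alternative to the sampling loop above (sketch)
x_data=np.random.normal(0.0,0.55,num_points)
y_data=x_data*0.1+0.3+np.random.normal(0.0,0.03,num_points)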

#Create a 1-D W variable, initialized with a uniform random value in [-1,1]
W=tf.Variable(tf.random_uniform([1],-1.0,1.0),name='W')
#Create a 1-D b variable, initialized to 0
b=tf.Variable(tf.zeros([1]),name='b')
#Compute the predicted value y
y=W * x_data + b

#Use the mean squared error between the prediction y and the actual y_data as the loss
loss=tf.reduce_mean(tf.square(y-y_data),name='loss')
#Optimize the parameters with gradient descent (learning rate 0.5)
optimizer=tf.train.GradientDescentOptimizer(0.5)
#Training just means minimizing this loss
train=optimizer.minimize(loss,name='train')
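
For intuition, each run of train applies one gradient-descent update, W <- W - lr*dL/dW and b <- b - lr*dL/db, with lr=0.5. A hand-rolled NumPy sketch of that single update (for illustration only; TensorFlow derives and applies these gradients automatically):

#One manual gradient-descent step on the same MSE loss (NumPy sketch, not part of the original code)
def gd_step(W_val,b_val,x,y,lr=0.5):
    x,y=np.asarray(x),np.asarray(y)
    err=W_val*x+b_val-y          #prediction error
    grad_W=np.mean(2*err*x)      #dL/dW for loss = mean(err^2)
    grad_b=np.mean(2*err)        #dL/db
    return W_val-lr*grad_W, b_val-lr*grad_b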

sess=tf.Session()
#Initialize all global variables
init=tf.global_variables_initializer()
sess.run(init)

#What the initial W and b look like
print(W,b,loss)
print ("W=",sess.run(W),"b=",sess.run(b),"loss=",sess.run(loss))

#Run 20 training steps
for step in range(20):
    sess.run(train)
    #Print the trained W and b after each step
    print ("W=",sess.run(W),"b=",sess.run(b),"loss=",sess.run(loss))

<tf.Variable 'W:0' shape=(1,) dtype=float32_ref> <tf.Variable 'b:0' shape=(1,) dtype=float32_ref> Tensor("loss:0", shape=(), dtype=float32)
W= [-0.6047149] b= [0.] loss= 0.22027968
W= [-0.41722918] b= [0.2925514] loss= 0.072782986
W= [-0.27756947] b= [0.29451796] loss= 0.03905845
W= [-0.17580093] b= [0.2959829] loss= 0.021150958
W= [-0.101643] b= [0.2970504] loss= 0.011642213
W= [-0.04760466] b= [0.29782826] loss= 0.006593141
W= [-0.00822733] b= [0.29839507] loss= 0.00391212
W= [0.02046664] b= [0.29880813] loss= 0.0024885177
W= [0.0413757] b= [0.2991091] loss= 0.0017325951
W= [0.05661198] b= [0.29932842] loss= 0.0013312061
W= [0.06771455] b= [0.29948825] loss= 0.0011180713
W= [0.0758049] b= [0.29960468] loss= 0.0010048981
W= [0.08170028] b= [0.29968956] loss= 0.0009448042
W= [0.0859962] b= [0.2997514] loss= 0.00091289484
W= [0.0891266] b= [0.29979646] loss= 0.000895951
W= [0.0914077] b= [0.2998293] loss= 0.0008869541
W= [0.09306993] b= [0.29985324] loss= 0.00088217674
W= [0.09428117] b= [0.29987067] loss= 0.0008796401
W= [0.0951638] b= [0.29988337] loss= 0.000878293
W= [0.09580696] b= [0.29989263] loss= 0.00087757764
W= [0.09627564] b= [0.29989937] loss= 0.000877198
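
After 20 steps W has converged close to the true slope 0.1 and b close to the true intercept 0.3. To see the result, one could re-plot the data together with the fitted line and cross-check against a closed-form least-squares fit. A sketch, assuming the session above is still open (not part of the original post):

#Plot the fitted line over the sample points (sketch)
plt.scatter(x_data,y_data,c='r')
xs=np.sort(x_data)
plt.plot(xs,sess.run(W)*xs+sess.run(b),c='b')
plt.show()

#Cross-check against NumPy's closed-form least-squares fit
w_ls,b_ls=np.polyfit(x_data,y_data,1)
print("least squares: W=",w_ls,"b=",b_ls)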
