TensorFlow 实现线性回归

import matplotlib.pyplot as plt;
import numpy as np;
import tensorflow as tf;

class LinearRegression:
    """Fit y = w*x + b to synthetic noisy data with TensorFlow 1.x gradient descent.

    Workflow: CreateDataSet() -> Train() -> Show().
    """

    def __init__(self, datalength=1000, w=2, b=0.5):
        # datalength: number of synthetic samples to generate.
        # w, b: ground-truth slope and intercept used to generate the data.
        self.length = datalength
        self.w = w
        self.b = b

    def CreateDataSet(self):
        """Generate noisy samples of y = w*x + b for x evenly spaced in [0, 1].

        Stores column vectors self.simx and self.simy of shape (length, 1).
        """
        self.simx = np.linspace(0, 1.0, self.length).astype(np.float32)
        # Gaussian noise with sigma = 0.15 around the true line.
        self.simy = self.w * self.simx + self.b + np.random.randn(self.length) * 0.15
        # Reshape to (length, 1) column vectors for feeding into TF placeholders.
        self.simx = np.reshape(self.simx, newshape=[self.length, 1])
        self.simy = np.reshape(self.simy, newshape=[self.length, 1])

    def Train(self):
        """Learn w and b by gradient descent; store results in pred_w / pred_b."""
        W = tf.Variable(tf.abs(tf.random_normal([1])))
        b = tf.Variable(tf.random_normal([1]))

        x = tf.placeholder(tf.float32, name='x')
        # BUG FIX: the original line was missing its closing parenthesis,
        # which made the whole file a syntax error.
        y = tf.placeholder(tf.float32, name='y')

        pred_y = tf.add(tf.multiply(x, W), b)
        sub_loss = tf.abs(pred_y - y)
        # Squared error is used rather than sqrt: sqrt is numerically
        # unstable (overflowing gradients) for values near zero.
        sqrt_loss = tf.pow(sub_loss, 2)
        # Mean rather than sum keeps the loss scale independent of dataset size.
        loss = tf.reduce_mean(0.5 * sqrt_loss)

        optimizer = tf.train.GradientDescentOptimizer(0.01).minimize(loss)

        init = tf.global_variables_initializer()
        with tf.Session() as sess:
            sess.run(init)
            # The feed dict is loop-invariant; build it once (the original
            # rebuilt it on every iteration).
            feed = {x: self.simx, y: self.simy}

            print("w:", sess.run(W), "b:", sess.run(b), "loss:", sess.run(loss, feed))
            for i in range(10000):
                sess.run(optimizer, feed_dict=feed)
                if i % 1000 == 0:
                    print("w:", sess.run(W), "b:", sess.run(b), "loss:", sess.run(loss, feed))
            self.pred_w = sess.run(W)
            self.pred_b = sess.run(b)

    def Show(self):
        """Scatter-plot the data and overlay the fitted line (blocks on plt.show)."""
        plt.figure()
        plt.scatter(self.simx, self.simy, c='g', marker='o')
        plt.plot(self.simx, self.simx * self.pred_w + self.pred_b)
        plt.show()
if __name__ == "__main__":
    # Script entry point: generate synthetic data, fit the line, plot the result.
    regressor = LinearRegression()
    regressor.CreateDataSet()
    regressor.Train()
    regressor.Show()

线性回归原理:

1. 模型: y = wx + b

2. 损失函数: Loss = minimize(∑(yᵢ - ŷᵢ)²),其中 ŷᵢ 为模型预测值

  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值