TensorFlow Implementation of Linear Regression (Three Examples)

flyfish

All three examples use the TensorFlow 1.x API (placeholders and sessions).

Related posts:
The theory behind linear regression, with separate implementations in plain Python and in C++
PyTorch implementation of linear regression (the recommended version, covering model training, saving, inference, etc.)
Download page for the source code of the PyTorch version

The first TensorFlow linear regression example

This example fits y = Wx + b to synthetic data generated from y = 2x, feeding one sample per training step.

import numpy as np
import tensorflow as tf

# Linear regression model: y = Wx + b
x = tf.placeholder(tf.float32, [None, 1])
W = tf.Variable(tf.zeros([1, 1]))
b = tf.Variable(tf.zeros([1]))
product = tf.matmul(x, W)
y = product + b
y_ = tf.placeholder(tf.float32, [None, 1])

# Cost function: mean squared error (least squares)
cost = tf.reduce_mean(tf.square(y_ - y))

# Train with gradient descent to minimize the cost.
# The learning rate is tiny because x grows as large as 999,
# so the gradients get large; a bigger rate would diverge.
train_step = tf.train.GradientDescentOptimizer(0.0000001).minimize(cost)

sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
steps = 1000
for i in range(steps):
    # Create simulated data for the model y = W*x + b with W = 2, b = 0
    xs = np.array([[i]])
    ys = np.array([[2 * i]])
    # Train
    feed = {x: xs, y_: ys}
    sess.run(train_step, feed_dict=feed)
    print("After %d iteration:" % i)
    print("W: %f" % sess.run(W))
    print("b: %f" % sess.run(b))
    print("cost: %f" % sess.run(cost, feed_dict=feed))

# W should be close to 2, and b should be close to 0

The final iteration prints:

After 999 iteration:
W: 1.999989
b: 0.010990
cost: 0.000000
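
While the session is still open, the trained graph can also be used for prediction by feeding a new input through the x placeholder. A minimal sketch (10.0 is an arbitrary test value, not part of the original script):

x_new = np.array([[10.0]])
print(sess.run(y, feed_dict={x: x_new}))  # should print a value near [[20.]]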

feed is a dictionary.
A dictionary consists of keys paired with their corresponding values (each key-value pair is called an item).
Each key is separated from its value by a colon (:), items are separated from each other by commas (,),
and the whole dictionary is enclosed in a pair of curly braces. An empty dictionary is just the two braces: {}
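
For instance, in ordinary Python (a throwaway illustration, not part of the model):

person = {"name": "flyfish", "age": 30}  # two items; each item is key: value
print(person["name"])                    # look up a value by its key
empty = {}                               # an empty dictionary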

The second TensorFlow linear regression example

This example predicts blood fat content from two input features, weight and age, so W has shape [2, 1].

import tensorflow as tf

# Two input features (weight, age), so W has shape [2, 1]
W = tf.Variable(tf.zeros([2, 1]), name="weights")
b = tf.Variable(0., name="bias")

def inference(X):
    # The model: y = XW + b
    return tf.matmul(X, W) + b

def loss(X, Y):
    # Note: Y has shape (25,) while Y_predicted has shape (25, 1), so the
    # difference broadcasts to (25, 25); reshaping Y to [25, 1] would give the
    # exact sum of squared errors. The outputs recorded below come from the
    # code as written here.
    Y_predicted = inference(X)
    return tf.reduce_sum(tf.squared_difference(Y, Y_predicted))

def inputs():
    # Each row is (weight, age); the labels are blood fat content
    weight_age = [[84, 46], [73, 20], [65, 52], [70, 30], [76, 57], [69, 25], [63, 28], [72, 36], [79, 57], [75, 44],
                  [27, 24], [89, 31], [65, 52], [57, 23], [59, 60], [69, 48], [60, 34], [79, 51], [75, 50], [82, 34],
                  [59, 46], [67, 23], [85, 37], [55, 40], [63, 30]]

    blood_fat_content = [354, 190, 405, 263, 451, 302, 288, 385, 402, 365, 209, 290, 346, 254, 395, 434, 220, 374, 308,
                         220, 311, 181, 274, 303, 244]

    return tf.to_float(weight_age), tf.to_float(blood_fat_content)

def train(total_loss):
    learning_rate = 0.0000001
    return tf.train.GradientDescentOptimizer(learning_rate).minimize(total_loss)


def evaluate(sess, X, Y):
    # Predict blood fat for two unseen (weight, age) pairs
    print(sess.run(inference([[80., 25.]])), sess.run(inference([[65., 25.]])))  # [[320.6497]] [[267.78183]]

with tf.Session() as sess:
    tf.global_variables_initializer().run()
    X, Y = inputs()
    train_op = train(loss(X, Y))

    for step in range(1000):
        sess.run([train_op])

    evaluate(sess, X, Y)
    # No explicit sess.close() is needed: the with block closes the session
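
The training loop above runs silently. A minimal sketch of how one might watch the loss converge, reusing the same graph-building functions (the print interval of 100 is an arbitrary choice):

with tf.Session() as sess:
    tf.global_variables_initializer().run()
    X, Y = inputs()
    total_loss = loss(X, Y)      # build the loss op once and reuse it
    train_op = train(total_loss)

    for step in range(1000):
        # Fetch the loss in the same run call that applies the update
        _, loss_value = sess.run([train_op, total_loss])
        if step % 100 == 0:
            print("step %d, loss %f" % (step, loss_value))

    evaluate(sess, X, Y)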

The third TensorFlow linear regression example

This example trains on the Boston housing data: 506 rows of 14 space-separated values each, the first 13 being features and the last the house price target.

import tensorflow as tf
import numpy as np

UCI_TRAIN_DATA = None
UCI_TEST_DATA = None
UCI_LABEL_DATA = None

# The 13 Boston housing features; the 14th column of the file is the
# target (the house price)
feature_names = [
    'CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX',
    'PTRATIO', 'B', 'LSTAT', 'convert'
]

def load_data(filename, feature_num=14, ratio=0.8):

    # Load the data; values are separated by whitespace
    data = np.fromfile(filename, sep=' ')
    print(data.shape)                    # (7084,)
    print(data.shape[0])                 # 7084
    print(data.shape[0] / feature_num)   # 506.0
    data = data.reshape(int(data.shape[0] / feature_num), feature_num)
    print(data.shape)                    # (506, 14)

    # Per-column maximum, minimum and mean, used for normalization
    maximums, minimums, avgs = data.max(axis=0), data.min(axis=0), data.sum(
        axis=0) / data.shape[0]

    # Normalize every column (features and target alike)
    for i in range(feature_num):
        data[:, i] = (data[:, i] - avgs[i]) / (maximums[i] - minimums[i])

    # Split into training and test sets
    offset = int(data.shape[0] * ratio)
    global UCI_TRAIN_DATA
    global UCI_TEST_DATA
    global UCI_LABEL_DATA

    UCI_TRAIN_DATA = data[:offset]         # first 80% of the rows
    UCI_TEST_DATA = data[offset:]          # remaining 20% of the rows
    UCI_LABEL_DATA = data[:offset, 13:14]  # last column (the target) of the training rows

load_data(r"H:\1.data")


# The first 13 columns are the input features; the last column is the
# target, so it is excluded from the model input
x_data = tf.cast(UCI_TRAIN_DATA[:, :13], dtype=tf.float32)
y_data = tf.cast(UCI_LABEL_DATA, dtype=tf.float32)
print(x_data.shape)

# Create variables for the weights and the bias
W = tf.Variable(tf.zeros([13, 1]), name="weights", dtype=tf.float32)
b = tf.Variable(0.0, name="biases", dtype=tf.float32)

y = tf.add(tf.matmul(x_data, W), b)

# Minimize the mean squared error
loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.0000001)  # gradient descent, learning rate 1e-7
train = optimizer.minimize(loss)

# Initialize the TensorFlow variables
init = tf.global_variables_initializer()

# Run the data-flow graph
sess = tf.Session()
sess.run(init)

# Iterate to fit W and b
for step in range(1000):
    sess.run(train)

print(sess.run(W), sess.run(b))
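
UCI_TEST_DATA is split off above but never used. A minimal sketch of how the trained W and b could be scored on it (building these ops after training is fine here, because the variables already hold their fitted values):

# Mean squared error on the held-out 20% of the rows
x_test = tf.cast(UCI_TEST_DATA[:, :13], dtype=tf.float32)
y_test = tf.cast(UCI_TEST_DATA[:, 13:14], dtype=tf.float32)
test_pred = tf.add(tf.matmul(x_test, W), b)
test_loss = tf.reduce_mean(tf.square(test_pred - y_test))
print("test MSE: %f" % sess.run(test_loss))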