#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import sklearn as sk
import sklearn.linear_model  # `import sklearn` alone does not load the linear_model submodule
import tensorflow as tf
def main():
    """Fit a 1-D linear model y = w*x + b two ways and compare them.

    First with scikit-learn's closed-form ``LinearRegression``, then with
    1000 steps of gradient descent in TensorFlow (TF1 graph-mode API,
    matching the rest of the file).
    """
    # TODO(review): the original called pd.read_csv() with no path, which is a
    # TypeError — substitute the real input files here.
    # Flatten to 1-D float32 arrays; a DataFrame has no usable .reshape.
    x_data = pd.read_csv("x_data.csv").values.astype(np.float32).reshape(-1)
    y_data = pd.read_csv("y_data.csv").values.astype(np.float32).reshape(-1)

    # Closed-form baseline (original had the typo `sk.liner_model`).
    # sklearn expects a 2-D feature matrix, hence reshape(-1, 1).
    reg = sk.linear_model.LinearRegression()
    reg.fit(x_data.reshape(-1, 1), y_data)

    learning_rate = 0.1

    # Scalar parameters. The original passed an invalid shape [-1, 1] to
    # tf.get_variable together with an initializer tensor; in TF1 a plain
    # tf.Variable wrapping the initializer is the correct form.
    w = tf.Variable(tf.random_uniform([1], -1.0, 1.0), name="weight")
    b = tf.Variable(tf.zeros([1]), name="bias")

    y = w * x_data + b
    # Mean squared error, halved so the gradient carries no stray factor of 2.
    loss = tf.reduce_mean(tf.square(y - y_data)) / 2
    train = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)

    # `with` guarantees the session is closed (the original leaked it), and
    # global_variables_initializer replaces the deprecated
    # initialize_all_variables.
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for _ in range(1000):
            sess.run(train)
        # The original discarded all results; report both fits for comparison.
        print("sklearn:    w=%s b=%s" % (reg.coef_, reg.intercept_))
        print("tensorflow: w=%s b=%s" % (sess.run(w), sess.run(b)))


if __name__ == "__main__":
    main()
# Reprinted from: https://www.cnblogs.com/superqing/p/8659919.html