import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
sess = tf.Session()

# Batch gradient descent demo: fit a linear model y_hat = w*x + b where the
# inputs are drawn from N(1, 0.1) and every target is the constant 10.0,
# so training should drive w*1 + b toward ~10.
x_vals = np.random.normal(1, 0.1, 100).reshape([100, 1])
y_vals = np.repeat(10.0, 100).reshape([100, 1])

# Model placeholders: `None` in the first dimension accepts any batch size.
x_data = tf.placeholder(dtype=tf.float32, shape=[None, 1])
y_target = tf.placeholder(dtype=tf.float32, shape=[None, 1])
w = tf.Variable(tf.random_normal(shape=[1, 1]))
b = tf.Variable(tf.random_normal(shape=[1, 1]))

# Linear prediction and mean-squared-error loss averaged over the batch.
y_hat = tf.add(tf.matmul(x_data, w), b)
loss = tf.reduce_mean(tf.square(y_hat - y_target))

# Optimizer: learning rate 0.02, plain SGD.
my_opt = tf.train.GradientDescentOptimizer(0.02)
train_step = my_opt.minimize(loss)

# Initialize AFTER the train op is built so any optimizer-created slot
# variables would also be covered by the initializer (the original script
# ran the initializer before defining the optimizer, which only works
# because plain SGD creates no slots).
init = tf.global_variables_initializer()
sess.run(init)

# Training loop: feed the full 100-sample batch each step and record the
# loss so convergence can be inspected/plotted afterwards.
# Fix: the original line here was truncated mid-statement; the feed_dict
# must supply both placeholders, and `batch_cache` was never populated.
batch_cache = []
for i in range(100):
    sess.run(train_step, feed_dict={x_data: x_vals, y_target: y_vals})
    batch_loss = sess.run(loss, feed_dict={x_data: x_vals, y_target: y_vals})
    batch_cache.append(batch_loss)
# TensorFlow practice series -- linear regression (batch and stochastic)
# (blog metadata: latest recommended article published 2022-06-15 23:42:44)