# A simple introductory training example: linear fit with gradient descent.
# Minimal linear-regression training loop (TensorFlow 1.x graph/session API):
# fit y = k*x + b to synthetic data generated from y = 0.1*x + 0.2.
x_data = np.random.rand(100)           # 100 random inputs in [0, 1)
y_data = x_data * 0.1 + 0.2            # ground truth: slope 0.1, intercept 0.2

# Trainable parameters, both initialized to 0.
b = tf.Variable(0.)
k = tf.Variable(0.)
y = x_data * k + b                     # model prediction

# Mean squared error between prediction and target.
loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.2)  # learning rate 0.2
train = optimizer.minimize(loss)

init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    # BUG FIX: original was `print(los)` — `los` is undefined (NameError).
    # Evaluate the loss tensor in the session and print the initial loss.
    print(sess.run(loss))
    for step in range(201):
        sess.run(train)
        if step % 20 == 0:
            # Report the current estimates of slope k and intercept b.
            print(step, sess.run([k, b]))
# A simple neural network for (nonlinear) regression on y = x^2 + noise.
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# One-hidden-layer network fitted to y = x^2 + Gaussian noise
# (TensorFlow 1.x graph/session API).
x_data = np.linspace(-0.5, 0.5, 200)[:, np.newaxis]   # shape (200, 1)
print(x_data.shape)
noise = np.random.normal(0, 0.02, x_data.shape)       # same shape as x_data
print(noise.shape)
y_data = np.square(x_data) + noise                    # targets: x^2 + noise

# Placeholders: any batch size, one feature / one target per sample.
x = tf.placeholder(tf.float32, [None, 1])
y = tf.placeholder(tf.float32, [None, 1])

# Hidden layer: 1 input -> 10 units, tanh activation.
# FIX: renamed the misspelled local `weigthts_l1` to `weights_l1`
# for consistency with `weights_l2` below.
weights_l1 = tf.Variable(tf.random_normal([1, 10]))
biases_l1 = tf.Variable(tf.zeros([1, 10]))
wx_plus_b_l1 = tf.matmul(x, weights_l1) + biases_l1
l1 = tf.nn.tanh(wx_plus_b_l1)

# Output layer: 10 hidden units -> 1 output, tanh activation.
weights_l2 = tf.Variable(tf.random_normal([10, 1]))
biases_l2 = tf.Variable(tf.zeros([1, 1]))
wx_plus_b_l2 = tf.matmul(l1, weights_l2) + biases_l2
prediction = tf.nn.tanh(wx_plus_b_l2)

# Mean squared error, minimized with plain gradient descent (lr = 0.1).
loss = tf.reduce_mean(tf.square(prediction - y))
optimizer = tf.train.GradientDescentOptimizer(0.1)
train = optimizer.minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Full-batch training for 2000 steps.
    for _ in range(2000):
        sess.run(train, feed_dict={x: x_data, y: y_data})
    # Debug output: the hidden layer's pre-activation values.
    l1_value = sess.run(wx_plus_b_l1, feed_dict={x: x_data})
    print(l1_value)
    # Plot the fitted curve (red line) over the noisy training data.
    prediction_value = sess.run(prediction, feed_dict={x: x_data})
    plt.figure()
    plt.scatter(x_data, y_data)
    plt.plot(x_data, prediction_value, 'r-', lw=5)
    plt.show()