"""Demonstrate the difference between NumPy (eager arrays) and TensorFlow 1.x
(deferred graph execution) by fitting y = 0.1 * x + 0.3 with gradient descent.
"""
import tensorflow as tf
import numpy as np

# Fixed seed so the synthetic data (and the printed output below) is reproducible.
np.random.seed(42)
x_data = np.random.rand(100).astype(np.float32)  # 100 samples drawn from [0, 1)
y_data = x_data * 0.1 + 0.3                      # ground truth: w = 0.1, b = 0.3
print('x_data.shape:', x_data.shape)
print('y_data:', y_data)

# TF1 style: first build a computation graph, then execute it inside a Session.
with tf.Graph().as_default():
    # Trainable parameters: slope w (random uniform init) and intercept b (zeros).
    w = tf.Variable(initial_value=tf.random_uniform([1], -1.0, 1.0))
    b = tf.Variable(initial_value=tf.zeros([1]))
    y_pred = x_data * w + b
    # Mean squared error between predictions and targets.
    loss = tf.reduce_mean(tf.square(y_pred - y_data))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
    train_opt = optimizer.minimize(loss=loss)
    # NOTE: this prints graph nodes (Variables/Tensors/Ops), not numeric values —
    # nothing has executed yet; that is the key contrast with NumPy above.
    print(w, b, loss, train_opt)

    with tf.Session() as sess:
        # TF1 variables must be explicitly initialized before any run() call.
        sess.run(tf.global_variables_initializer())
        for e in range(1, 201):
            # Each run() performs one gradient-descent step and fetches the loss.
            _, loss_ = sess.run([train_opt, loss])
            if e % 20 == 0:
                print('Epoch:{} - Train Loss:{:.5f}'.format(e, loss_))
        # Final learned parameters (should approach w=0.1, b=0.3 with more steps).
        print(sess.run(w), sess.run(b))
# --- Sample program output (pasted console log; commented out so the file parses) ---
# x_data.shape: (100,)
# y_data: [0.33745402 0.39507145 0.3731994 0.35986587 0.3156019 0.31559947
# 0.30580837 0.38661763 0.3601115 0.3708073 0.30205846 0.396991
# 0.38324428 0.32123393 0.3181825 0.31834045 0.33042425 0.35247564
# 0.3431945 0.32912293 0.3611853 0.3139494 0.32921448 0.3366362
# 0.345607 0.37851763 0.3199674 0.35142344 0.35924146 0.30464506
# 0.3607545 0.31705242 0.30650517 0.39488858 0.39656323 0.38083977
# 0.33046138 0.30976722 0.3684233 0.34401527 0.31220382 0.3495177
# 0.30343887 0.39093205 0.32587802 0.36625224 0.33117113 0.35200682
# 0.35467103 0.31848547 0.39695847 0.3775133 0.39394993 0.38948274
# 0.35979 0.39218745 0.30884928 0.3195983 0.30452275 0.33253303
# 0.33886775 0.3271349 0.38287377 0.33567536 0.32809347 0.35426962
# 0.31409243 0.3802197 0.30745506 0.3986887 0.3772245 0.31987157
# 0.30055222 0.38154617 0.37068576 0.37290072 0.37712705 0.3074045
# 0.33584657 0.31158692 0.38631037 0.3623298 0.33308983 0.30635583
# 0.33109826 0.33251834 0.37296063 0.36375576 0.3887213 0.3472215
# 0.31195945 0.37132448 0.37607852 0.35612774 0.37709674 0.34937957
# 0.3522733 0.34275413 0.3025419 0.31078917]
# <tf.Variable 'Variable:0' shape=(1,) dtype=float32_ref> <tf.Variable 'Variable_1:0' shape=(1,) dtype=float32_ref> Tensor("Mean:0", shape=(), dtype=float32) name: "GradientDescent"
# op: "NoOp"
# input: "^GradientDescent/update_Variable/ApplyGradientDescent"
# input: "^GradientDescent/update_Variable_1/ApplyGradientDescent"
# Epoch:20 - Train Loss:0.19688
# [-0.5349532] [0.20760678]
# Epoch:40 - Train Loss:0.08353
# [-0.45615944] [0.33085623]
# Epoch:60 - Train Loss:0.04132
# [-0.40364257] [0.40308142]
# Epoch:80 - Train Loss:0.02525
# [-0.3671751] [0.44446975]
# Epoch:100 - Train Loss:0.01880
# [-0.34055865] [0.46724656]
# Epoch:120 - Train Loss:0.01591
# [-0.32003522] [0.47881463]
# Epoch:140 - Train Loss:0.01435
# [-0.30332527] [0.48365596]
# Epoch:160 - Train Loss:0.01331
# [-0.28904456] [0.48448348]
# Epoch:180 - Train Loss:0.01248
# [-0.27635142] [0.48293874]
# Epoch:200 - Train Loss:0.01176
# [-0.26473287] [0.4800143]
# Process finished with exit code 0