The simplest TensorFlow training example: fitting y = 2*x + 1

import numpy as np
import tensorflow as tf          # TensorFlow 1.x style API (placeholder / Session)
import matplotlib.pyplot as plt
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# Generate 100 evenly spaced x values in [-1, 1] and a noisy y based on y = 2x + 1
np.random.seed()
x_data = np.linspace(-1, 1, 100)
# noise ~ N(0, 1) scaled by 0.4; * unpacks the tuple x_data.shape == (100,) into separate arguments
y_data = 2 * x_data + 1.0 + np.random.randn(*x_data.shape) * 0.4
plt.scatter(x_data, y_data)
plt.plot(x_data, 1.0 + 2 * x_data, color="red", linewidth=3)  # the true line y = 2x + 1

# Define the x, y placeholders and the model; train for 10 epochs with
# learning rate 0.05 (typically chosen in the 0.01 - 0.1 range from experience)
x = tf.placeholder(tf.float32, name="x")
y = tf.placeholder(tf.float32, name="y")

def model(x, w, b):
    return tf.multiply(x, w) + b

w = tf.Variable(1.0, name="w0")
b = tf.Variable(0.0, name="b0")
pred = model(x, w, b)

learning_rate = 0.05
train_epochs = 10
loss_function = tf.reduce_mean(tf.square(y - pred))  # mean squared error loss
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss_function)  # gradient descent

sess = tf.Session()
init = tf.global_variables_initializer()  # initialize the variables
sess.run(init)

step = 0
loss_list = []
display_step = 10
for epoch in range(train_epochs):
    for xs, ys in zip(x_data, y_data):
        # feed one sample and run one gradient-descent step
        _, loss = sess.run([optimizer, loss_function], feed_dict={x: xs, y: ys})
        loss_list.append(loss)
        step = step + 1
        if step % display_step == 0:
            print("Train Epoch", "%02d" % (epoch + 1), "Step:%03d" % step, "loss=", "{:.9f}".format(loss))

    # read the current parameter values and plot the fitted line after each epoch
    b0temp = b.eval(session=sess)
    w0temp = w.eval(session=sess)
    plt.plot(x_data, w0temp * x_data + b0temp)

plt.show()
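
The loop above plots the fitted line after each epoch but never prints the learned parameters. A minimal sketch of how one might inspect them, reusing sess, w, b and pred from the code above and appended just before plt.show() (the test point x_test is purely illustrative):

# Sketch: inspect the learned parameters; the true values are w = 2.0, b = 1.0
print("w:", sess.run(w))   # expected to end up close to 2.0
print("b:", sess.run(b))   # expected to end up close to 1.0
# predict at an arbitrary (hypothetical) test point
x_test = 0.5
print("predict:", sess.run(pred, feed_dict={x: x_test}))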

 

Result:

Train Epoch 01 Step:010 loss= 0.003854840
Train Epoch 01 Step:020 loss= 0.092242636
Train Epoch 01 Step:030 loss= 0.000011662
Train Epoch 01 Step:040 loss= 0.053957302
Train Epoch 01 Step:050 loss= 0.169548035
Train Epoch 01 Step:060 loss= 0.249787256
Train Epoch 01 Step:070 loss= 0.052432090
Train Epoch 01 Step:080 loss= 0.144824862
Train Epoch 01 Step:090 loss= 0.007296965
Train Epoch 01 Step:100 loss= 0.084371649
Train Epoch 02 Step:110 loss= 0.073044159
Train Epoch 02 Step:120 loss= 0.016456688
Train Epoch 02 Step:130 loss= 0.033787943
Train Epoch 02 Step:140 loss= 0.194783077
Train Epoch 02 Step:150 loss= 0.031424843
Train Epoch 02 Step:160 loss= 0.551763535
Train Epoch 02 Step:170 loss= 0.000003388
Train Epoch 02 Step:180 loss= 0.036442570
Train Epoch 02 Step:190 loss= 0.052268874
Train Epoch 02 Step:200 loss= 0.153601915
Train Epoch 03 Step:210 loss= 0.002324132
Train Epoch 03 Step:220 loss= 0.024705892
Train Epoch 03 Step:230 loss= 0.035549112
Train Epoch 03 Step:240 loss= 0.207767174
Train Epoch 03 Step:250 loss= 0.025149414
Train Epoch 03 Step:260 loss= 0.582034230
Train Epoch 03 Step:270 loss= 0.000295315
Train Epoch 03 Step:280 loss= 0.030603625
Train Epoch 03 Step:290 loss= 0.057934541
Train Epoch 03 Step:300 loss= 0.160382926
Train Epoch 04 Step:310 loss= 0.000868806
Train Epoch 04 Step:320 loss= 0.025478274
Train Epoch 04 Step:330 loss= 0.035699680
Train Epoch 04 Step:340 loss= 0.208881602
Train Epoch 04 Step:350 loss= 0.024651978
Train Epoch 04 Step:360 loss= 0.584624708
Train Epoch 04 Step:370 loss= 0.000353055
Train Epoch 04 Step:380 loss= 0.030134462
Train Epoch 04 Step:390 loss= 0.058425773
Train Epoch 04 Step:400 loss= 0.160961673
Train Epoch 05 Step:410 loss= 0.000778140
Train Epoch 05 Step:420 loss= 0.025543887
Train Epoch 05 Step:430 loss= 0.035712454
Train Epoch 05 Step:440 loss= 0.208975643
Train Epoch 05 Step:450 loss= 0.024610220
Train Epoch 05 Step:460 loss= 0.584843636
Train Epoch 05 Step:470 loss= 0.000358163
Train Epoch 05 Step:480 loss= 0.030094991
Train Epoch 05 Step:490 loss= 0.058467273
Train Epoch 05 Step:500 loss= 0.161010653
Train Epoch 06 Step:510 loss= 0.000770728
Train Epoch 06 Step:520 loss= 0.025549488
Train Epoch 06 Step:530 loss= 0.035713490
Train Epoch 06 Step:540 loss= 0.208983541
Train Epoch 06 Step:550 loss= 0.024606740
Train Epoch 06 Step:560 loss= 0.584862053
Train Epoch 06 Step:570 loss= 0.000358587
Train Epoch 06 Step:580 loss= 0.030091682
Train Epoch 06 Step:590 loss= 0.058470964
Train Epoch 06 Step:600 loss= 0.161014482
Train Epoch 07 Step:610 loss= 0.000770100
Train Epoch 07 Step:620 loss= 0.025549946
Train Epoch 07 Step:630 loss= 0.035713557
Train Epoch 07 Step:640 loss= 0.208984256
Train Epoch 07 Step:650 loss= 0.024606479
Train Epoch 07 Step:660 loss= 0.584863544
Train Epoch 07 Step:670 loss= 0.000358632
Train Epoch 07 Step:680 loss= 0.030091269
Train Epoch 07 Step:690 loss= 0.058471195
Train Epoch 07 Step:700 loss= 0.161015242
Train Epoch 08 Step:710 loss= 0.000770040
Train Epoch 08 Step:720 loss= 0.025550023
Train Epoch 08 Step:730 loss= 0.035713624
Train Epoch 08 Step:740 loss= 0.208984360
Train Epoch 08 Step:750 loss= 0.024606405
Train Epoch 08 Step:760 loss= 0.584863901
Train Epoch 08 Step:770 loss= 0.000358637
Train Epoch 08 Step:780 loss= 0.030091351
Train Epoch 08 Step:790 loss= 0.058471195
Train Epoch 08 Step:800 loss= 0.161015242
Train Epoch 09 Step:810 loss= 0.000770034
Train Epoch 09 Step:820 loss= 0.025550023
Train Epoch 09 Step:830 loss= 0.035713624
Train Epoch 09 Step:840 loss= 0.208984360
Train Epoch 09 Step:850 loss= 0.024606405
Train Epoch 09 Step:860 loss= 0.584863901
Train Epoch 09 Step:870 loss= 0.000358637
Train Epoch 09 Step:880 loss= 0.030091351
Train Epoch 09 Step:890 loss= 0.058471195
Train Epoch 09 Step:900 loss= 0.161015242
Train Epoch 10 Step:910 loss= 0.000770034
Train Epoch 10 Step:920 loss= 0.025550023
Train Epoch 10 Step:930 loss= 0.035713624
Train Epoch 10 Step:940 loss= 0.208984360
Train Epoch 10 Step:950 loss= 0.024606405
Train Epoch 10 Step:960 loss= 0.584863901
Train Epoch 10 Step:970 loss= 0.000358637
Train Epoch 10 Step:980 loss= 0.030091351
Train Epoch 10 Step:990 loss= 0.058471195
Train Epoch 10 Step:1000 loss= 0.161015242
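
From around epoch 7 onward the per-sample losses repeat exactly, which means w and b have essentially converged. For reference, the same fit could be written against the TensorFlow 2.x eager API, where tf.placeholder and tf.Session no longer exist. The sketch below is only an assumed port, not part of the original post; it keeps the 0.05 learning rate and 10 epochs, and uses tf.GradientTape with a plain gradient-descent update:

import numpy as np
import tensorflow as tf  # assumes TensorFlow 2.x (eager execution)

np.random.seed()
x_data = np.linspace(-1, 1, 100).astype(np.float32)
y_data = 2 * x_data + 1.0 + np.random.randn(*x_data.shape).astype(np.float32) * 0.4

w = tf.Variable(1.0)
b = tf.Variable(0.0)
learning_rate = 0.05

for epoch in range(10):
    for xs, ys in zip(x_data, y_data):
        with tf.GradientTape() as tape:
            pred = w * xs + b
            loss = tf.square(ys - pred)        # squared error on one sample
        dw, db = tape.gradient(loss, [w, b])
        w.assign_sub(learning_rate * dw)       # manual gradient-descent update
        b.assign_sub(learning_rate * db)

print("w:", w.numpy(), "b:", b.numpy())        # should end up near 2.0 and 1.0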

 
