A simple fully-connected network

import tensorflow as tf
import numpy as np

# training data: 100 points on the line y = 0.3*x + 0.4
x = np.linspace(0, 10, 100)[:, None]
Y = 0.3 * x + 0.4
# build the computation graph
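# placeholders declare graph inputs whose batch dimension is left open;
# concrete arrays are bound to them through feed_dict at sess.run time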
tf_x = tf.placeholder(tf.float32,(None,1))
tf_y = tf.placeholder(tf.float32,(None,1))

w1 = tf.Variable(tf.random_normal([1, 2]))
w2 = tf.Variable(tf.random_normal([2, 2]))
w3 = tf.Variable(tf.random_normal([2, 1]))
bias1 = tf.Variable(tf.random_normal([2]))
bias2 = tf.Variable(tf.random_normal([2]))
bias3 = tf.Variable(tf.random_normal([1]))

a = tf.matmul(tf_x, w1) + bias1
b = tf.matmul(a, w2) + bias2
y = tf.matmul(b, w3) + bias3

loss = tf.reduce_mean(tf.square(y-tf_y))
train = tf.train.GradientDescentOptimizer(0.001).minimize(loss)
# training: the main point is how to feed data at each step of the loop
with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    
    steps = 1000
    for i in range(steps):
        sess.run(train, feed_dict={tf_x: x, tf_y: Y})
        if i % 100 == 0:
            # loss is a node computed from the placeholders,
            # so it has to be fed here as well
            print('loss:\n', sess.run(loss, feed_dict={tf_x: x, tf_y: Y}))
            print('w2:\n', sess.run(w2))  # a variable can be read without feeding
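
One thing worth noting: stacking matmuls with no activation in between still collapses to a single linear map, so the graph above can only fit a straight line (which happens to match this data). A minimal sketch of where a nonlinearity would go, reusing the variables defined above (tf.nn.relu is my choice, not part of the original):

h1 = tf.nn.relu(tf.matmul(tf_x, w1) + bias1)  # nonlinearity after layer 1
h2 = tf.nn.relu(tf.matmul(h1, w2) + bias2)    # nonlinearity after layer 2
y = tf.matmul(h2, w3) + bias3                 # keep the output linear for regression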
        

Adding an exponentially decaying learning rate:

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

seed = 1
totalnumexample = 100
rng = np.random.RandomState(seed)
X = np.float32(rng.rand(totalnumexample, 2))
# label is 1 when x1 + x2 < 1, otherwise 0
Y = np.float32([[float((x1 + x2) < 1)] for (x1, x2) in X])
# scatter the two classes in different colors
idx0 = np.where(Y == 0)[0]
idx1 = np.where(Y == 1)[0]
plt.plot(X[idx0, 0], X[idx0, 1], 'ro')
plt.plot(X[idx1, 0], X[idx1, 1], 'bo')
plt.show()
BATCH_SIZE = 20
LEARNING_RATE_BASE = 0.1
LEARNING_RATE_DECAY = 0.9
LEARNING_STEP = 2
# step counter; starts at 0 and is advanced by the optimizer below
global_step = tf.Variable(0, trainable=False)

tf_x = tf.placeholder(dtype=tf.float32,shape=(None,2))
tf_y = tf.placeholder(dtype=tf.float32,shape=(None,1))

w1 = tf.Variable(tf.random_normal([2, 3]), dtype=tf.float32)
w2 = tf.Variable(tf.random_normal([3, 1]), dtype=tf.float32)
bias = tf.Variable(0.0, dtype=tf.float32)

a = tf.matmul(tf_x,w1)+bias
y_ = tf.matmul(a,w2)

loss = tf.reduce_mean(tf.square(y_-tf_y))
learning_rate = tf.train.exponential_decay(learning_rate=LEARNING_RATE_BASE,
                                           global_step=global_step,
                                           decay_steps=LEARNING_STEP,
                                           decay_rate=LEARNING_RATE_DECAY,
                                           staircase=True)
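# under the hood this evaluates
#   learning_rate = LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (global_step / decay_steps)
# and staircase=True truncates global_step / decay_steps to an integer,
# so the rate drops in discrete jumps every LEARNING_STEP training steps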

# passing global_step lets minimize() increment the counter,
# which is what actually drives the decay schedule
train = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)

with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    
    STEPS = 30000
    for i in range(STEPS):
        # slide a BATCH_SIZE window cyclically over the 100 examples
        start = (i * BATCH_SIZE) % totalnumexample
        end = start + BATCH_SIZE
        sess.run(train, feed_dict={tf_x: X[start:end], tf_y: Y[start:end]})

        if i % 500 == 0:
            print('learning_rate:\n', sess.run(learning_rate))
            print('loss is:\n', sess.run(loss, feed_dict={tf_x: X, tf_y: Y}))
            print('w1:\n', sess.run(w1))
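
As a quick sanity check on the schedule, the staircase rate can be recomputed by hand with the same constants (a standalone sketch, separate from the graph above):

for step in (0, 2, 4, 10):
    lr = LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (step // LEARNING_STEP)
    print(step, lr)  # 0.1, 0.09, 0.081, then 0.059049 at step 10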
