TensorFlow 2

# The examples below were written against the TF1 graph/session API; under
# TensorFlow 2 they run via the v1 compatibility layer.
import tensorflow.compat.v1 as tf
import numpy as np

tf.disable_v2_behavior()

def test1():
    #create data
    x_data=np.random.rand(100).astype(np.float32)
    y_data=x_data*0.1+0.3
    
    #create tensorflow structure
    Weights=tf.Variable(tf.random_uniform([1],-1.0,1.0)) # 1-D, uniform in [-1, 1)
    biases=tf.Variable(tf.zeros([1]))
    
    y=Weights*x_data+biases
    
    loss=tf.reduce_mean(tf.square(y-y_data))
    
    # build an optimizer to reduce the loss; every iteration refines the parameters
    optimizer=tf.train.GradientDescentOptimizer(0.5) # learning rate < 1
    train=optimizer.minimize(loss)
    
    # initialize the variables
    init=tf.global_variables_initializer()
    
    with tf.Session() as sess:
        sess.run(init)
        #train
        for step in range(201):
            sess.run(train)
            if step%20==0:
                print(step,sess.run(Weights),sess.run(biases))
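
# For reference: the same linear fit in native TensorFlow 2 eager mode with
# tf.GradientTape -- a minimal sketch, assuming plain TF 2.x in a fresh process
# (disable_v2_behavior() above turns eager execution off globally).
def test1_tf2():
    import tensorflow as tf2  # assumes eager execution, the TF 2.x default
    x_data = np.random.rand(100).astype(np.float32)
    y_data = x_data * 0.1 + 0.3
    weights = tf2.Variable(tf2.random.uniform([1], -1.0, 1.0))
    biases = tf2.Variable(tf2.zeros([1]))
    optimizer = tf2.keras.optimizers.SGD(learning_rate=0.5)
    for step in range(201):
        with tf2.GradientTape() as tape:
            loss = tf2.reduce_mean(tf2.square(weights * x_data + biases - y_data))
        grads = tape.gradient(loss, [weights, biases])
        optimizer.apply_gradients(zip(grads, [weights, biases]))
        if step % 20 == 0:
            print(step, weights.numpy(), biases.numpy())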

def test2():
    node1 = tf.constant(3.0, dtype=tf.float32)
    node2 = tf.constant(4.0)  # also tf.float32 implicitly
    print(node1, node2)
    sess = tf.Session()
    print(sess.run([node1, node2]))
    node3 = tf.add(node1, node2)
    print("node3:", node3)
    print("sess.run(node3):", sess.run(node3))
    
    a = tf.placeholder(tf.float32)
    b = tf.placeholder(tf.float32)
    adder_node = a + b  # + provides a shortcut for tf.add(a, b)
    print(sess.run(adder_node, {a:3, b:4.5}))
    print(sess.run(adder_node, {a: [1,3], b: [2,4]}))
    add_and_triple = adder_node * 3.
    print(sess.run(add_and_triple, {a:3, b:4.5}))
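
# In TensorFlow 2 the placeholder/feed_dict pattern becomes ordinary function
# arguments; a minimal sketch of add_and_triple as a tf.function (again
# assuming plain TF 2.x eager mode in a fresh process):
def test2_tf2():
    import tensorflow as tf2

    @tf2.function
    def add_and_triple(a, b):
        return (a + b) * 3.0

    print(add_and_triple(tf2.constant(3.0), tf2.constant(4.5)).numpy())  # 22.5
    print(add_and_triple(tf2.constant([1.0, 3.0]), tf2.constant([2.0, 4.0])).numpy())  # [ 9. 21.]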


def test3():
    W = tf.Variable([.3], dtype=tf.float32)
    b = tf.Variable([-.3], dtype=tf.float32)
    x = tf.placeholder(tf.float32)
    linear_model = W*x + b    
    init = tf.global_variables_initializer()
    
    sess = tf.Session()
    sess.run(init)
    
    print(sess.run(linear_model, {x: [1,2,3,4]}))
    print(sess.run(linear_model, {x: [[1,2],[3,4]]}))
    
    y = tf.placeholder(tf.float32)
    squared_deltas = tf.square(linear_model - y)
    loss = tf.reduce_sum(squared_deltas)
    loss1 = tf.reduce_mean(squared_deltas)
    
    print(sess.run(linear_model, {x:[1,2,3,4]}))
    print(sess.run(squared_deltas, {x: [1,2,3,4], y: [0, -1, -2, -3]}))
    print(sess.run(loss, {x: [1,2,3,4], y: [0, -1, -2, -3]}))
    print(sess.run(loss/4, {x: [1,2,3,4], y: [0, -1, -2, -3]}))
    print(sess.run(loss1, {x: [1,2,3,4], y: [0, -1, -2, -3]}))

def test4():
    b = tf.Variable([-.3], dtype=tf.float32)
    fixb = tf.assign(b, [1.])
    
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)
    print(sess.run(fixb))
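
# Under TF 2.x the same update is a method on the variable itself; a minimal
# equivalent sketch (eager mode, fresh process):
def test4_tf2():
    import tensorflow as tf2
    b = tf2.Variable([-0.3], dtype=tf2.float32)
    b.assign([1.0])   # in-place update; returns the variable
    print(b.numpy())  # [1.]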
    

def test5():
    mnist = tf.keras.datasets.mnist
    (x_train, y_train),(x_test, y_test) = mnist.load_data()
    print(type(x_train))
    print(type(y_train))
    print(type(x_test))
    print(type(y_test))
    x_train, x_test = x_train / 255.0, x_test / 255.0
    
    model = tf.keras.models.Sequential([
      tf.keras.layers.Flatten(input_shape=(28, 28)),
      tf.keras.layers.Dense(512, activation=tf.nn.relu),
      tf.keras.layers.Dropout(0.2),
      tf.keras.layers.Dense(10, activation=tf.nn.softmax)
    ])
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    
    model.fit(x_train, y_train, epochs=5)
    model.evaluate(x_test, y_test)
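    # a small illustrative addition: model.predict returns the softmax
    # probabilities, and argmax over axis 1 gives the predicted digit
    probs = model.predict(x_test[:1])   # shape (1, 10)
    print(np.argmax(probs, axis=1))     # predicted class of the first test image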

def test6():
    # specify the dtype; most TF ops work with float32 data
    input1 = tf.placeholder(tf.float32)
    input2 = tf.placeholder(tf.float32)
    
    # Renamed in TensorFlow 1.0:
    # tf.mul -> tf.multiply
    # tf.sub -> tf.subtract
    # tf.neg -> tf.negative
    output = tf.multiply(input1, input2)
    
    with tf.Session() as sess:
        # placeholder values are fed in at sess.run() time
        print(sess.run(output, feed_dict={input1: [7.], input2: [2.]}))


def add_layer(inputs,in_size,out_size,activation_function=None):
    #Weights is a matrix of shape [in_size, out_size]
    Weights=tf.Variable(tf.random_normal([in_size,out_size])) #normal distribution
    #a non-zero initial value is recommended, hence the +0.1; one row, out_size columns
    biases=tf.Variable(tf.zeros([1,out_size])+0.1)
    #Wx + b, the pre-activation value
    Wx_plus_b=tf.matmul(inputs,Weights)+biases

    #apply the activation

    if activation_function is None:
        #no activation function means a plain linear layer
        outputs=Wx_plus_b
    else:
        outputs=activation_function(Wx_plus_b)
    return outputs
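
# add_layer is essentially a fully connected layer. For comparison, a sketch of
# the TF 2.x Keras equivalent (illustrative only; assumes plain TF 2.x):
def dense_layer_tf2(out_size):
    import tensorflow as tf2
    return tf2.keras.layers.Dense(
        units=out_size,
        activation=tf2.nn.relu,  # or None for a linear layer
        kernel_initializer='random_normal',
        bias_initializer=tf2.keras.initializers.Constant(0.1))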

def test7():
    """定义数据形式"""
    # (-1,1)之间,有300个单位,后面的是维度,x_data是有300行(300个例子)
    x_data=np.linspace(-1,1,300)[:,np.newaxis]
    # 加噪声,均值为0,方差为0.05,大小和x_data一样
    print(x_data.shape)
    noise=np.random.normal(0,0.05,x_data.shape)
    y_data=np.square(x_data)-0.5+noise
    
    xs=tf.placeholder(tf.float32,[None,1])
    ys=tf.placeholder(tf.float32,[None,1])
    
    """建立网络"""
    #定义隐藏层,输入1个节点,输出10个节点
    l1=add_layer(xs,1,10,activation_function=tf.nn.relu)
    #定义输出层
    prediction=add_layer(l1,10,1,activation_function=None)
    
    """预测"""
    #损失函数,算出的是每个例子的平方,要求和(reduction_indices=[1],按行求和),再求均值
    loss=tf.reduce_mean(tf.reduce_sum(tf.square(ys-prediction),reduction_indices=[1]))
    
    """训练"""
    #优化算法,minimize(loss)以0.1的学习率对loss进行减小
    train_step=tf.train.GradientDescentOptimizer(0.1).minimize(loss)
    
    init=tf.global_variables_initializer()
    
    with tf.Session() as sess:
        sess.run(init)
        for i in range(1000):
            sess.run(train_step,feed_dict={xs:x_data,ys:y_data})
            if i%50==0:
                print(sess.run(loss,feed_dict={xs:x_data,ys:y_data}))
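
# The same regression fits in a few lines of Keras under plain TF 2.x; a
# hedged sketch of an equivalent model (fresh process, eager mode):
def test7_tf2():
    import tensorflow as tf2
    x_data = np.linspace(-1, 1, 300)[:, np.newaxis].astype(np.float32)
    noise = np.random.normal(0, 0.05, x_data.shape).astype(np.float32)
    y_data = np.square(x_data) - 0.5 + noise
    model = tf2.keras.Sequential([
        tf2.keras.layers.Dense(10, activation='relu', input_shape=(1,)),
        tf2.keras.layers.Dense(1)])
    model.compile(optimizer=tf2.keras.optimizers.SGD(0.1), loss='mse')
    model.fit(x_data, y_data, epochs=1000, verbose=0)
    print(model.evaluate(x_data, y_data, verbose=0))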

def test8():
    import matplotlib.pyplot as plt
    """定义数据形式"""
    # (-1,1)之间,有300个单位,后面的是维度,x_data是有300行(300个例子)
    x_data=np.linspace(-1,1,300)[:,np.newaxis]
    # 加噪声,均值为0,方差为0.05,大小和x_data一样
    noise=np.random.normal(0,0.05,x_data.shape)
    y_data=np.square(x_data)-0.5+noise
    
    xs=tf.placeholder(tf.float32,[None,1])
    ys=tf.placeholder(tf.float32,[None,1])

    """建立网络"""
    #定义隐藏层,输入1个节点,输出10个节点
    l1=add_layer(xs,1,10,activation_function=tf.nn.relu)
    #定义输出层
    prediction=add_layer(l1,10,1,activation_function=None)
    
    """预测"""
    #损失函数,算出的是每个例子的平方,要求和(reduction_indices=[1],按行求和),再求均值
    loss=tf.reduce_mean(tf.reduce_sum(tf.square(ys-prediction),reduction_indices=[1]))
    
    """训练"""
    #优化算法,minimize(loss)以0.1的学习率对loss进行减小
    train_step=tf.train.GradientDescentOptimizer(0.1).minimize(loss)
    
    init=tf.global_variables_initializer()

    with tf.Session() as sess:
        sess.run(init)
    
        fig=plt.figure()
        #continuously updated plot
        ax=fig.add_subplot(1,1,1)
        ax.scatter(x_data,y_data)
        # interactive mode, so drawing does not block
        plt.ion()
        # a bare plt.show() would block after a single draw; plt.show(block=False)
        # also works, but interactive mode via plt.ion() is more convenient
        for i in range(1000):
            sess.run(train_step,feed_dict={xs:x_data,ys:y_data})
            if i%50==0:
                # print(sess.run(loss,feed_dict={xs:x_data,ys:y_data}))
                # remove the previously drawn line before plotting the new one;
                # on the first pass no line exists yet, so the except swallows the error
                try:
                    lines[0].remove()
                except Exception:
                    pass
                prediction_value=sess.run(prediction,feed_dict={xs:x_data})
                lines=ax.plot(x_data,prediction_value,'r-',lw=5) #lw: line width
    
                # pause for 0.1 s
                plt.pause(0.1)
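
# When test8 runs as a script, the window closes as soon as training finishes.
# A common optional finishing touch (not in the original) is to block at the end:
#
#     plt.ioff()
#     plt.show()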

    
#test1()
#test2() 
#test3()
#test4()
#test5()    
#test6()
#test7()
test8()

 
