tf2单层神经网络mnist手写数字识别

40 篇文章 0 订阅
34 篇文章 0 订阅
  • tf2的mnist的xtrain和xtest取值范围0~255,需要除以255,否则y_pre全是nan
  • 需要把mnist.npz放在C:\Users\Administrator\.keras\datasets（即用户目录下的 .keras\datasets 文件夹）
import tensorflow.compat.v1 as tf
import tensorflow as tf2
tf.disable_v2_behavior()
import numpy as np
from tensorflow.keras.datasets import mnist
import matplotlib.pyplot as plt

#func是激活函数
def add_layer(inputs,in_size,out_size,func=None):
    """Append one fully connected layer and return its output tensor.

    func is the activation function; when None, the raw affine output
    (inputs @ W + b) is returned unchanged.
    """
    weights = tf.Variable(tf2.random.normal([in_size, out_size]))
    biases = tf.Variable(tf2.zeros([1, out_size]) + .1)
    linear = tf2.matmul(inputs, weights) + biases
    # No activation supplied -> identity; otherwise apply it.
    return linear if not func else func(linear)


def run(x_data,y_data):
    """Build a one-layer softmax classifier, train with SGD, plot accuracy.

    x_data/y_data are used only to size the placeholders; training batches
    come from the module-level x_train/y_train and accuracy is measured on
    the module-level x_test/y_test. NOTE(review): the parameters are
    otherwise unused — confirm this asymmetry is intended.

    Side effects: exposes pred, xs, ys as globals for compute_accuracy,
    runs ops on the module-level session `se`, and shows a matplotlib plot.
    """
    global pred,xs,ys
    xs=tf.placeholder(tf2.float32,[None,x_data.shape[-1]])
    ys=tf.placeholder(tf2.float32,[None,10])

    pred=add_layer(xs,xs.shape[-1],10,tf2.nn.softmax)

    # Cross entropy: sum over the class axis, then mean over the batch.
    # clip_by_value keeps pred away from 0 so tf.log never yields -inf/NaN
    # when the softmax saturates (inputs must also be scaled to 0~1,
    # see the module notes about dividing by 255).
    cross_entropy=tf.reduce_mean(
        -tf.reduce_sum(ys*tf.log(tf.clip_by_value(pred,1e-10,1.0)),
                       reduction_indices=[1]))

    train_step=tf.train.GradientDescentOptimizer(.3).minimize(cross_entropy)

    se.run(tf.global_variables_initializer())
    results=[]
    for i in range(2000):
        # Sample a mini-batch without replacement: tf2 datasets have no
        # next_batch helper, and np.random.randint could repeat indices.
        random_index=np.random.choice(x_train.shape[0], 10, replace=False)
        batch_xs,batch_ys=x_train[random_index],y_train[random_index]

        se.run(train_step,feed_dict={xs:batch_xs,ys:batch_ys})

        # Record test-set accuracy every 50 steps.
        if i%50==0:
            acc=compute_accuracy(x_test,y_test)
            results.append((i,acc))
            print(acc)
    results=np.array(results)
    plt.scatter(results[:,0],results[:,1])
    plt.show()

def compute_accuracy(v_xs,v_ys):
    """Return the fraction of rows whose predicted class matches v_ys.

    Runs the global `pred` op on v_xs via the module-level session `se`,
    then compares per-row argmax in NumPy. The original version built
    fresh tf.argmax/equal/reduce_mean ops on every call, which grew the
    graph without bound; the NumPy comparison is equivalent and leak-free.
    """
    y_pre=se.run(pred,feed_dict={xs:v_xs})
    return float(np.mean(np.argmax(y_pre,axis=1)==np.argmax(v_ys,axis=1)))



if __name__=="__main__":
    # Load MNIST; when offline, mnist.npz must already sit under
    # ~/.keras/datasets (see the module notes above).
    (x_train, y_train), (x_test, y_test)=mnist.load_data()
    # Flatten 28x28 images to 784-dim rows and rescale pixels from 0~255
    # to 0~1 — without the division the softmax saturates and the
    # predictions become NaN.
    x_train=x_train.reshape(x_train.shape[0],-1)/255
    x_test=x_test.reshape(x_test.shape[0],-1)/255
    se =tf.Session()
    # One-hot encode the integer labels with an identity-matrix lookup
    # (no graph op needed). The original also kept an unused second copy
    # of the whole dataset (mym=mnist.load_data()) — removed.
    y_train=np.eye(10)[y_train]
    y_test=np.eye(10)[y_test]

    run(x_test,y_test)

在这里插入图片描述（图：训练迭代次数与测试集准确率的散点图，由上面 plt.scatter 绘制）
上面的代码可以简化成下面

  • tf.layers.dense用来添加全连接层
import tensorflow.compat.v1 as tf
import tensorflow as tf2
tf.disable_v2_behavior()
import numpy as np
from tensorflow.keras.datasets import mnist
import matplotlib.pyplot as plt


def run(x_data,y_data):
    """One-layer MNIST classifier built with tf.layers.dense.

    Fixes vs. the original: (1) the body mixed tabs and spaces, a TabError
    under Python 3; (2) the dense layer applied softmax and then fed the
    result into softmax_cross_entropy_with_logits, which expects raw
    logits — the double softmax flattens the gradients; (3) the
    per-sample loss vector was never reduced to a scalar mean.

    x_data/y_data only size the placeholders; training uses the
    module-level x_train/y_train and accuracy uses x_test/y_test.
    Exposes pred (now logits — argmax-compatible with compute_accuracy),
    xs and ys as globals, and runs on the module-level session `se`.
    """
    global pred,xs,ys
    xs=tf.placeholder(tf2.float32,[None,x_data.shape[-1]])
    ys=tf.placeholder(tf2.float32,[None,10])

    # No activation here: softmax_cross_entropy_with_logits applies the
    # softmax internally and needs unscaled logits.
    pred=tf.layers.dense(inputs=xs,units=10,activation=None)

    cross_entropy=tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=ys,logits=pred))
    train_step=tf.train.GradientDescentOptimizer(.3).minimize(cross_entropy)

    se.run(tf.global_variables_initializer())
    results=[]
    for i in range(2000):
        # Random mini-batch of 10 without replacement.
        random_index=np.random.choice(x_train.shape[0], 10, replace=False)
        batch_xs,batch_ys=x_train[random_index],y_train[random_index]

        se.run(train_step,feed_dict={xs:batch_xs,ys:batch_ys})

        if i%50==0:
            acc=compute_accuracy(x_test,y_test)
            results.append((i,acc))
            print(acc)
    results=np.array(results)
    plt.scatter(results[:,0],results[:,1])
    plt.show()

def compute_accuracy(v_xs,v_ys):
    """Return the fraction of rows whose predicted class matches v_ys.

    Runs the global `pred` op on v_xs via the module-level session `se`,
    then compares per-row argmax in NumPy. The original built fresh
    tf.argmax/equal/reduce_mean ops on every call, growing the graph
    without bound; the NumPy comparison is equivalent and leak-free.
    """
    y_pre=se.run(pred,feed_dict={xs:v_xs})
    return float(np.mean(np.argmax(y_pre,axis=1)==np.argmax(v_ys,axis=1)))



if __name__=="__main__":
    # MNIST arrives as 28x28 uint8 images with integer class labels.
    (x_train, y_train), (x_test, y_test)=mnist.load_data()

    # Flatten each image to a 784-dim row and rescale 0~255 -> 0~1.
    x_train=x_train.reshape(len(x_train),-1)/255
    x_test=x_test.reshape(len(x_test),-1)/255

    # One-hot encode the labels via an identity-matrix lookup.
    eye=np.eye(10)
    y_train=eye[y_train]
    y_test=eye[y_test]

    se =tf.Session()

    run(x_test,y_test)
  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值