TensorFlow Learning Notes (7) --- Implementing a Single-Layer Artificial Neural Network

The complete script below loads the Iris dataset, trains a single-layer softmax classifier (y_hat = softmax(XW + B)) with plain gradient descent, and plots the training- and test-set loss and accuracy curves.

import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Load the Iris training and test sets
TRAIN_URL = "http://download.tensorflow.org/data/iris_training.csv"
train_path = tf.keras.utils.get_file(TRAIN_URL.split('/')[-1], TRAIN_URL)
df_iris_train = pd.read_csv(train_path, header=0)
TEST_URL = "http://download.tensorflow.org/data/iris_test.csv"
test_path = tf.keras.utils.get_file(TEST_URL.split('/')[-1], TEST_URL)
df_iris_test = pd.read_csv(test_path, header=0)
iris_train = np.array(df_iris_train)
iris_test = np.array(df_iris_test)
# Preprocessing: split features/labels, center the features, one-hot encode the labels
x_train = iris_train[:, 0:4]                  # the four flower measurements
y_train = iris_train[:, 4].astype(np.int32)   # labels as integer class indices 0/1/2
x_test = iris_test[:, 0:4]
y_test = iris_test[:, 4].astype(np.int32)
x_train = x_train - np.mean(x_train, axis=0)  # center the features (zero mean)
x_test = x_test - np.mean(x_test, axis=0)
X_train = tf.cast(x_train, tf.float32)        # convert to 32-bit floats
Y_train = tf.one_hot(y_train, 3)              # one-hot encode the labels
X_test = tf.cast(x_test, tf.float32)
Y_test = tf.one_hot(y_test, 3)
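# (Added sketch) Sanity-check the tensor shapes after preprocessing; the downloaded
# Iris splits are assumed to hold 120 training rows and 30 test rows.
print(X_train.shape, Y_train.shape)  # expected: (120, 4) (120, 3)
print(X_test.shape, Y_test.shape)    # expected: (30, 4) (30, 3)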
# Hyperparameters
learn_rate = 0.5
iter = 50
display_step = 10
# Initialize the model parameters: W is (4, 3) because there are 4 features and 3 classes
np.random.seed(612)
W = tf.Variable(np.random.randn(4, 3), dtype=tf.float32)  # random values from a normal distribution
B = tf.Variable(np.zeros([3]), dtype=tf.float32)          # biases are usually initialized to zero
# Training loop
acc_train = []
acc_test = []
cce_train = []
cce_test = []

for i in range(0, iter + 1):
    with tf.GradientTape() as tape:
        PRED_train = tf.nn.softmax(tf.matmul(X_train, W) + B)
        loss_train = tf.reduce_mean(tf.keras.losses.categorical_crossentropy(y_true=Y_train, y_pred=PRED_train))
    PRED_test = tf.nn.softmax(tf.matmul(X_test, W) + B)
    loss_test = tf.reduce_mean(tf.keras.losses.categorical_crossentropy(y_true=Y_test, y_pred=PRED_test))
    # Accuracy: fraction of samples whose arg-max prediction matches the true label
    accuracy_train = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(PRED_train, axis=1), tf.cast(y_train, tf.int64)), tf.float32))
    accuracy_test = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(PRED_test, axis=1), tf.cast(y_test, tf.int64)), tf.float32))

    acc_train.append(accuracy_train)
    acc_test.append(accuracy_test)
    cce_train.append(loss_train)
    cce_test.append(loss_test)

    grads = tape.gradient(loss_train,[W,B])
    W.assign_sub(learn_rate * grads[0])  # gradient step on W, a (4, 3) tensor
    B.assign_sub(learn_rate * grads[1])  # gradient step on B, a (3,) tensor
    if i % display_step == 0:
        print("i: %d, train acc: %f, train loss: %f, test acc: %f, test loss: %f" % (i, accuracy_train, loss_train, accuracy_test, loss_test))
# Visualize the loss and accuracy curves
plt.figure(figsize=(10, 3))

plt.subplot(121)
plt.plot(cce_train, color="blue", label="train")
plt.plot(cce_test, color="red", label="test")
plt.xlabel("Iteration")
plt.ylabel("Loss")
plt.legend()

plt.subplot(122)
plt.plot(acc_train, color="blue", label="train")
plt.plot(acc_test, color="red", label="test")
plt.xlabel("Iteration")
plt.ylabel("Accuracy")
plt.legend()

plt.show()
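
As a small usage sketch (an addition, not part of the original post), the trained W and B can be applied directly to new, already-centered feature vectors; here the held-out X_test is reused, and the names test_logits / test_classes are illustrative:

test_logits = tf.matmul(X_test, W) + B                        # raw scores, shape (num_test_samples, 3)
test_classes = tf.argmax(tf.nn.softmax(test_logits), axis=1)  # predicted class index per sample
print(test_classes.numpy()[:5])                               # first five predicted labels (0, 1 or 2)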
