# Use the iris dataset: implement forward propagation and backpropagation, and visualize the loss curve
# Import the required modules
import tensorflow as tf
from sklearn import datasets  # read the dataset from sklearn's datasets package
from matplotlib import pyplot as plt
import numpy as np
# Load the data: input features and labels, respectively
x_data = datasets.load_iris().data  # returns all input features of the iris dataset
y_data = datasets.load_iris().target  # returns all labels of the iris dataset
# Shuffle the data randomly (the raw data is ordered by class; leaving it ordered hurts accuracy)
# seed: the random seed, an integer; once set, the same random sequence is generated every time
np.random.seed(116)  # using the same seed keeps input features and labels paired one-to-one
np.random.shuffle(x_data)
np.random.seed(116)
np.random.shuffle(y_data)
tf.random.set_seed(116)
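# Illustrative check (added, not in the original): resetting the seed before each shuffle
# makes np.random.shuffle apply the same permutation to both arrays, so row i of x_data
# still corresponds to label i of y_data.
_a, _b = np.arange(5), np.arange(5)
np.random.seed(116)
np.random.shuffle(_a)
np.random.seed(116)
np.random.shuffle(_b)
assert (_a == _b).all()  # identical permutation, so feature/label pairs stay aligned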
# Split the data into a training set and a test set that never overlap: the first 120 rows for training, the last 30 for testing
x_train = x_data[:-30]
y_train = y_data[:-30]
x_test = x_data[-30:]
y_test = y_data[-30:]
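# Quick shape check (illustrative addition, not in the original): the 150 iris samples
# split into 120 for training and 30 for testing.
print("train:", x_train.shape, y_train.shape, "test:", x_test.shape, y_test.shape)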
# Cast x to float32; otherwise the matrix multiplication below raises an error because the dtypes do not match
x_train = tf.cast(x_train, tf.float32)
x_test = tf.cast(x_test, tf.float32)
# from_tensor_slices pairs input features with their labels one-to-one
# Form [feature, label] pairs and feed the network a small batch at a time: every 32 pairs are packed into one batch, and the network consumes data batch by batch
train_db = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(32)
test_db = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(32)
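# Peek at one batch (illustrative addition, not in the original): each element of
# train_db is a (features, labels) pair, and batch(32) groups 32 samples per step.
for _xb, _yb in train_db.take(1):
    print("one batch:", _xb.shape, _yb.shape)  # expect (32, 4) and (32,)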
# Define all trainable parameters of the network. There are 4 input features, so the input layer has 4 nodes; it is a 3-class problem, so the output layer has 3 neurons
# tf.Variable() marks the parameters as trainable
w1 = tf.Variable(tf.random.truncated_normal([4, 3], stddev=0.1, seed=1))  # 4 input features and 3 output nodes (one per class), so w1 is a 4x3 tensor
b1 = tf.Variable(tf.random.truncated_normal([3], stddev=0.1, seed=1))  # b1 must match the output dimension of w1
# At this point the network structure is essentially in place
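# Forward-pass sanity check (illustrative addition, not in the original):
# logits = x @ w1 + b1 maps a (batch, 4) feature matrix to (batch, 3) class scores.
for _xb, _ in train_db.take(1):
    print("logits shape:", (tf.matmul(_xb, w1) + b1).shape)  # expect (32, 3)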
lr = 0.1  # learning rate 0.1
train_loss_results = []  # record the loss of every epoch here, to provide data for the loss curve later
test_acc = []  # record the accuracy of every epoch here, to provide data for the acc curve later
epoch = 500  # train for 500 epochs
loss_all = 0  # each epoch runs 4 steps (120 samples / batches of 32); loss_all accumulates the 4 step losses
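# Tiny worked example (illustrative addition, not in the original): how the pieces of
# the loss fit together. softmax turns logits into a probability distribution, one_hot
# turns a label into a vector of the same shape, and MSE compares the two element-wise.
_demo_prob = tf.nn.softmax(tf.constant([[2.0, 0.5, -1.0]]))  # each row sums to 1
_demo_onehot = tf.one_hot([0], depth=3)                      # [[1., 0., 0.]]
print("demo mse:", tf.reduce_mean(tf.square(_demo_onehot - _demo_prob)).numpy())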
# Training
# Two nested loops iterate over the data; a with block updates the parameters, and the current loss is printed
for epoch in range(epoch):  # dataset-level loop: one pass over the dataset per epoch
    for step, (x_train, y_train) in enumerate(train_db):  # batch-level loop: one batch per step
        with tf.GradientTape() as tape:  # the with block records gradient information
            y = tf.matmul(x_train, w1) + b1  # the network's multiply-accumulate operation
            y = tf.nn.softmax(y)  # make the output y a probability distribution (now on the same scale as the one-hot labels, so their difference gives the loss)
            y_ = tf.one_hot(y_train, depth=3)  # convert the labels to one-hot format, which makes computing the loss convenient
            loss = tf.reduce_mean(tf.square(y_ - y))  # mean-squared-error loss: MSE = mean((y_ - y)^2)
            loss_all += loss.numpy()  # accumulate the total loss
        # Compute the gradient of the loss with respect to each parameter
        grads = tape.gradient(loss, [w1, b1])  # partial derivatives of loss with respect to w1 and b1
        w1.assign_sub(lr * grads[0])  # w1 and b1 update themselves along the gradient
        b1.assign_sub(lr * grads[1])
    # Print the loss information once per epoch
    print("Epoch {}, loss: {}".format(epoch, loss_all / 4))  # print the loss after this epoch
    train_loss_results.append(loss_all / 4)  # record the average loss over the 4 steps
    loss_all = 0  # reset loss_all, ready to record the next epoch's loss
    # Testing
    # total_correct counts correctly predicted samples, total_number counts all test samples; initialize both
    total_correct, total_number = 0, 0
    for x_test, y_test in test_db:
        # Predict with the updated parameters
        y = tf.matmul(x_test, w1) + b1  # forward propagation computes y, the prediction
        y = tf.nn.softmax(y)  # make y a probability distribution
        pred = tf.argmax(y, axis=1)  # index of the largest value in y, i.e. the predicted class
        pred = tf.cast(pred, dtype=y_test.dtype)  # cast pred to the dtype of y_test
        correct = tf.cast(tf.equal(pred, y_test), dtype=tf.int32)  # correct = 1 if the prediction is right, else 0; cast the bool to int to match the labels
        correct = tf.reduce_sum(correct)  # sum the correct predictions within each batch
        total_correct += int(correct)
        total_number += x_test.shape[0]  # total_number is the number of test samples, i.e. the rows of x_test; shape[0] returns the row count
    acc = total_correct / total_number  # overall accuracy = total_correct / total_number
    test_acc.append(acc)
    print("Test_acc:", acc)
    print("-----------------------------------")
# Visualize acc/loss: draw the loss curve
plt.title('Loss Function Curve')  # figure title
plt.xlabel('Epoch')  # x-axis label
plt.ylabel('Loss')  # y-axis label
plt.plot(train_loss_results, label="$Loss$")  # plot the train_loss_results values point by point and connect them
plt.legend()  # draw the legend
plt.show()  # display the figure
# Draw the accuracy curve
plt.title('Acc Curve')  # figure title
plt.xlabel('Epoch')  # x-axis label
plt.ylabel('Acc')  # y-axis label
plt.plot(test_acc, label="$Accuracy$")
plt.legend()
plt.show()
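A variant worth noting (my assumption, not the original author's method): for a softmax output, cross-entropy is the more conventional loss than MSE. It drops in by replacing the single loss line inside the GradientTape block:

# hypothetical drop-in replacement for the MSE line inside the GradientTape block
loss = tf.reduce_mean(tf.keras.losses.categorical_crossentropy(y_, y))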
Run results
2021-04-16 21:06:38.265639: W tensorflow/stream_executor/platform/default/dso_loader.cc:55] Could not load dynamic library 'cudart64_101.dll'; dlerror: cudart64_101.dll not found
2021-04-16 21:06:38.266035: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.
2021-04-16 21:06:41.569743: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library nvcuda.dll
2021-04-16 21:06:42.438846: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1561] Found device 0 with properties:
pciBusID: 0000:01:00.0 name: GeForce MX350 computeCapability: 6.1
coreClock: 1.468GHz coreCount: 5 deviceMemorySize: 2.00GiB deviceMemoryBandwidth: 52.21GiB/s
2021-04-16 21:06:42.440757: W tensorflow/stream_executor/platform/default/dso_loader.cc:55] Could not load dynamic library 'cudart64_101.dll'; dlerror: cudart64_101.dll not found
2021-04-16 21:06:42.442853: W tensorflow/stream_executor/platform/default/dso_loader.cc:55] Could not load dynamic library 'cublas64_10.dll'; dlerror: cublas64_10.dll not found
2021-04-16 21:06:42.444519: W tensorflow/stream_executor/platform/default/dso_loader.cc:55] Could not load dynamic library 'cufft64_10.dll'; dlerror: cufft64_10.dll not found
2021-04-16 21:06:42.446056: W tensorflow/stream_executor/platform/default/dso_loader.cc:55] Could not load dynamic library 'curand64_10.dll'; dlerror: curand64_10.dll not found
2021-04-16 21:06:42.447330: W tensorflow/stream_executor/platform/default/dso_loader.cc:55] Could not load dynamic library 'cusolver64_10.dll'; dlerror: cusolver64_10.dll not found
2021-04-16 21:06:42.448604: W tensorflow/stream_executor/platform/default/dso_loader.cc:55] Could not load dynamic library 'cusparse64_10.dll'; dlerror: cusparse64_10.dll not found
2021-04-16 21:06:42.449566: W tensorflow/stream_executor/platform/default/dso_loader.cc:55] Could not load dynamic library 'cudnn64_7.dll'; dlerror: cudnn64_7.dll not found
2021-04-16 21:06:42.450519: W tensorflow/core/common_runtime/gpu/gpu_device.cc:1598] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.
Skipping registering GPU devices...
2021-04-16 21:06:42.452004: I tensorflow/core/platform/cpu_feature_guard.cc:143] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
2021-04-16 21:06:42.461470: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x22415ed6e70 initialized for platform Host (this does not guarantee that XLA will be used). Devices:
2021-04-16 21:06:42.462016