1. Deep learning on the MNIST dataset with TensorFlow
from __future__ import absolute_import, division, print_function
from abc import ABC
from tensorflow.keras.datasets import mnist
from tensorflow.keras import Model, layers
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
# MNIST dataset parameters
num_classes = 10 # total number of image classes (digits 0-9)
num_features = 784 # number of features per image, i.e. its pixel count (image size: 28*28 = 784)
# Training parameters
learning_rate = 0.1 # learning rate, controls how fast training converges
training_steps = 2000 # number of training iterations
batch_size = 256 # number of samples used per training step, drawn randomly from the full set
display_step = 100 # interval (in steps) at which training progress is printed
# Network architecture
n_hidden_1 = 128 # number of neurons in the first hidden layer
n_hidden_2 = 256 # number of neurons in the second hidden layer
# Prepare the MNIST dataset for training
(x_train, y_train), (x_test, y_test) = mnist.load_data() # load MNIST; the training set is 60000 images of shape (28, 28), dtype uint8
x_train, x_test = np.array(x_train, np.float32), np.array(x_test, np.float32) # convert the uint8 data loaded from MNIST to float32
x_train, x_test = x_train.reshape([-1, num_features]), x_test.reshape([-1, num_features]) # flatten each (28, 28) image into a 784-element vector
x_train, x_test = x_train / 255., x_test / 255. # rescale pixel values from [0, 255] to [0, 1]
# Use the tf.data API to shuffle the data and draw batches of batch_size samples
train_data = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_data = train_data.repeat().shuffle(5000).batch(batch_size).prefetch(1)
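# (Optional sanity check, not in the original post: take one batch and confirm that the
# pipeline yields float32 image batches of shape (batch_size, 784) and integer labels
# of shape (batch_size,). Safe to remove.)
for sample_x, sample_y in train_data.take(1):
    print(sample_x.shape, sample_x.dtype)  # (256, 784) float32
    print(sample_y.shape, sample_y.dtype)  # (256,) uint8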
# Class used to build neural network instances
class NeuralNet(Model, ABC):
    # Set layers.
    def __init__(self):
        super(NeuralNet, self).__init__()
        # First fully-connected hidden layer.
        self.fc1 = layers.Dense(n_hidden_1, activation=tf.nn.relu)
        # Second fully-connected hidden layer.
        self.fc2 = layers.Dense(n_hidden_2, activation=tf.nn.relu)
        # Output layer (one logit per class).
        self.out = layers.Dense(num_classes)
    # Set forward pass.
    def call(self, x, is_training=False):
        x = self.fc1(x)
        x = self.fc2(x)
        x = self.out(x)
        if not is_training:
            # tf cross entropy expects logits without softmax, so only
            # apply softmax when not training.
            x = tf.nn.softmax(x)
        return x
# Build the neural network model.
neural_net = NeuralNet()
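# (Optional shape check, not in the original post: feeding a few images through the
# untrained network should return one probability row per image, i.e. shape (4, 10),
# and each row should sum to 1 because call() applies softmax when is_training=False.)
demo_out = neural_net(x_train[:4])
print(demo_out.shape)                    # (4, 10)
print(tf.reduce_sum(demo_out, axis=1))   # approximately [1. 1. 1. 1.]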
# Cross-Entropy loss function.
def cross_entropy_loss(x, y):
    """
    Compute the sparse softmax cross-entropy between logits and labels
    :param x: predicted logits (model output before softmax)
    :param y: integer class labels
    :return: mean cross-entropy loss over the batch
    """
    y = tf.cast(y, tf.int64) # cast the label tensor to int64
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=x) # per-example sparse softmax cross-entropy between logits and labels
    return tf.reduce_mean(loss) # average the per-example losses over the batch
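# (Optional illustration, not in the original post: sparse softmax cross-entropy takes
# raw logits and integer labels. A confident, correct prediction gives a loss near 0,
# a confident wrong one gives a large loss.)
print(cross_entropy_loss(tf.constant([[10., 0., 0.]]), tf.constant([0])))  # ~0.0001
print(cross_entropy_loss(tf.constant([[10., 0., 0.]]), tf.constant([1])))  # ~10.0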
# Accuracy metric.
def accuracy(y_pred, y_true):
    """
    Compute the accuracy of predictions y_pred against true labels y_true
    :param y_pred: predicted class scores, one row per sample
    :param y_true: integer class labels
    :return: fraction of correctly classified samples
    """
    correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64)) # 1 where the predicted class matches the label
    return tf.reduce_mean(tf.cast(correct_prediction, tf.float32), axis=-1) # average over the batch
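# (Optional illustration, not in the original post: accuracy compares the argmax of each
# prediction row with the integer label; one of the two rows below is correct, so 0.5.)
print(accuracy(tf.constant([[0.1, 0.9], [0.8, 0.2]]), tf.constant([1, 1])))  # 0.5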
# Stochastic gradient descent optimizer.
optimizer = tf.optimizers.SGD(learning_rate)
# Optimization process.
def run_optimization(x, y):
    """
    Run one optimization step on a batch
    :param x: training samples
    :param y: training labels
    :return:
    """
    # Wrap computation inside a GradientTape for automatic differentiation.
    with tf.GradientTape() as g:
        pred = neural_net(x, is_training=True) # forward pass, returns logits
        loss = cross_entropy_loss(pred, y) # compute the cross-entropy loss
    trainable_variables = neural_net.trainable_variables # variables (weights and biases) to be updated
    gradients = g.gradient(loss, trainable_variables) # compute the gradients of the loss w.r.t. those variables
    optimizer.apply_gradients(zip(gradients, trainable_variables)) # apply the gradients to update w and b
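# (Optional, not in the original post: in TF 2.x this per-step function can be compiled
# into a graph for extra speed, e.g. run_optimization = tf.function(run_optimization);
# the training loop below works unchanged.)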
# Train the model on the prepared data
for step, (batch_x, batch_y) in enumerate(train_data.take(training_steps), 1):
    run_optimization(batch_x, batch_y) # run one optimization step
    if step % display_step == 0:
        pred = neural_net(batch_x, is_training=True) # forward pass on the current batch (returns logits)
        loss = cross_entropy_loss(pred, batch_y) # compute the cross-entropy loss
        acc = accuracy(pred, batch_y) # compute the accuracy
        print("step: %i, loss: %f, accuracy: %f" % (step, loss, acc)) # print the training progress
# Evaluate the trained model on the test set
pred = neural_net(x_test, is_training=False)
print("Test Accuracy: %f" % accuracy(pred, y_test))
neural_net.summary() # print a summary of the trained model
neural_net.save("G:\\MNIST_model") # save the trained model
# Take 5 images from the test set and check the predictions visually
n_images = 5
test_images = x_test[100:100 + n_images]
predictions = neural_net(test_images)
# Display each image together with the predicted digit
for i in range(n_images):
    plt.imshow(np.reshape(test_images[i], [28, 28]), cmap='gray')
    plt.show()
    print("Model prediction: %i" % np.argmax(predictions.numpy()[i]))
2. Predicting images with the model generated by TensorFlow
from __future__ import absolute_import, division, print_function
from matplotlib import pyplot as plt
from tensorflow import keras
import numpy as np
import cv2
neural_net = keras.models.load_model("G:\\MNIST_model") # load the saved model
test_images = cv2.imread("G:\\2.png") # read the image
test_images = cv2.cvtColor(test_images, cv2.COLOR_BGR2GRAY) # convert the BGR image loaded by OpenCV to grayscale
test_images = np.array(test_images, np.float32) # convert the uint8 data to float32
test_images = test_images.reshape(-1, 784) # flatten the (28, 28) image into a 784-element vector
test_images = test_images / 255. # rescale pixel values from [0, 255] to [0, 1]
predictions = neural_net(test_images) # run the model on the image
plt.imshow(np.reshape(test_images, [28, 28]), cmap='gray')
plt.show() # display the image
print("Model prediction: %i" % np.argmax(predictions.numpy())) # print the predicted digit
Input image:
Prediction result:
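Note that the saved model expects exactly the MNIST input format: a 28x28 grayscale image flattened to 784 values, with a bright digit on a dark background. If the PNG has a different size the reshape above fails, and a dark digit on a white background is usually misclassified. A minimal preprocessing sketch, not part of the original script (the mean-based inversion is only a heuristic assumption about typical scanned digits):
img = cv2.imread("G:\\2.png")
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.resize(img, (28, 28))                        # force the 28x28 MNIST size
if img.mean() > 127:                                   # mostly white image -> assume dark digit on white paper
    img = 255 - img                                    # invert so the digit is bright on a dark background
img = img.astype(np.float32).reshape(-1, 784) / 255.   # same flattening and scaling as above
print("Model prediction: %i" % np.argmax(neural_net(img).numpy()))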
3. Solving linear regression with TensorFlow
import tensorflow.compat.v1 as tf
import random
tf.disable_v2_behavior()
m = tf.get_variable("m", [], initializer=tf.constant_initializer(0.)) # create the slope variable
b = tf.get_variable("b", [], initializer=tf.constant_initializer(0.)) # create the intercept variable
init = tf.global_variables_initializer() # op that initializes the variables
input_placeholder = tf.placeholder(tf.float32) # placeholder for the input
output_placeholder = tf.placeholder(tf.float32) # placeholder for the target output
x = input_placeholder
y = output_placeholder
y_guess = m * x + b # the model's prediction
loss = tf.square(y - y_guess) # squared-error loss
optimizer = tf.train.GradientDescentOptimizer(1e-3) # gradient descent optimizer with learning rate 1e-3
train_op = optimizer.minimize(loss) # op that performs one gradient descent step on the loss
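# (Clarifying note, not in the original post: each run of train_op applies one plain
# gradient descent update, m <- m - 1e-3 * d(loss)/dm and b <- b - 1e-3 * d(loss)/db,
# where loss = (y - (m*x + b))^2 for the single (x, y) pair fed in that step.)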
sess = tf.Session() # create a session
sess.run(init) # run the variable initializer
true_m = random.random() # true slope used to generate the training data
true_b = random.random() # true intercept used to generate the training data
for update_i in range(50000):
    input_data = random.random() # random input sample
    output_data = true_m * input_data + true_b # corresponding target output
    _loss, _ = sess.run([loss, train_op], feed_dict={input_placeholder: input_data, output_placeholder: output_data})
    print(update_i, _loss)
print("True parameters: m=%s, b=%s" % (true_m, true_b)) # print the true parameters
print("Learned parameters: m=%s, b=%s" % tuple(sess.run([m, b]))) # print the learned parameters