TensorFlow 2 Neural-Network Boilerplate: Commonly Used Functions

######### Commonly Used TensorFlow Functions ##########

Arithmetic operations

Basic operations

tf.add(x, y, name=None)        # addition
tf.subtract(x, y, name=None)   # subtraction
tf.multiply(x, y, name=None)   # element-wise multiplication
tf.divide(x, y, name=None)     # division
tf.math.mod(x, y, name=None)   # modulo
tf.abs(x, name=None)           # absolute value
tf.negative(x, name=None)      # negation

Math functions

tf.square(x, name=None)     # element-wise square
tf.round(x, name=None)      # round to the nearest integer (ties go to even): [0.9, 2.5, 2.3, -4.4] -> [1.0, 2.0, 2.0, -4.0]
tf.sqrt(x, name=None)       # square root
tf.pow(x, y, name=None)     # x raised to the power y
tf.exp(x, name=None)        # e raised to the power x
tf.math.log(x, name=None)   # natural logarithm (element-wise)
tf.cos(x, name=None)        # cosine
tf.sin(x, name=None)        # sine
tf.tan(x, name=None)        # tangent
tf.atan(x, name=None)       # arctangent

Maximum and minimum

tf.maximum(x, y, name=None)   # element-wise maximum
tf.minimum(x, y, name=None)   # element-wise minimum
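
A minimal sketch of a few of these element-wise operations in TensorFlow 2 (the tensor values here are made up for illustration):

import tensorflow as tf

a = tf.constant([1.0, -2.0, 3.0])
b = tf.constant([4.0, 5.0, 6.0])

print(tf.add(a, b))        # [5. 3. 9.]
print(tf.subtract(a, b))   # [-3. -7. -3.]
print(tf.multiply(a, b))   # [4. -10. 18.]
print(tf.abs(a))           # [1. 2. 3.]
print(tf.maximum(a, b))    # [4. 5. 6.]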

Matrix math functions

tf.linalg.diag(diagonal, name='diag')    # build a diagonal matrix from a vector
tf.transpose(a, perm=None, name='transpose')
tf.matmul(a, b, transpose_a=False, transpose_b=False, a_is_sparse=False, b_is_sparse=False, name=None)
tf.linalg.det(input, name=None)          # determinant
tf.linalg.inv(input, name=None)          # matrix inverse
tf.linalg.cholesky(input, name=None)     # Cholesky decomposition

In TensorFlow 2 these tf.linalg ops (and tf.matmul) operate over batches of matrices automatically, so the old batch_* variants are no longer needed.
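
A short sketch of the matrix routines above, using a small symmetric positive-definite matrix so that the Cholesky factorization is defined:

import tensorflow as tf

m = tf.constant([[4.0, 1.0],
                 [1.0, 3.0]])

print(tf.transpose(m))        # swap rows and columns
print(tf.matmul(m, m))        # matrix product
print(tf.linalg.det(m))       # determinant: 4*3 - 1*1 = 11
print(tf.linalg.inv(m))       # inverse matrix
print(tf.linalg.cholesky(m))  # lower-triangular Cholesky factor (m is symmetric positive definite)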

Tensor operations

Creating tensors

a = tf.constant([[1,5]], dtype=tf.int32)  # shape: a 1-D tensor takes a count, a 2-D tensor takes [rows, cols], higher ranks take [n, m, j, ...]

Constant-valued tensors

tf.zeros([2,3,2])
tf.ones(2)
tf.fill([2,3], 9)  # tf.fill(dims, value)
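
A quick illustration of the constant-tensor constructors (shapes chosen arbitrarily):

import tensorflow as tf

a = tf.constant([[1, 5]], dtype=tf.int32)  # shape (1, 2)
z = tf.zeros([2, 3, 2])                    # shape (2, 3, 2), all zeros
o = tf.ones(2)                             # [1., 1.]
f = tf.fill([2, 3], 9)                     # 2x3 tensor filled with 9
print(a.shape, z.shape, o.numpy(), f.numpy())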

Tensor reduction

tf.reduce_sum(input_tensor, axis=None, keepdims=False, name=None)
tf.reduce_prod(input_tensor, axis=None, keepdims=False, name=None)
tf.reduce_min(input_tensor, axis=None, keepdims=False, name=None)
tf.reduce_max(input_tensor, axis=None, keepdims=False, name=None)
tf.reduce_mean(input_tensor, axis=None, keepdims=False, name=None)
tf.reduce_all(input_tensor, axis=None, keepdims=False, name=None)
tf.reduce_any(input_tensor, axis=None, keepdims=False, name=None)
tf.math.accumulate_n(inputs, shape=None, tensor_dtype=None, name=None)
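
A small sketch showing how the axis and keepdims arguments control the reduction (toy 2x3 tensor):

import tensorflow as tf

x = tf.constant([[1.0, 2.0, 3.0],
                 [4.0, 5.0, 6.0]])

print(tf.reduce_sum(x))                         # 21.0, reduce over every axis
print(tf.reduce_sum(x, axis=0))                 # [5. 7. 9.], collapse the rows
print(tf.reduce_mean(x, axis=1))                # [2. 5.], mean of each row
print(tf.reduce_max(x, axis=1, keepdims=True))  # [[3.] [6.]], keep the reduced axis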

Sequences

tf.linspace(start, stop, num, name=None)  # evenly spaced values (arithmetic sequence)
tf.range(start, limit, delta=1)           # delta is the step size
x, y = np.mgrid[start:stop:step, start:stop:step, ...]  # generate coordinate grids
x.ravel()                                 # flatten an array to 1-D
np.c_[array1, array2, ...]                # pair up array values column by column
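
A brief sketch of building a coordinate grid the way it is typically done for plotting decision boundaries (the ranges are made up):

import numpy as np
import tensorflow as tf

print(tf.linspace(0.0, 1.0, 5))     # [0.   0.25 0.5  0.75 1.  ]
print(tf.range(1, 10, delta=2))     # [1 3 5 7 9]

# Build a 2-D grid of coordinate pairs
x, y = np.mgrid[0:3:1, 0:3:1]       # two 3x3 coordinate arrays
grid = np.c_[x.ravel(), y.ravel()]  # shape (9, 2), one (x, y) pair per row
print(grid)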

Random tensors

tf.random.set_seed(seed)
tf.random.normal(shape, mean=0.0, stddev=1.0, dtype=tf.float32, seed=None, name=None)
tf.random.truncated_normal(shape, mean=0.0, stddev=1.0, dtype=tf.float32, seed=None, name=None)
tf.random.uniform(shape, minval=0, maxval=None, dtype=tf.float32, seed=None, name=None)
tf.random.shuffle(value, seed=None, name=None)
np.random.RandomState(seed).rand(dims)  # returns a scalar when called without dims; values are in [0, 1)
np.random.shuffle(x)  # shuffle the data in place; reuse the same seed when shuffling features and labels so they stay aligned
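
A minimal sketch of the random generators above (seeds and shapes chosen arbitrarily):

import numpy as np
import tensorflow as tf

tf.random.set_seed(1)
w = tf.random.normal([2, 2], mean=0.0, stddev=1.0)
t = tf.random.truncated_normal([2, 2], mean=0.0, stddev=1.0)  # values re-drawn if more than 2 stddev from the mean
u = tf.random.uniform([2, 2], minval=0, maxval=1)

rdm = np.random.RandomState(seed=1)
print(rdm.rand())       # a single random float in [0, 1)
print(rdm.rand(2, 3))   # a 2x3 array of random floats in [0, 1)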

Data type conversion

tf.cast(x, dtype, name=None)  # e.g. tf.cast(a, tf.int32)
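
For example, casting to an integer type truncates the fractional part:

import tensorflow as tf

a = tf.constant([1.8, 2.2], dtype=tf.float32)
b = tf.cast(a, tf.int32)   # [1, 2]
print(a.dtype, b.dtype)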

Common functions for building, training, testing, and predicting with a model

Input features and labels are paired one to one. The dataset is split into batches and fed to the network batch by batch.

tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(32)
tf.Variable(tf.random.truncated_normal([3], stddev=0.1, seed=1))  # marks a parameter as trainable; rarely needed in the Keras boilerplate
tf.GradientTape()  # records operations so the model's gradients (partial derivatives) can be computed
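
A minimal sketch of how these three pieces fit together in a hand-written training loop; the toy data, the shapes, and the mean-squared-error loss here are illustrative assumptions, not part of the original notes:

import tensorflow as tf

# Toy data: 6 samples with 3 features each, and integer class labels 0-2
x_train = tf.random.normal([6, 3])
y_train = tf.constant([0, 1, 2, 0, 1, 2])

# Pair features with labels and feed them in batches of 2
train_db = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(2)

# Trainable weights: 3 inputs -> 3 classes
w = tf.Variable(tf.random.truncated_normal([3, 3], stddev=0.1, seed=1))

lr = 0.1
for x, y in train_db:
    with tf.GradientTape() as tape:                 # record operations for differentiation
        y_pred = tf.nn.softmax(tf.matmul(x, w))
        loss = tf.reduce_mean(tf.square(tf.one_hot(y, depth=3) - y_pred))
    grads = tape.gradient(loss, w)                  # d(loss)/d(w)
    w.assign_sub(lr * grads)                        # gradient-descent update of the weights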

The six-step neural-network boilerplate

1 import
2 prepare the train and test data
3 describe the network (build the layer structure)
model = tf.keras.models.Sequential([layer structure])
① Flatten layer
tf.keras.layers.Flatten()
② Fully connected (Dense) layer
tf.keras.layers.Dense(units, activation="activation function", kernel_regularizer=regularizer)
# activation can be 'relu', 'softmax', 'sigmoid', or 'tanh'
# kernel_regularizer can be tf.keras.regularizers.l1() or tf.keras.regularizers.l2()
③ Convolutional layer
tf.keras.layers.Conv2D(filters=number of kernels, kernel_size=kernel size, strides=stride, padding="valid" or "same")
④ LSTM layer
tf.keras.layers.LSTM()
4 configure the training method
model.compile(optimizer = optimizer,
             loss = loss function,
             metrics = ['metric'])

            metrics options:
             'accuracy': y and y_ are both plain numeric labels
             'categorical_accuracy': y and y_ are both one-hot encoded (probability distributions)
             'sparse_categorical_accuracy': y_ is an integer label while y is a probability distribution
5 run the training
model.fit(training features, training labels,
          batch_size= , epochs = ,
          validation_data = (test features, test labels),
          validation_split = ,   # fraction of training data held out for validation; use either this or validation_data, not both
          validation_freq = how many epochs between validation runs)

6 print a summary
model.summary()

The class-based boilerplate: building the network with a class

class MyModel(Model):
    def __init__(self):
        super(MyModel, self).__init__()
        self.d1 = <layer definition>   # e.g. Dense(...); avoid the attribute name `name`, which is reserved by Keras
    def call(self, x):
        y = self.d1(x)
        return y
model = MyModel()

Saving the model / loading a saved model

checkpoint_save_path = "./checkpoint/mnist.ckpt"
if os.path.exists(checkpoint_save_path + '.index'):
    print('-------------load the model-----------------')
    model.load_weights(checkpoint_save_path)

# checkpoint callback: save weights only, keeping only the best checkpoint
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_save_path,
                                                save_weights_only=True,
                                                save_best_only=True)
# run the training
model.fit(x_train, y_train, batch_size=32, epochs=5, validation_data=(x_test, y_test), validation_freq=1,
                    callbacks=[cp_callback])

Predicting with a trained model

# build the network first, then load the saved weights
model.load_weights(model_save_path)
# preprocess the data first, then run the model prediction
result = model.predict(x_predict)

Sequential and class source code

Sequential

import tensorflow as tf
from sklearn import datasets
import numpy as np

x_train = datasets.load_iris().data
y_train = datasets.load_iris().target

np.random.seed(116)
np.random.shuffle(x_train)
np.random.seed(116)
np.random.shuffle(y_train)
tf.random.set_seed(116)

model = tf.keras.models.Sequential([
    tf.keras.layers.Dense(3, activation='softmax', kernel_regularizer=tf.keras.regularizers.l2())
])

model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.1),
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              # note: if the network output has NOT been turned into a probability distribution, use from_logits=True; if it has (most commonly via softmax), use from_logits=False
              metrics=['sparse_categorical_accuracy'])

model.fit(x_train, y_train, batch_size=32, epochs=500, validation_split=0.2, validation_freq=20)

model.summary()

Class

import tensorflow as tf
from tensorflow.keras.layers import Dense
from tensorflow.keras import Model
from sklearn import datasets
import numpy as np

x_train = datasets.load_iris().data
y_train = datasets.load_iris().target

np.random.seed(116)
np.random.shuffle(x_train)
np.random.seed(116)
np.random.shuffle(y_train)
tf.random.set_seed(116)

class IrisModel(Model):
    def __init__(self):
        super(IrisModel, self).__init__()
        self.d1 = Dense(3, activation='softmax', kernel_regularizer=tf.keras.regularizers.l2())

    def call(self, x):
        y = self.d1(x)
        return y

model = IrisModel()

model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.1),
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=['sparse_categorical_accuracy'])

model.fit(x_train, y_train, batch_size=32, epochs=500, validation_split=0.2, validation_freq=20)
model.summary()

Source code for resuming training, saving, loading, and predicting

Model training

import tensorflow as tf
import os
import numpy as np
from matplotlib import pyplot as plt

# print full arrays without truncation
np.set_printoptions(threshold=np.inf)

# load the data
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

# build the network structure
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(10, activation='softmax')
])

# configure the training method
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=['sparse_categorical_accuracy'])

# load saved weights if a checkpoint already exists
checkpoint_save_path = "./checkpoint/mnist.ckpt"
if os.path.exists(checkpoint_save_path + '.index'):
    print('-------------load the model-----------------')
    model.load_weights(checkpoint_save_path)

# checkpoint callback: save weights only, keeping only the best checkpoint
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_save_path,
                                                save_weights_only=True,
                                                save_best_only=True)

# run the training
history = model.fit(x_train, y_train, batch_size=32, epochs=5, validation_data=(x_test, y_test), validation_freq=1,
                    callbacks=[cp_callback])
model.summary()

print(len(y_train),len(y_test))

#########################    print and save the trainable parameters      ###############################################

# print(model.trainable_variables)
# file = open('./weights.txt', 'w')
# for v in model.trainable_variables:
#     file.write(str(v.name) + '\n')
#     file.write(str(v.shape) + '\n')
#     file.write(str(v.numpy()) + '\n')
# file.close()

###############################################    show   ###############################################

# plot the training/validation accuracy and loss curves
acc = history.history['sparse_categorical_accuracy']
val_acc = history.history['val_sparse_categorical_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

plt.subplot(1, 2, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.title('Training and Validation Accuracy')
plt.legend()

plt.subplot(1, 2, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.title('Training and Validation Loss')
plt.legend()
plt.show()

Model prediction

from PIL import Image
import numpy as np
import tensorflow as tf

model_save_path = './checkpoint/mnist.ckpt'

model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(10, activation='softmax')])
    
model.load_weights(model_save_path)
preNum = int(input("input the number of test pictures:"))

for i in range(preNum):
    image_path = input("the path of test picture:")
    img = Image.open(image_path)
    img = img.resize((28, 28), Image.LANCZOS)  # ANTIALIAS was removed in newer Pillow versions; LANCZOS is the equivalent filter
    img_arr = np.array(img.convert('L'))

    img_arr = 255 - img_arr  # invert colors so the digit is white on a black background, matching MNIST
                
    img_arr = img_arr / 255.0
    print("img_arr:",img_arr.shape)
    x_predict = img_arr[tf.newaxis, ...]
    print("x_predict:",x_predict.shape)
    result = model.predict(x_predict)
    
    pred = tf.argmax(result, axis=1)
    
    print('\n')
    tf.print(pred)

Reference: https://www.bilibili.com/video/BV1B7411L7Qt/?p=25&spm_id_from=333.880.my_history.page.click&vd_source=a3ac9bc65ba9ebfb00e406bc60dd1df2
