1. 六步法
①import
②train,test
③model = tf.keras.models.Sequential
④model.compile
⑤model.fit
⑥model.summary
2. model = tf.keras.models.Sequential( [ 网络结构 ] )
e.g.
拉直层:tf.keras.layers.Flatten() #把输入特征拉直变成一维数组。
全连接层:tf.keras.layers.Dense(神经元个数, activation="激活函数", kernel_regularizer=哪种正则化)
activation(字符串给出)可选:relu、softmax、sigmoid、tanh
kernel_regularizer可选:tf.keras.regularizers.l1()、tf.keras.regularizers.l2()
卷积层:tf.keras.layers.Conv2D ( filters = 卷积核个数,kernel_size = 卷积核尺寸,
strides = 卷积步长,padding = "valid" or "same")
LSTM层:tf.keras.layers.LSTM()
3. model.compile( optimizer = 优化器,loss = 损失函数,metrics = ["准确率"])
Optimizer可选:
"sgd" or tf.keras.optimizers.SGD( learning_rate=学习率, momentum=动量参数)
"adagrad" or tf.keras.optimizers.Adagrad( learning_rate=学习率 )
"adadelta" or tf.keras.optimizers.Adadelta( learning_rate=学习率 )
"adam" or tf.keras.optimizers.Adam( learning_rate=学习率, beta_1=0.9, beta_2=0.999)
loss可选:
“mse” or tf.keras.losses.MeanSquaredError()
"sparse_categorical_crossentropy" or
tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False) #如果网络输出在计算损失前已经
过 softmax 转换为概率分布,则为 False;若网络直接输出原始 logits,则为 True。
metrics可选:
"accuracy":y_和y都是数值,如y_=[1] y=[1]
"categorical_accuracy":y_和y都是独热码(概率分布),如y_=[0, 1, 0] y=[0.256, 0.695, 0.048]
"sparse_categorical_accuracy":y_是数值,y是概率分布,如y_=[1] y=[0.256, 0.695, 0.048]
4. model.fit
model.fit(训练集的输入特征,训练集的标签,
batch_size= , epochs= ,
validation_data=(测试集的输入特征,测试集的标签),
validation_split=从训练集划分多少比例给测试集,
validation_freq=多少次epoch测试一次)
5. model.summary()
鸢尾花代码:
import tensorflow as tf
from sklearn import datasets
import numpy as np
# Load the Iris dataset: 150 samples, 4 features, 3 classes.
x_train = datasets.load_iris().data
y_train = datasets.load_iris().target

# Shuffle features and labels with the SAME seed so each (x, y)
# pair stays aligned after shuffling.
np.random.seed(116)
np.random.shuffle(x_train)
np.random.seed(116)
np.random.shuffle(y_train)
tf.random.set_seed(116)  # reproducible weight initialization

# A single fully-connected layer: 3 units (one per class),
# softmax output, L2 regularization on the kernel weights.
model = tf.keras.models.Sequential([
    tf.keras.layers.Dense(3, activation='softmax',
                          kernel_regularizer=tf.keras.regularizers.l2())
])

# from_logits=False because the softmax above already produces
# a probability distribution.
# NOTE: `lr` is deprecated in TF2 optimizers — use `learning_rate`.
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.1),
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=['sparse_categorical_accuracy'])

# Hold out 20% of the (shuffled) training data for validation;
# run validation once every 20 epochs.
model.fit(x_train, y_train, batch_size=32, epochs=500,
          validation_split=0.2, validation_freq=20)
model.summary()