# Simple workflow for building a neural network (神经网络建立简易流程)
import tensorflow as tf
# Build a fully-connected feed-forward network layer by layer.
# NOTE(review): `feature`, `batch`, `epochs`, `X_train`, `y_train`, `X_val`,
# `y_val`, `x`, `y`, `x_test` are assumed to be defined earlier in the file /
# notebook — confirm before running.
model = tf.keras.Sequential()
# kernel_initializer: initialize hidden-layer weights from N(0, 0.01^2)
initializer = tf.initializers.RandomNormal(stddev=0.01)
# Input layer: width 1024, input dimension = number of features.
model.add(tf.keras.layers.Dense(1024, activation='relu', kernel_initializer=initializer, input_dim=len(feature)))
# Hidden layers (fixed 'relu.' typo — an invalid activation name raises ValueError).
model.add(tf.keras.layers.Dense(512, activation='relu', kernel_initializer=initializer))
model.add(tf.keras.layers.Dense(256, activation='relu', kernel_initializer=initializer))
model.add(tf.keras.layers.Dense(128, activation='relu', kernel_initializer=initializer))
model.add(tf.keras.layers.Dense(64, activation='relu', kernel_initializer=initializer))
# Output layer: sigmoid for binary classification; for regression, omit the activation.
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))

# Configure the optimizer (fixed: bare `keras` was never imported — use tf.keras).
adam = tf.keras.optimizers.Adam(learning_rate=0.015)
# Loss depends on the task; binary_crossentropy matches the sigmoid output.
# (fixed: the compile kwarg is `metrics`, not `metric`.)
model.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])

# Define training callbacks.
callbacks = [
    # Early stopping: stop if `loss` has not improved for 20 epochs.
    # (fixed: verbose is a 0/1 flag, not 10.)
    tf.keras.callbacks.EarlyStopping(monitor='loss', patience=20, verbose=1),
    # Save the best model seen so far, judged by validation loss.
    # (fixed path typo: 'heckpoint' -> 'checkpoint'.)
    tf.keras.callbacks.ModelCheckpoint('./checkpoint/multi_layer_best_model.h5',
                                       monitor='val_loss', save_best_only=True, verbose=0),
    # Dynamic learning rate: multiply LR by `factor` when val_loss plateaus
    # for `patience` epochs; never go below `min_lr`.
    tf.keras.callbacks.ReduceLROnPlateau(
        monitor='val_loss', factor=0.2, patience=5, verbose=0, min_lr=0.001
    ),
]

# Train the model.
# (fixed: `callbacks` is already a list — passing [callbacks] nests it and
# Keras would reject the inner list as a non-Callback.)
model.fit(X_train, y_train, batch_size=batch, callbacks=callbacks,
          validation_data=(X_val, y_val), epochs=epochs)

# Evaluate: returns [loss, accuracy] for the given data.
model_evaluate = model.evaluate(x, y)

# Predict on the test set.
y_pred = model.predict(x_test)