第十章 keras 人工神经网络-函数式API

第十章 keras 人工神经网络

2021-2-13
python: 3.8.x

10.2.4 函数式API

10.2.4.1 单输入深宽神经网络
# Load the California housing dataset and build train/valid/test splits.
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

housing = fetch_california_housing()
print(housing.data.shape)  # (20640, 8): 20640 samples, 8 numeric features

# Split the full dataset into a train+valid pool and a test set.
X_train_full, X_test, y_train_full, y_test = train_test_split(housing.data, housing.target)
# Split the pool into the actual training set and a validation set.
X_train, X_valid, y_train, y_valid = train_test_split(X_train_full, y_train_full)

# Standardize features (zero mean, unit variance).
# StandardScaler must be fit before transform; fit only on the training
# data so validation/test statistics do not leak into the scaler.
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_valid = scaler.transform(X_valid)
X_test = scaler.transform(X_test)
# Build a wide & deep network with the Keras functional API.
from tensorflow import keras

# Input layer: shape (8,) — one vector of 8 features per sample.
input_ = keras.layers.Input(shape=X_train.shape[1:])

# Hidden layer 1 (ReLU). Building the layer and connecting it are two
# separate steps: Dense(...) creates the layer object, and calling it
# on a tensor wires it into the graph.
hidden1 = keras.layers.Dense(30, activation="relu")
print(type(hidden1))  # <class '...layers.core.Dense'>
hidden1 = hidden1(input_)

# Hidden layer 2 (ReLU), stacked on hidden layer 1.
# NOTE(review): the original fed `input_` here, which left hidden1
# disconnected (the printed summary showed only one Dense(30) layer and
# 309 total params). Feeding `hidden1` restores the intended two-layer
# deep path.
hidden2 = keras.layers.Dense(30, activation="relu")(hidden1)

# Concatenate layer: joins the raw input (wide path) with the deep path.
concat = keras.layers.Concatenate()
print(type(concat))  # <class '...layers.merge.Concatenate'>
concat = concat([input_, hidden2])

# Output layer: one unit, no activation (regression).
output = keras.layers.Dense(1)(concat)

# Wrap the graph endpoints into a Model.
model = keras.Model(inputs=[input_], outputs=output)
print(type(model))  # <class '...engine.functional.Functional'>
model.summary()  # prints layer shapes, parameter counts, and connectivity
# Compile: MSE loss with plain SGD. `learning_rate` replaces the
# deprecated `lr` keyword of keras.optimizers.SGD.
model.compile(loss="mean_squared_error",
              optimizer=keras.optimizers.SGD(learning_rate=1e-3)
              )
# Train for 20 epochs, monitoring loss on the validation set.
history = model.fit(X_train, y_train, epochs=20,
                    validation_data=(X_valid, y_valid)
                    )
# Evaluate on the held-out test set.
mse_test = model.evaluate(X_test, y_test)
# Use the model to predict on a few "new" instances.
X_new = X_test[:3]
y_pred = model.predict(X_new)
Epoch 1/20
363/363 [==============================] - 2s 3ms/step - loss: 2.5331 - val_loss: 0.8644
Epoch 2/20
363/363 [==============================] - 1s 2ms/step - loss: 0.8716 - val_loss: 0.7481
Epoch 3/20
363/363 [==============================] - 1s 2ms/step - loss: 0.7006 - val_loss: 0.6990
Epoch 4/20
363/363 [==============================] - 1s 2ms/step - loss: 0.6768 - val_loss: 0.6608
Epoch 5/20
363/363 [==============================] - 1s 3ms/step - loss: 0.6581 - val_loss: 0.6348
Epoch 6/20
363/363 [==============================] - 1s 2ms/step - loss: 0.6254 - val_loss: 0.6127
Epoch 7/20
363/363 [==============================] - 1s 2ms/step - loss: 0.5998 - val_loss: 0.5927
Epoch 8/20
363/363 [==============================] - 1s 2ms/step - loss: 0.5877 - val_loss: 0.5794
Epoch 9/20
363/363 [==============================] - 1s 2ms/step - loss: 0.5580 - val_loss: 0.5656
Epoch 10/20
363/363 [==============================] - 1s 2ms/step - loss: 0.5634 - val_loss: 0.5551
Epoch 11/20
363/363 [==============================] - 1s 2ms/step - loss: 0.5569 - val_loss: 0.5459
Epoch 12/20
363/363 [==============================] - 1s 2ms/step - loss: 0.5553 - val_loss: 0.5385
Epoch 13/20
363/363 [==============================] - 1s 2ms/step - loss: 0.5331 - val_loss: 0.5324
Epoch 14/20
363/363 [==============================] - 1s 2ms/step - loss: 0.5329 - val_loss: 0.5248
Epoch 15/20
363/363 [==============================] - 1s 2ms/step - loss: 0.5233 - val_loss: 0.5188
Epoch 16/20
363/363 [==============================] - 1s 2ms/step - loss: 0.5328 - val_loss: 0.5141
Epoch 17/20
363/363 [==============================] - 1s 2ms/step - loss: 0.5457 - val_loss: 0.5101
Epoch 18/20
363/363 [==============================] - 1s 2ms/step - loss: 0.5064 - val_loss: 0.5050
Epoch 19/20
363/363 [==============================] - 1s 2ms/step - loss: 0.5139 - val_loss: 0.5019
Epoch 20/20
363/363 [==============================] - 1s 2ms/step - loss: 0.5097 - val_loss: 0.4973
162/162 [==============================] - 0s 2ms/step - loss: 0.4945
# Inspect the History object returned by fit().
print(type(history))           # tensorflow...callbacks.History
print(history.history.keys())  # dict_keys(['loss', 'val_loss'])
print(history.params)          # {'verbose': 1, 'epochs': 20, 'steps': 363}
print(history.epoch)           # [0, 1, 2, ..., 19]
print(mse_test)                # e.g. 0.4945...
# Plot the learning curves from the training history.
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline  # IPython magic — uncomment only inside a notebook

print(type(history.history))  # <class 'dict'>
history_df = pd.DataFrame(history.history)
print(history_df.head())
#        loss  val_loss
# 0  1.614022  0.864443
# 1  0.792835  0.748122
# ...

history_df.plot(figsize=(8, 5))
plt.grid(True)
plt.savefig("10-2-4-f1.png")
plt.show()

(图:学习曲线,已保存为 10-2-4-f1.png)

# Draw and save a diagram of the model architecture
keras.utils.plot_model(model, "10-2-4-f2.png", show_shapes=True)

(图:模型结构图,已保存为 10-2-4-f2.png)

10.2.4.2 多输入深宽神经网络
print(X_train.shape)  # (11610, 8)

# Wide input A uses features 0-4 (5 features); deep input B uses
# features 2-7 (6 features) — the two slices overlap on features 2-4.
input_A = keras.layers.Input(shape=[5], name="wide_input")
input_B = keras.layers.Input(shape=[6], name="deep_input")
# Deep path: two hidden ReLU layers fed by input B.
hidden1 = keras.layers.Dense(30, activation="relu")(input_B)
hidden2 = keras.layers.Dense(30, activation="relu")(hidden1)
# Concatenate the wide input with the deep path's output.
concat = keras.layers.concatenate([input_A, hidden2])
output = keras.layers.Dense(1, name="output")(concat)
model = keras.Model(inputs=[input_A, input_B], outputs=[output])
# Draw and save a diagram of the model graph.
keras.utils.plot_model(model, "10-2-4-f3.png", show_shapes=True)

(图:多输入模型结构图,已保存为 10-2-4-f3.png)

# Compile; `learning_rate` replaces the deprecated `lr` keyword.
model.compile(loss="mse", optimizer=keras.optimizers.SGD(learning_rate=1e-3))
# Slice the scaled feature matrices into the two model inputs.
X_train_A, X_train_B = X_train[:, :5], X_train[:, 2:8]
X_valid_A, X_valid_B = X_valid[:, :5], X_valid[:, 2:8]
X_test_A, X_test_B = X_test[:, :5], X_test[:, 2:8]
X_new_A, X_new_B = X_new[:, :5], X_new[:, 2:8]
# Train, passing each input by its layer name.
history = model.fit({"wide_input": X_train_A, "deep_input": X_train_B}, y_train, epochs=20,
                    validation_data=({"wide_input": X_valid_A, "deep_input": X_valid_B}, y_valid))
Epoch 1/20
363/363 [==============================] - 1s 3ms/step - loss: 4.6494 - val_loss: 1.1131
Epoch 2/20
363/363 [==============================] - 1s 2ms/step - loss: 1.0090 - val_loss: 0.8235
Epoch 3/20
363/363 [==============================] - 1s 2ms/step - loss: 0.8296 - val_loss: 0.7276
Epoch 4/20
363/363 [==============================] - 1s 2ms/step - loss: 0.7223 - val_loss: 0.6768
Epoch 5/20
363/363 [==============================] - 1s 3ms/step - loss: 0.6813 - val_loss: 0.6411
Epoch 6/20
363/363 [==============================] - 1s 2ms/step - loss: 0.6393 - val_loss: 0.6128
Epoch 7/20
363/363 [==============================] - 1s 2ms/step - loss: 0.6137 - val_loss: 0.5899
Epoch 8/20
363/363 [==============================] - 1s 2ms/step - loss: 0.5766 - val_loss: 0.5704
Epoch 9/20
363/363 [==============================] - 1s 2ms/step - loss: 0.5470 - val_loss: 0.5538
Epoch 10/20
363/363 [==============================] - 1s 2ms/step - loss: 0.5385 - val_loss: 0.5404
Epoch 11/20
363/363 [==============================] - 1s 2ms/step - loss: 0.5455 - val_loss: 0.5291
Epoch 12/20
363/363 [==============================] - 1s 3ms/step - loss: 0.5173 - val_loss: 0.5197
Epoch 13/20
363/363 [==============================] - 1s 2ms/step - loss: 0.5238 - val_loss: 0.5148
Epoch 14/20
363/363 [==============================] - 1s 2ms/step - loss: 0.5144 - val_loss: 0.5080
Epoch 15/20
363/363 [==============================] - 1s 2ms/step - loss: 0.5081 - val_loss: 0.5013
Epoch 16/20
363/363 [==============================] - 1s 2ms/step - loss: 0.4934 - val_loss: 0.5007
Epoch 17/20
363/363 [==============================] - 1s 2ms/step - loss: 0.5092 - val_loss: 0.4920
Epoch 18/20
363/363 [==============================] - 1s 2ms/step - loss: 0.5254 - val_loss: 0.4882
Epoch 19/20
363/363 [==============================] - 1s 3ms/step - loss: 0.4780 - val_loss: 0.4850
Epoch 20/20
363/363 [==============================] - 1s 2ms/step - loss: 0.4774 - val_loss: 0.4822
# Evaluate on the test set; the inputs may be passed as a tuple in order.
mse_test = model.evaluate((X_test_A, X_test_B), y_test)
# Predict on the "new" instances, passing inputs by layer name.
y_pred = model.predict({"wide_input": X_new_A, "deep_input": X_new_B})
print(y_pred)
# e.g. array([[2.8887858],
#             [3.4206166],
#             [1.5422924]], dtype=float32)

# Plot the learning curves.
pd.DataFrame(history.history).plot()
plt.grid(True)
plt.show()

(图:多输入模型的学习曲线)

10.2.4.3 多输出深宽神经网络
# Use an auxiliary output as a form of regularization.
print(X_train.shape)  # (11610, 8)

# Build the model: wide and deep inputs, as before.
input_A = keras.layers.Input(shape=[5], name="wide_input")
input_B = keras.layers.Input(shape=[6], name="deep_input")
# Deep path: two hidden ReLU layers fed by input B.
hidden1 = keras.layers.Dense(30, activation="relu")(input_B)
hidden2 = keras.layers.Dense(30, activation="relu")(hidden1)
# Merge the wide input with the deep path.
concat = keras.layers.concatenate([input_A, hidden2])
# Two output layers.
output = keras.layers.Dense(1, name="main_output")(concat)      # main prediction
aux_output = keras.layers.Dense(1, name="aux_output")(hidden2)  # auxiliary (regularizing) output
model = keras.Model(inputs=[input_A, input_B], outputs=[output, aux_output])
# Draw and save a diagram of the model graph.
keras.utils.plot_model(model, "10-2-4-f4.png", show_layer_names=True, show_shapes=True)

(图:多输出模型结构图,已保存为 10-2-4-f4.png)

# Compile with one loss per output. The auxiliary output is weighted
# much lower so it regularizes training instead of dominating it.
losses = {"main_output": "mse", "aux_output": "mse"}
weights = {"main_output": 0.9, "aux_output": 0.1}
model.compile(loss=losses, loss_weights=weights, optimizer="sgd")
# Equivalent positional form:
# model.compile(loss=["mse", "mse"], loss_weights=[0.9, 0.1], optimizer="sgd")

# Train: both outputs regress the same target, so y_train is passed twice.
history = model.fit(
    [X_train_A, X_train_B], [y_train, y_train], epochs=20,
    validation_data=([X_valid_A, X_valid_B], [y_valid, y_valid]),
)
Epoch 1/20
363/363 [==============================] - 2s 4ms/step - loss: 1.9536 - main_output_loss: 1.7851 - aux_output_loss: 3.4700 - val_loss: 0.6366 - val_main_output_loss: 0.5607 - val_aux_output_loss: 1.3194
Epoch 2/20
363/363 [==============================] - 1s 3ms/step - loss: 0.6281 - main_output_loss: 0.5603 - aux_output_loss: 1.2387 - val_loss: 0.5552 - val_main_output_loss: 0.4995 - val_aux_output_loss: 1.0565
Epoch 3/20
363/363 [==============================] - 1s 2ms/step - loss: 0.5744 - main_output_loss: 0.5260 - aux_output_loss: 1.0099 - val_loss: 0.5148 - val_main_output_loss: 0.4706 - val_aux_output_loss: 0.9129
Epoch 4/20
363/363 [==============================] - 1s 2ms/step - loss: 0.5021 - main_output_loss: 0.4630 - aux_output_loss: 0.8539 - val_loss: 0.4843 - val_main_output_loss: 0.4504 - val_aux_output_loss: 0.7895
Epoch 5/20
363/363 [==============================] - 1s 2ms/step - loss: 0.4632 - main_output_loss: 0.4335 - aux_output_loss: 0.7312 - val_loss: 0.4706 - val_main_output_loss: 0.4434 - val_aux_output_loss: 0.7149
Epoch 6/20
363/363 [==============================] - 1s 2ms/step - loss: 0.4620 - main_output_loss: 0.4378 - aux_output_loss: 0.6806 - val_loss: 0.4595 - val_main_output_loss: 0.4355 - val_aux_output_loss: 0.6755
Epoch 7/20
363/363 [==============================] - 1s 2ms/step - loss: 0.4595 - main_output_loss: 0.4388 - aux_output_loss: 0.6455 - val_loss: 0.4607 - val_main_output_loss: 0.4399 - val_aux_output_loss: 0.6477
Epoch 8/20
363/363 [==============================] - 1s 3ms/step - loss: 0.4658 - main_output_loss: 0.4472 - aux_output_loss: 0.6332 - val_loss: 0.4569 - val_main_output_loss: 0.4371 - val_aux_output_loss: 0.6358
Epoch 9/20
363/363 [==============================] - 1s 2ms/step - loss: 0.4455 - main_output_loss: 0.4259 - aux_output_loss: 0.6216 - val_loss: 0.4429 - val_main_output_loss: 0.4249 - val_aux_output_loss: 0.6049
Epoch 10/20
363/363 [==============================] - 1s 2ms/step - loss: 0.4466 - main_output_loss: 0.4294 - aux_output_loss: 0.6006 - val_loss: 0.4294 - val_main_output_loss: 0.4108 - val_aux_output_loss: 0.5972
Epoch 11/20
363/363 [==============================] - 1s 2ms/step - loss: 0.4455 - main_output_loss: 0.4289 - aux_output_loss: 0.5946 - val_loss: 0.4389 - val_main_output_loss: 0.4233 - val_aux_output_loss: 0.5795
Epoch 12/20
363/363 [==============================] - 1s 3ms/step - loss: 0.4213 - main_output_loss: 0.4054 - aux_output_loss: 0.5636 - val_loss: 0.4135 - val_main_output_loss: 0.3963 - val_aux_output_loss: 0.5685
Epoch 13/20
363/363 [==============================] - 1s 2ms/step - loss: 0.4263 - main_output_loss: 0.4099 - aux_output_loss: 0.5739 - val_loss: 0.4115 - val_main_output_loss: 0.3954 - val_aux_output_loss: 0.5572
Epoch 14/20
363/363 [==============================] - 1s 3ms/step - loss: 0.4106 - main_output_loss: 0.3963 - aux_output_loss: 0.5387 - val_loss: 0.4036 - val_main_output_loss: 0.3875 - val_aux_output_loss: 0.5487
Epoch 15/20
363/363 [==============================] - 1s 2ms/step - loss: 0.3971 - main_output_loss: 0.3816 - aux_output_loss: 0.5361 - val_loss: 0.3968 - val_main_output_loss: 0.3810 - val_aux_output_loss: 0.5394
Epoch 16/20
363/363 [==============================] - 1s 2ms/step - loss: 0.4064 - main_output_loss: 0.3918 - aux_output_loss: 0.5379 - val_loss: 0.3908 - val_main_output_loss: 0.3743 - val_aux_output_loss: 0.5388
Epoch 17/20
363/363 [==============================] - 1s 2ms/step - loss: 0.3945 - main_output_loss: 0.3798 - aux_output_loss: 0.5266 - val_loss: 0.4075 - val_main_output_loss: 0.3923 - val_aux_output_loss: 0.5448
Epoch 18/20
363/363 [==============================] - 1s 2ms/step - loss: 0.3919 - main_output_loss: 0.3777 - aux_output_loss: 0.5197 - val_loss: 0.3877 - val_main_output_loss: 0.3728 - val_aux_output_loss: 0.5218
Epoch 19/20
363/363 [==============================] - 1s 2ms/step - loss: 0.3860 - main_output_loss: 0.3718 - aux_output_loss: 0.5135 - val_loss: 0.3741 - val_main_output_loss: 0.3599 - val_aux_output_loss: 0.5015
Epoch 20/20
363/363 [==============================] - 1s 2ms/step - loss: 0.3684 - main_output_loss: 0.3546 - aux_output_loss: 0.4929 - val_loss: 0.3694 - val_main_output_loss: 0.3553 - val_aux_output_loss: 0.4956
# Plot learning curves for every tracked loss (total, per-output, val).
curves = pd.DataFrame(history.history)
curves.plot()
plt.grid(True)
plt.gca().set_xlim(0, 25)  # fix the x-axis (epoch) range
plt.show()

(图:多输出模型的学习曲线)

# Evaluate: two inputs and two targets are required. With multiple
# outputs, evaluate() returns the total (weighted) loss followed by the
# individual loss of each output.
total_loss, main_loss, aux_loss = model.evaluate([X_test_A, X_test_B], [y_test, y_test])
print(total_loss, main_loss, aux_loss)
# e.g. (0.3692..., 0.3549..., 0.4974...)

# predict() returns one array per output.
y_pred_main, y_pred_aux = model.predict([X_new_A, X_new_B])
print(y_pred_main)  # e.g. [[3.24...], [3.41...], [1.24...]]
print(y_pred_aux)   # e.g. [[3.11...], [3.26...], [1.40...]]

评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值