metric的使用
metric是指标的意思,如果使用均方差作为指标,那么metric的使用可以如下:
# Stateful Keras metric demo: successive calls accumulate into a running
# result; reset_states() clears the accumulated state.
mse = keras.metrics.MeanSquaredError()
print(mse([5.], [2.]))     # (5-2)^2 = 9
print(mse([0.], [1.]))     # running mean of {9, 1} = 5
print(mse.result())        # still the accumulated value: 5
mse.reset_states()         # drop everything accumulated so far
mse([1.], [3.])            # fresh state: (1-3)^2 = 4
print(mse.result())
注:metric 会累计之前每次调用的结果;如果不想累计,就先用 metric.reset_states() 清空之前累计的状态。
tf.Tensor(9.0, shape=(), dtype=float32)
tf.Tensor(5.0, shape=(), dtype=float32)
tf.Tensor(5.0, shape=(), dtype=float32)
tf.Tensor(4.0, shape=(), dtype=float32)
keras手动模拟model.fit操作
# 在 fit 的过程中会执行如下过程:
# 1. 按 batch 遍历训练集,并累计训练集上的 metric
#    1.1 对每个 batch 自动求导并更新参数
# 2. 每个 epoch 结束后,在验证集上计算 metric
# Training-loop hyperparameters and the stateful objects the loop below uses.
epochs = 100
batch_size = 32
# Number of mini-batches drawn per epoch (integer division drops the remainder).
steps_per_epoch = len(x_train_scaled) // batch_size
# Plain SGD with the Keras default learning rate.
optimizer = keras.optimizers.SGD()
# Running train-set MSE, accumulated across the batches of one epoch.
metric = keras.metrics.MeanSquaredError()
def random_batch(x, y, batch_size=32):
    """Return a mini-batch of `batch_size` rows sampled uniformly
    (with replacement) from the aligned arrays x and y."""
    rows = np.random.randint(0, len(x), size=batch_size)
    return x[rows], y[rows]
# Small regression MLP: one ReLU hidden layer feeding a single linear output.
hidden_layer = keras.layers.Dense(30, activation='relu',
                                  input_shape=x_train.shape[1:])
output_layer = keras.layers.Dense(1)
model = keras.models.Sequential([hidden_layer, output_layer])
# Hand-written equivalent of model.fit(): per-batch gradient descent with a
# running train metric, plus a once-per-epoch validation pass.
for epoch in range(epochs):
    # Start every epoch with a clean metric so the running MSE is not
    # mixed with the previous epoch's batches.
    metric.reset_states()
    for step in range(steps_per_epoch):
        # Draw a random mini-batch (sampling with replacement).
        x_batch, y_batch = random_batch(x_train_scaled, y_train,
                                        batch_size)
        with tf.GradientTape() as tape:
            # Forward pass.
            y_pred = model(x_batch)
            # NOTE(review): the model outputs shape (batch, 1); if y_batch
            # is rank-1 this broadcasts to (batch, batch) inside the loss —
            # confirm y_train's shape, or squeeze y_pred.
            loss = tf.reduce_mean(
                keras.losses.mean_squared_error(y_batch, y_pred))
            # Accumulate this batch into the epoch's running train MSE.
            metric(y_batch, y_pred)
        # Differentiate the loss w.r.t. the model weights by hand.
        grads = tape.gradient(loss, model.variables)
        # Pair each gradient with its variable and apply the SGD update.
        optimizer.apply_gradients(zip(grads, model.variables))
        # "\r" rewrites the same console line while the epoch runs.
        print("\rEpoch", epoch, " train mse:",
              metric.result().numpy(), end="")
    # Once-per-epoch validation pass.
    y_valid_pred = model(x_valid_scaled)
    # A single forward pass is enough here, so use the plain loss function
    # instead of a stateful, accumulating metric. Arguments follow the
    # Keras y_true-first convention (as in the training loss above); MSE
    # is symmetric, so the computed value is unchanged.
    valid_loss = tf.reduce_mean(
        keras.losses.mean_squared_error(y_valid, y_valid_pred))
    print("\t", "valid mse: ", valid_loss.numpy())