Basic Usage of Metrics
- Create a metric object (a "meter") and configure how it aggregates data
- update_state adds data
- result().numpy() retrieves the current result
- reset_states clears the accumulated data (a minimal self-contained sketch of this workflow follows the list)
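A minimal self-contained sketch of the full lifecycle, using toy values that are purely illustrative (not taken from the MNIST example below):

import tensorflow as tf
from tensorflow.keras import metrics

acc_meter = metrics.Accuracy()                  # step 1: create the meters
loss_meter = metrics.Mean()

acc_meter.update_state([1, 2, 3], [1, 2, 0])    # step 2: add labels and predictions
loss_meter.update_state([0.7, 0.3])             # Mean keeps a running average of every value it has seen

print("acc:", acc_meter.result().numpy())       # step 3: 0.6667 (2 of 3 correct)
print("loss:", loss_meter.result().numpy())     # 0.5

acc_meter.reset_states()                        # step 4: clear the accumulated state
loss_meter.reset_states()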
Step 1: create the meters (objects that process the measured values)
acc_meter = metrics.Accuracy()
loss_meter = metrics.Mean()
Step 2: update (add) data
acc_meter.update_state(y_true, predict)
loss_meter.update_state(loss)
Step 3: retrieve the results
print(step, "loss", loss_meter.result().numpy())
print(step, "Evaluate Acc:", acc_meter.result().numpy())
Step 4: clear the buffers
if step % 100 == 0:
    print(step, "loss", loss_meter.result().numpy())
    loss_meter.reset_states()
if step % 500 == 0:
    print(step, "Evaluate Acc:", acc_meter.result().numpy())
    acc_meter.reset_states()
Full Test Example
import tensorflow as tf
import numpy as np
# Enable memory growth on any available GPUs
gpus = tf.config.experimental.list_physical_devices("GPU")
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)
print("Number of physical GPUs:", len(gpus))
batch_size = 128
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
print(x_train.shape, y_train.shape, x_test.shape, y_test.shape)
def pre_process(x, y):
    # Scale pixels to [0, 1] and cast labels to int32
    x = tf.cast(x, tf.float32) / 255.0
    y = tf.cast(y, tf.int32)
    return x, y
db_train = tf.data.Dataset.from_tensor_slices((x_train, y_train))
db_train = db_train.map(pre_process).shuffle(60000).batch(batch_size).repeat(10)
db_test = tf.data.Dataset.from_tensor_slices((x_test, y_test))
db_test = db_test.map(pre_process).batch(batch_size)
db_sample = iter(db_train)
sample = next(db_sample)
print("X:", sample[0].shape, "Y:", sample[1].shape)
model = tf.keras.Sequential([tf.keras.layers.Dense(256, activation="relu"),
                             tf.keras.layers.Dense(128, activation="relu"),
                             tf.keras.layers.Dense(64, activation="relu"),
                             tf.keras.layers.Dense(32, activation="relu"),
                             tf.keras.layers.Dense(10)])  # raw logits for 10 classes
model.build(input_shape=(None, 28 * 28))
model.summary()
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
acc_meter = tf.keras.metrics.Accuracy()
loss_meter = tf.keras.metrics.Mean()
for step, (x_batch, y_batch) in enumerate(db_train):
    with tf.GradientTape() as tape:
        x_batch = tf.reshape(x_batch, (-1, 28 * 28))
        y_one_hot = tf.one_hot(y_batch, depth=10)
        y_out = model(x_batch)
        loss = tf.reduce_mean(tf.losses.categorical_crossentropy(y_one_hot, y_out, from_logits=True))
        loss_meter.update_state(loss)  # accumulate the per-step loss in the Mean meter
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    if step % 100 == 0:
        # Report the averaged loss, then clear the meter for the next 100 steps
        print(step, "loss:", loss_meter.result().numpy())
        loss_meter.reset_states()
    if step % 500 == 0:
        # Evaluate on the test set: manual counting and the Accuracy meter should agree
        total, total_correct = 0.0, 0.0
        acc_meter.reset_states()
        for _, (x_test_batch, y_test_batch) in enumerate(db_test):
            x_test_batch = tf.reshape(x_test_batch, (-1, 28 * 28))
            y_test_out = model(x_test_batch)
            y_test_out = tf.argmax(y_test_out, axis=1)
            predict = tf.cast(y_test_out, dtype=tf.int32)
            correct = tf.equal(predict, y_test_batch)
            total_correct += tf.reduce_sum(tf.cast(correct, tf.int32)).numpy()
            total += x_test_batch.shape[0]
            acc_meter.update_state(y_test_batch, predict)
        acc = total_correct / total
        print(step, "Acc:", acc, acc_meter.result().numpy())