# A casual cat-vs-dog classifier. Newer TensorFlow versions emit all sorts of
# odd warnings unless the data is built as a tf.data Dataset; the official docs
# scatter the relevant examples across several pages, so this script simply
# consolidates them into one end-to-end view.
import matplotlib.pyplot as plt
import tensorflow as tf
BATCH_SIZE = 32
IMG_SIZE = (160, 160)

# Dataset archive: https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip
def _load_split(directory):
    """Read one labelled image folder into a shuffled, batched tf.data.Dataset."""
    return tf.keras.utils.image_dataset_from_directory(
        directory,
        batch_size=BATCH_SIZE,
        image_size=IMG_SIZE,
        shuffle=True,
    )

train_dataset = _load_split("tmp/cats_and_dogs/train")
validation_dataset = _load_split("tmp/cats_and_dogs/validation")

# Subfolder names double as the class labels.
class_names = train_dataset.class_names
print("类型:", class_names)
# plt.figure(figsize=(10, 10))
# for images, labels in train_dataset.take(1):
# for i in range(9):
# ax = plt.subplot(3, 3, i + 1)
# plt.imshow(images[i].numpy().astype("uint8"))
# plt.title(class_names[labels[i]])
# plt.axis("off")
#
# plt.show()
# Carve a test split off the validation data: one fifth of the validation
# batches become the test set, the remainder stays for validation.
# Dataset.cardinality() replaces the older tf.data.experimental.cardinality
# alias (same semantics, non-experimental API).
val_batches = validation_dataset.cardinality()
test_dataset = validation_dataset.take(val_batches // 5)
validation_dataset = validation_dataset.skip(val_batches // 5)
# int() keeps the printed value a plain number rather than a Tensor repr.
print(f"Number of validation batches: {int(validation_dataset.cardinality())}")
print(f"Number of test batches: {int(test_dataset.cardinality())}")

# Prefetch so host-side data preparation overlaps with model execution.
AUTOTUNE = tf.data.AUTOTUNE
train_dataset = train_dataset.prefetch(buffer_size=AUTOTUNE)
validation_dataset = validation_dataset.prefetch(buffer_size=AUTOTUNE)
test_dataset = test_dataset.prefetch(buffer_size=AUTOTUNE)
# Input geometry: RGB images at IMG_SIZE resolution.
IMG_SHAPE = IMG_SIZE + (3,)

# Pre-trained MobileNetV2 backbone with ImageNet weights and the classifier
# head stripped (include_top=False); frozen so only the new head trains.
base_model = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE,
                                               include_top=False,
                                               weights='imagenet')
base_model.trainable = False

model = tf.keras.Sequential([
    # Use IMG_SHAPE rather than a hard-coded (160, 160, 3) so the input
    # stays in sync with IMG_SIZE and the backbone's input_shape above.
    tf.keras.Input(shape=IMG_SHAPE),
    # Lightweight augmentation; these layers are active only during training.
    tf.keras.layers.RandomFlip('horizontal'),
    tf.keras.layers.RandomRotation(0.2),
    # Map [0, 255] pixel values to [-1, 1], the range MobileNetV2 expects.
    tf.keras.layers.Rescaling(1./127.5, offset=-1),
    base_model,
    tf.keras.layers.GlobalAveragePooling2D(),
    tf.keras.layers.Dropout(0.2),
    # Single logit for binary cat-vs-dog classification
    # (paired with BinaryCrossentropy(from_logits=True) at compile time).
    tf.keras.layers.Dense(1)
])
# for image, _ in train_dataset.take(1):
# plt.figure(figsize=(10, 10))
# first_image = image[0]
# for i in range(9):
# ax = plt.subplot(3, 3, i + 1)
# augmented_image = data_augmentation(tf.expand_dims(first_image, 0))
# plt.imshow(augmented_image[0] / 255)
# plt.axis('off')
# Create the base model from the pre-trained model MobileNet V2
# image_batch, label_batch = next(iter(train_dataset))
# feature_batch = base_model(image_batch)
# print(feature_batch.shape)
# Optimizer hyper-parameters (beta_1/beta_2/epsilon are spelled out
# explicitly even though they match Adam's defaults).
base_learning_rate = 0.0001
optimizer = tf.keras.optimizers.Adam(
    learning_rate=base_learning_rate,
    beta_1=0.9,
    beta_2=0.999,
    epsilon=1e-08,
)
# The head outputs raw logits, hence from_logits=True on the loss.
model.compile(
    optimizer=optimizer,
    loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
    metrics=['accuracy'],
)

initial_epochs = 10
history = model.fit(
    train_dataset,
    validation_data=validation_dataset,
    epochs=initial_epochs,
)
# Retrieve one batch from the test set and visualize predictions vs. labels.
# Builtin next(iter(...)) instead of the non-standard .next() method.
image_batch, label_batch = next(iter(test_dataset.as_numpy_iterator()))
logits = model.predict_on_batch(image_batch).flatten()
# The model returns logits; squash with a sigmoid, then threshold at 0.5.
probabilities = tf.nn.sigmoid(logits)
# Convert to a plain numpy int array once, so the values can safely index
# the Python list class_names below (instead of indexing with tf Tensors).
predictions = tf.where(probabilities < 0.5, 0, 1).numpy()
print('Predictions:\n', predictions)
print('Labels:\n', label_batch)

plt.figure(figsize=(10, 10))
# Guard against a final batch smaller than 9 images, which would
# otherwise raise an IndexError.
for i in range(min(9, len(image_batch))):
    ax = plt.subplot(3, 3, i + 1)
    plt.imshow(image_batch[i].astype("uint8"))
    plt.title(class_names[predictions[i]])
    plt.axis("off")
plt.show()