Tensorflow迁移学习

使用别人已经训练好的模型来训练自己的任务

速度快,效果好

两种方法

        1.加层

        2.fine tune

首先我们需要安装tensorflow

pip install tensorflow-gpu==2.8.1

查看自己安装的版本

# Import TensorFlow and echo the installed version (notebook-style expression).
import tensorflow as tf
tf.__version__

导入自己所需要的包

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator

import os
import numpy as np
import matplotlib.pyplot as plt

下载数据集

# Public cats-vs-dogs subset hosted by Google for ML tutorials.
_URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip'

# Download once into the Keras cache (~/.keras/datasets) and unzip in place.
path_to_zip = tf.keras.utils.get_file('cats_and_dogs.zip', origin=_URL, extract=True)
PATH = os.path.join(os.path.dirname(path_to_zip), 'cats_and_dogs_filtered')
train_dir = os.path.join(PATH, 'train')
validation_dir = os.path.join(PATH, 'validation')

# Alternative backbones: densenet, inception, mobilenet, resnet, VGG.
# NOTE(review): with the default include_top=True this loads the full
# 1000-class ImageNet classifier, so its output is (batch, 1000) class
# probabilities, not a feature vector — confirm this is intended before
# stacking new Dense layers on top.
base_model = tf.keras.applications.ResNet50(weights='imagenet')

打印模型信息

base_model.summary()

设置参数

# Freeze the backbone: only newly added layers will train (transfer learning).
base_model.trainable = False

batch_size = 64
epochs = 10
# 224x224 is ResNet50's default input resolution.
IMG_HEIGHT = 224
IMG_WIDTH = 224

使用图像生成器

# No augmentation/rescaling arguments: images are served as raw 0-255 pixels.
# NOTE(review): ResNet50 weights were trained on inputs normalized with
# tf.keras.applications.resnet50.preprocess_input — confirm omitting it here
# is intentional.
train_image_generator = ImageDataGenerator()
validation_image_generator = ImageDataGenerator()

生成训练数据批量增强样本

train_data_gen = train_image_generator.flow_from_directory(batch_size=batch_size, directory=train_dir, shuffle=True, target_size=(IMG_HEIGHT, IMG_WIDTH), class_mode='binary')

获取训练数据批次样本

sample_training_images, _ = next(train_data_gen)

图像绘制

def plotImages(images_arr):
  """Show up to three images side by side with axes hidden."""
  fig, axis_grid = plt.subplots(1, 3, figsize=(10, 10))
  for image, axis in zip(images_arr, axis_grid.flatten()):
    axis.imshow(image)
    axis.axis('off')
  plt.tight_layout()
  plt.show()

生成验证数据批量增强样本

val_data_gen = validation_image_generator.flow_from_directory(batch_size=batch_size, directory=validation_dir, target_size=(IMG_HEIGHT, IMG_WIDTH), class_mode='binary')

显示出验证数据中前3张图像的可视化结果

# Grab one validation batch and visualize its first three images.
sample_testing_images, _ = next(val_data_gen)
plotImages(sample_testing_images[:3])

定义两个全连接层,用作模型预测

# Classification head: a 128-unit ReLU layer feeding a single sigmoid unit
# (binary cat-vs-dog output).
prediction_layer1 = tf.keras.layers.Dense(128, activation='relu')
prediction_layer2 = tf.keras.layers.Dense(1, activation='sigmoid')

# Stack the frozen backbone with the new head. This definition is present in
# the complete listing below but was missing from this step-by-step section;
# without it the following `model.summary()` raises NameError.
model = tf.keras.Sequential([
    base_model,
    prediction_layer1,
    prediction_layer2,
])

打印模型摘要信息

model.summary()

编译并训练一个网络模型

model.compile(optimizer=tf.keras.optimizers.Adam(), loss='binary_crossentropy', metrics=['accuracy'])

history = model.fit_generator(train_data_gen, epochs=epochs, validation_data=val_data_gen)

绘制训练过程中的准确率和损失曲线

# Plot training-vs-validation accuracy (top) and loss (bottom) per epoch.
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

plt.figure(figsize=(8, 8))
# 2x1 grid as in the complete listing (the original `subplot(2, 2, 1)` left
# an odd lone quarter-panel and never drew the loss curves).
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.ylabel('accuracy')
# Single ylim call (the original set [0, 1] and immediately overwrote it).
plt.ylim([min(plt.ylim()), 1])
# This panel shows accuracy — the original title wrongly said "Loss".
plt.title('Training and Validation Accuracy')

plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.ylabel('loss')
plt.ylim([0, 1.0])
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
plt.show()

微调神经网络模型中的部分层

# Fine-tuning: unfreeze the whole backbone first...
base_model.trainable = True

print("Number of layers in the base model:", len(base_model.layers))

# ...then re-freeze everything below this layer index, so only the top of
# the network is updated and early ImageNet features are preserved.
fine_tune_at = 150

for layer in base_model.layers[:fine_tune_at]:
  layer.trainable = False

# Inspect trainable vs non-trainable parameter counts after partial unfreezing.
base_model.summary()

定义一个新模型,编译并训练这个模型

# Fresh single-unit sigmoid head on the partially-unfrozen backbone.
prediction_layer = tf.keras.layers.Dense(1, activation='sigmoid')
model = tf.keras.Sequential([
    base_model,
    prediction_layer
])
# Recompiling is required after changing `trainable` flags for the change
# to take effect.
model.compile(optimizer=tf.keras.optimizers.Adam(),
              loss='binary_crossentropy',
              metrics=['accuracy']
              )
# fit_generator was removed in modern TF 2.x; fit handles generators.
# Fine-tuning runs for twice as many epochs as the feature-extraction phase.
history = model.fit(
    train_data_gen,
    epochs=epochs*2,
    validation_data=val_data_gen
)

绘制训练过程中的准确率和损失曲线

# Plot the fine-tuning curves. The article's text was truncated here — it
# ended with a bare `plt.ylabel` attribute access (a no-op); the completion
# below matches the complete listing at the end of the article.
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']

loss = history.history['loss']
val_loss = history.history['val_loss']

plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()), 1])
plt.title('Training and Validation Accuracy')

plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.ylabel('loss')
plt.ylim([0, 1.0])
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
plt.show()

完整代码

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator

import os
import numpy as np
import matplotlib.pyplot as plt

# Public cats-vs-dogs subset hosted by Google for ML tutorials.
_URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip'

# Download once into the Keras cache and unzip in place.
path_to_zip = tf.keras.utils.get_file('cats_and_dogs.zip', origin=_URL, extract=True)

PATH = os.path.join(os.path.dirname(path_to_zip), 'cats_and_dogs_filtered')

train_dir = os.path.join(PATH, 'train')
validation_dir = os.path.join(PATH, 'validation')

# NOTE(review): default include_top=True loads the full 1000-class ImageNet
# classifier, so the backbone outputs (batch, 1000) probabilities rather than
# features — confirm before stacking Dense layers on it.
base_model = tf.keras.applications.ResNet50(weights='imagenet')

base_model.summary()

# Phase 1 (feature extraction): freeze the backbone entirely.
base_model.trainable = False

batch_size = 64
epochs = 10
# ResNet50's default input resolution.
IMG_HEIGHT = 224
IMG_WIDTH = 224

# No augmentation/rescaling: raw 0-255 pixel values are fed to the model.
train_image_generator = ImageDataGenerator() 
validation_image_generator = ImageDataGenerator() 

# Shuffled training stream, resized to the model input, 0/1 labels.
train_data_gen = train_image_generator.flow_from_directory(batch_size=batch_size,
                              directory=train_dir,
                              shuffle=True,
                              target_size=(IMG_HEIGHT, IMG_WIDTH),
                              class_mode='binary')
                              
# One batch of images (labels discarded) for visualization below.
sample_training_images, _ = next(train_data_gen)

def plotImages(images_arr):
    """Display up to three images in a single row with axes hidden."""
    figure, axis_grid = plt.subplots(1, 3, figsize=(10, 10))
    for image, axis in zip(images_arr, axis_grid.flatten()):
        axis.imshow(image)
        axis.axis('off')
    plt.tight_layout()
    plt.show()
    
# Validation stream: resized like the training data, 0/1 labels.
val_data_gen = validation_image_generator.flow_from_directory(batch_size=batch_size,
                                directory=validation_dir,
                                target_size=(IMG_HEIGHT, IMG_WIDTH),
                               class_mode='binary')
                             
# Grab one validation batch and visualize its first three images.
sample_testing_images, _ = next(val_data_gen)
plotImages(sample_testing_images[:3])

# Classification head: 128-unit ReLU layer feeding a single sigmoid unit
# (binary cat-vs-dog output).
prediction_layer1 = tf.keras.layers.Dense(128,activation='relu')
prediction_layer2 = tf.keras.layers.Dense(1,activation='sigmoid')

# Frozen ImageNet backbone + new trainable head.
model = tf.keras.Sequential([
  base_model,
  prediction_layer1,
  prediction_layer2
])

model.summary()

model.compile(optimizer=tf.keras.optimizers.Adam(),
              loss='binary_crossentropy',
              metrics=['accuracy'])
              
# Model.fit_generator was deprecated and removed in modern TF 2.x (the
# article pins tensorflow 2.8.1); Model.fit accepts generators directly.
history = model.fit(
    train_data_gen,
    epochs=epochs,
    validation_data=val_data_gen
)

# With metrics=['accuracy'], TF 2.x records history under the keys
# 'accuracy'/'val_accuracy' — the original 'acc'/'val_acc' raise KeyError
# (the fine-tuning section later in this same listing already uses the
# correct keys).
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']

loss = history.history['loss']
val_loss = history.history['val_loss']

# Accuracy (top) and loss (bottom) per epoch, training vs validation.
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()),1])
plt.title('Training and Validation Accuracy')

plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
plt.ylim([0,1.0])
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
plt.show()

# Phase 2 (fine-tuning): unfreeze the backbone, then re-freeze all layers
# below `fine_tune_at` so only the top of the network is updated.
base_model.trainable = True

print("Number of layers in the base model: ", len(base_model.layers))

# Layers [0, fine_tune_at) keep their ImageNet weights.
fine_tune_at = 150

for layer in base_model.layers[:fine_tune_at]:
  layer.trainable =  False
  
base_model.summary()

# New single-unit sigmoid head for binary classification.
prediction_layer = tf.keras.layers.Dense(1,activation='sigmoid')
model = tf.keras.Sequential([
  base_model,
  prediction_layer
])
# Recompiling is required after changing `trainable` flags.
model.compile(optimizer=tf.keras.optimizers.Adam(),
              loss='binary_crossentropy',
              metrics=['accuracy'])
# fit_generator was removed in modern TF 2.x; fit handles generators.
history = model.fit(
    train_data_gen,
    epochs=epochs*2,
    validation_data=val_data_gen
)

# Fine-tuning curves: accuracy (top) and loss (bottom) per epoch.
history_dict = history.history
acc = history_dict['accuracy']
val_acc = history_dict['val_accuracy']

loss = history_dict['loss']
val_loss = history_dict['val_loss']

plt.figure(figsize=(8, 8))

ax_top = plt.subplot(2, 1, 1)
ax_top.plot(acc, label='Training Accuracy')
ax_top.plot(val_acc, label='Validation Accuracy')
ax_top.legend(loc='lower right')
ax_top.set_ylabel('Accuracy')
ax_top.set_ylim([min(ax_top.get_ylim()), 1])
ax_top.set_title('Training and Validation Accuracy')

ax_bottom = plt.subplot(2, 1, 2)
ax_bottom.plot(loss, label='Training Loss')
ax_bottom.plot(val_loss, label='Validation Loss')
ax_bottom.legend(loc='upper right')
ax_bottom.set_ylabel('loss')
ax_bottom.set_ylim([0, 1.0])
ax_bottom.set_title('Training and Validation Loss')
ax_bottom.set_xlabel('epoch')
plt.show()

  • 2
    点赞
  • 2
    收藏
    觉得还不错? 一键收藏
  • 1
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值