Keras Deep Learning: Transfer Learning and Fine-Tuning

1. Transfer Learning
Transfer learning means taking a network that someone else has already trained (for example, its convolutional feature-extraction part) and attaching your own classifier on top. This speeds up training and usually improves classification performance, as the walkthrough below shows.

'''
Created on 2018-08-28

'''
# Transfer learning: extract bottleneck features with a pretrained ResNet50
import keras
from keras.models import Model
from keras.applications import ResNet50
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
 
 
train_data_dir = "data/train"
valid_data_dir = "data/valid"
img_width, img_height = 224,224
batch_size = 32
train_samples_nums = 14000
valid_samples_nums = 1400
 
# Transfer learning uses ResNet50; include_top=False keeps only the
# feature-extraction part (pretrained weights are loaded from a local .h5 file)
model = ResNet50(weights="model/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5", include_top=False)
datagen = ImageDataGenerator(rescale=1. / 255)
 
# Data generators for the train and valid sets
train_generator = datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode=None,  # None: yield only image batches, no labels
    shuffle=False)    # keep directory order so labels can be rebuilt later
valid_generator = datagen.flow_from_directory(
    valid_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode=None,
    shuffle=False)
 
# Extract bottleneck features with the headless ResNet and save them as .npy.
# predict_generator runs for samples // batch_size steps, so the final partial
# batch is skipped (14000 and 1400 are not multiples of 32).
bottleneck_train_features = model.predict_generator(train_generator, train_samples_nums // batch_size)
np.save(open('train_features.npy', 'wb'), bottleneck_train_features)
bottleneck_valid_features = model.predict_generator(valid_generator, valid_samples_nums // batch_size)
np.save(open('valid_features.npy', 'wb'), bottleneck_valid_features)
 
# Reload the saved features and build matching labels. Because shuffle=False,
# the generator walks the class subfolders in sorted order, and its .classes
# attribute holds the label of every sample; trim it to the number of samples
# actually predicted (the last partial batch was skipped above).
train_data = np.load(open('train_features.npy', 'rb'))
train_labels = train_generator.classes[:len(train_data)]
valid_data = np.load(open('valid_features.npy', 'rb'))
valid_labels = valid_generator.classes[:len(valid_data)]
# That is the transfer-learning part: features extracted by a pretrained network.
# Any classifier can now be trained on them: SVM, MLP, CNN, RNN, random forest,
# XGBoost, LightGBM, ...
# For example, an SVM (scikit-learn expects 2-D input, so flatten the features):
from sklearn.svm import SVC
clf = SVC(kernel="linear", probability=True)
clf.fit(train_data.reshape(len(train_data), -1), train_labels)
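
To gauge the classifier, score it on the held-out validation features. As an alternative to the SVM, a small MLP head in Keras also works well on bottleneck features; a minimal sketch (the layer sizes and epoch count are illustrative, not from the original post):

# Evaluate the SVM on the validation features
print(clf.score(valid_data.reshape(len(valid_data), -1), valid_labels))

# Alternative classifier: a small MLP trained directly on the saved features
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten

top_model = Sequential()
top_model.add(Flatten(input_shape=train_data.shape[1:]))  # (7, 7, 2048) -> 100352
top_model.add(Dense(256, activation='relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(1, activation='sigmoid'))  # binary 0/1 labels from above
top_model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
top_model.fit(train_data, train_labels,
              epochs=10, batch_size=batch_size,
              validation_data=(valid_data, valid_labels))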

2. Fine-Tuning
Fine-tuning usually goes together with transfer learning: take someone else's pretrained network and add your own classification layers on top (typically extra convolutional layers plus fully connected layers, or just fully connected layers). The pretrained layers can be left fully trainable or only partially trainable, and the learning rate should be lowered, typically to 0.0001-0.001, as in the code below.

'''
Created on 2018-08-28

'''
# Fine-tuning
# First define a ResNet by hand; all of this code can be copied from
# keras.applications.resnet50 (the source of ResNet50)
from keras.layers import Input, Dense, Activation, Flatten, Conv2D, BatchNormalization, MaxPooling2D
from keras import backend as K
from keras import layers
from keras.models import Model
from keras.optimizers import SGD

# Residual (identity) block of ResNet: the shortcut is the raw input, so the
# block's input and output shapes must match
def identity_block(input_tensor, kernel_size, filters, stage, block):

    filters1, filters2, filters3 = filters
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = Conv2D(filters1, (1, 1), name=conv_name_base + '2a')(input_tensor)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters2, kernel_size,
               padding='same', name=conv_name_base + '2b')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters3, (1, 1), name=conv_name_base + '2c')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)

    x = layers.add([x, input_tensor])
    x = Activation('relu')(x)
    return x


# Conv block: the shortcut path has its own 1x1 convolution, so this block can
# change the channel count and (via strides) the spatial size
def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):
    
    filters1, filters2, filters3 = filters
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = Conv2D(filters1, (1, 1), strides=strides,
               name=conv_name_base + '2a')(input_tensor)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters2, kernel_size, padding='same',
               name=conv_name_base + '2b')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)

    x = Conv2D(filters3, (1, 1), name=conv_name_base + '2c')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)

    shortcut = Conv2D(filters3, (1, 1), strides=strides,
                      name=conv_name_base + '1')(input_tensor)
    shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1')(shortcut)

    x = layers.add([x, shortcut])
    x = Activation('relu')(x)
    return x
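
# Quick sanity check (illustrative, not part of the final model):
# identity_block preserves its input shape, while conv_block changes the
# channel count and, with the default strides=(2, 2), halves the spatial size.
probe = Input(shape=(56, 56, 256))
print(K.int_shape(identity_block(probe, 3, [64, 64, 256], stage=9, block='i')))  # (None, 56, 56, 256)
print(K.int_shape(conv_block(probe, 3, [128, 128, 512], stage=9, block='c')))    # (None, 28, 28, 512)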

# Assemble the full ResNet50 backbone with a custom 2-way classifier on top
def MyResNet():
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1
    input_shape_img = (224, 224, 3)
    
    img_input = Input(shape=input_shape_img)
    
    x = Conv2D(
        64, (7, 7), strides=(2, 2), padding='same', name='conv1')(img_input)
    x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')

    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')

    x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
    
    # Add your own fully connected classifier (note: the stock ResNet50 applies
    # an AveragePooling2D before flattening; here the feature map is flattened directly)
    x = Flatten()(x)
    x = Dense(2, activation='softmax', name='fc2')(x)
    
    
    model = Model(img_input, x, name='mymodel')
    return model

model = MyResNet()
model.summary()
# Load the pretrained weights. by_name=True matches layers by name: every layer
# named as in the stock ResNet50 receives pretrained weights, while the new
# classifier layer 'fc2' keeps its random initialization.
resnet_weight = "model/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5"
model.load_weights(resnet_weight, by_name=True)
# You can also control which layers are trainable; here the first 10 are frozen
for layer in model.layers[:10]:
    layer.trainable = False
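# (Sketch) Inspect layer indices, names, and trainable flags to verify the freeze:
for i, layer in enumerate(model.layers):
    print(i, layer.name, layer.trainable)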
# Compile the model; fine-tuning generally uses a low learning rate
sgd = SGD(lr=0.0001, decay=0.00002, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
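
With the model compiled, training runs on the same directory layout as in part one. A minimal sketch, assuming the data/train and data/valid folders from above (the augmentation settings and epoch count are illustrative):

from keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(rescale=1. / 255, horizontal_flip=True)
valid_datagen = ImageDataGenerator(rescale=1. / 255)

# class_mode='categorical' yields one-hot labels to match softmax + categorical_crossentropy
train_gen = train_datagen.flow_from_directory(
    "data/train", target_size=(224, 224), batch_size=32, class_mode='categorical')
valid_gen = valid_datagen.flow_from_directory(
    "data/valid", target_size=(224, 224), batch_size=32, class_mode='categorical')

model.fit_generator(
    train_gen,
    steps_per_epoch=train_gen.samples // 32,
    epochs=5,
    validation_data=valid_gen,
    validation_steps=valid_gen.samples // 32)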