一、模型
可调整每个全连接层的大小,以及是否启用 dropout。
pooling 可选值:max、avg。
from keras import Model
from keras.layers import Dense, Layer, BatchNormalization, ReLU, Dropout,Input
from keras.applications.vgg16 import VGG16
class MyVGG16(Model):
    """VGG16 backbone (ImageNet weights, no top) followed by two
    configurable fully connected blocks and a softmax classifier.

    Args:
        classes: number of output classes.
        pooling: global pooling applied after the conv base ('max' or 'avg').
        input_shape: input image shape, e.g. (32, 32, 3).
        fc1, fc2: widths of the two fully connected layers.
        dp1, dp2: dropout rates for the two FC blocks; 0 disables dropout.
    """

    def __init__(self, classes: int, pooling: str = 'max', input_shape=None,
                 fc1: int = 4096, fc2: int = 4096, dp1=0, dp2=0):
        super().__init__()
        # Symbolic input used only to trace the model once so that output
        # shapes are known (enables model.summary() before fit).
        self.i = Input(shape=input_shape)
        # Extract the pretrained convolutional layers of VGG16.
        # NOTE: with include_top=False the `classes` argument is ignored by
        # VGG16 itself; classification is handled by our own head below.
        self.ls = list(VGG16(
            weights='imagenet',
            include_top=False,
            classes=classes,
            input_shape=input_shape,
            pooling=pooling,
        ).layers)
        # Classification head: two FC blocks plus the softmax output layer.
        self.fc1 = FC(fc1, dp1)
        self.fc2 = FC(fc2, dp2)
        self.fc3 = Dense(classes, activation="softmax")
        self.call(self.i)

    def call(self, inputs, training=None, mask=None):
        outputs = inputs
        # Run the VGG16 conv base sequentially.
        for layer in self.ls:
            outputs = layer(outputs)
        # Forward `training` so BatchNorm/Dropout in the FC blocks switch
        # correctly between train and inference behavior.
        outputs = self.fc1(outputs, training=training)
        outputs = self.fc2(outputs, training=training)
        outputs = self.fc3(outputs)
        return outputs
class FC(Layer):
    """Fully connected block: Dense -> BatchNorm -> ReLU -> optional Dropout.

    Args:
        num: number of units in the dense layer.
        dp: dropout rate; 0 disables dropout entirely.
    """

    def __init__(self, num, dp=0):
        super().__init__()
        # Attribute names (fc/nb/relu/dp) are kept for checkpoint
        # compatibility with previously saved weights.
        self.fc = Dense(num)
        self.nb = BatchNormalization()
        self.relu = ReLU()
        # BUGFIX: the original stored `lambda x: x` when dp == 0; a plain
        # lambda is not tracked by Keras and cannot accept a `training`
        # kwarg. Use None and skip the step instead.
        self.dp = Dropout(dp) if dp != 0 else None

    def call(self, inputs, training=None, **kwargs):
        outputs = self.fc(inputs)
        # BatchNorm and Dropout must know the training phase: BN uses batch
        # statistics in training and moving averages at inference; Dropout
        # is active only in training.
        outputs = self.nb(outputs, training=training)
        outputs = self.relu(outputs)
        if self.dp is not None:
            outputs = self.dp(outputs, training=training)
        return outputs
二、用例
import tensorflow as tf
from tensorflow import keras
from VGG16 import MyVGG16
from keras.datasets import cifar10
# GPU setup: cap memory on the chosen device so other processes can share it.
GPU_INDEX = 0
MEMORY_LIMIT_MB = 3072

gpus = tf.config.experimental.list_physical_devices('GPU')
if not gpus:
    print("Got no GPUs")
else:
    try:
        tf.config.experimental.set_virtual_device_configuration(
            gpus[GPU_INDEX],
            # Limit the size of GPU memory available to this process.
            [tf.config.experimental.VirtualDeviceConfiguration(
                memory_limit=MEMORY_LIMIT_MB)],
        )
    except RuntimeError as err:
        # Virtual devices must be configured before GPUs are initialized.
        print(err)
# Data: CIFAR-10 images scaled to [0, 1]. Cast to float32 first —
# dividing uint8 arrays by 255 would otherwise produce float64,
# doubling memory for no precision benefit.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype("float32") / 255
x_test = x_test.astype("float32") / 255

# Model construction: small FC head suited to CIFAR-10's 32x32 inputs.
model = MyVGG16(10, fc1=256, fc2=128, dp1=0.2, dp2=0.2, input_shape=(32, 32, 3))
model.build((None, 32, 32, 3))
# Sparse CE matches the integer (non-one-hot) labels cifar10 provides.
model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.001),
              loss=keras.losses.SparseCategoricalCrossentropy(),
              metrics=['accuracy'])
model.summary()

# Train, then evaluate on the held-out test split.
model.fit(x_train, y_train, epochs=100, batch_size=32)
y_eva = model.evaluate(x_test, y_test, return_dict=True)
GitHub: https://github.com/VAMPIREONETWO/VGG16 — Encapsulated VGG16 with Adjustable Fully Connected Layers