再次封装的VGG16(可调整的全连接层部分)

一、模型

可调整每个全连接层的大小,是否启用dropout。

pooling可选值:max, avg

from keras import Model
from keras.layers import Dense, Layer, BatchNormalization, ReLU, Dropout,Input
from keras.applications.vgg16 import VGG16


class MyVGG16(Model):
    """VGG16 convolutional base with an adjustable fully connected head.

    Reuses the ImageNet-pretrained VGG16 convolutional layers (top classifier
    removed) and appends two configurable FC blocks plus a softmax output.

    Args:
        classes: number of output classes.
        pooling: global pooling applied after the conv base ('max' or 'avg').
        input_shape: input image shape, e.g. (32, 32, 3).
        fc1: width of the first fully connected layer.
        fc2: width of the second fully connected layer.
        dp1: dropout rate for the first FC block (0 disables dropout).
        dp2: dropout rate for the second FC block (0 disables dropout).
    """

    def __init__(self, classes: int, pooling: str = 'max', input_shape=None,
                 fc1: int = 4096, fc2: int = 4096, dp1: float = 0, dp2: float = 0):
        super().__init__()

        self.i = Input(shape=input_shape)  # used to reveal output shape

        # Extract the pretrained VGG16 convolutional layers.
        # NOTE: `classes` is ignored by VGG16 when include_top=False;
        # it is forwarded here only for documentation purposes.
        self.ls = list(VGG16(
            weights='imagenet',
            include_top=False,
            classes=classes,
            input_shape=input_shape,
            pooling=pooling
        ).layers)

        # fully connected classifier head
        self.fc1 = FC(fc1, dp1)
        self.fc2 = FC(fc2, dp2)
        self.fc3 = Dense(classes, activation="softmax")

        # One symbolic forward pass so every layer's output shape is built
        # (lets model.summary() report shapes before training).
        self.call(self.i)

    def call(self, inputs, training=None, mask=None):
        # Chain the VGG16 conv layers, then the custom head.
        outputs = inputs
        for layer in self.ls:
            outputs = layer(outputs)
        outputs = self.fc1(outputs)
        outputs = self.fc2(outputs)
        return self.fc3(outputs)


class FC(Layer):
    """Fully connected block: Dense -> BatchNormalization -> ReLU -> Dropout.

    Args:
        num: number of units in the dense layer.
        dp: dropout rate; a value of 0 replaces the dropout layer with a
            no-op identity pass-through.
    """

    def __init__(self, num, dp=0):
        super().__init__()

        self.fc = Dense(num)
        self.nb = BatchNormalization()
        self.relu = ReLU()
        # Skip dropout entirely (plain identity) when the rate is zero.
        self.dp = Dropout(dp) if dp != 0 else (lambda x: x)

    def call(self, inputs, *args, **kwargs):
        x = self.fc(inputs)
        x = self.nb(x)
        x = self.relu(x)
        return self.dp(x)

二、用例

import tensorflow as tf
from tensorflow import keras
from VGG16 import MyVGG16
from keras.datasets import cifar10

# ---- GPU setup: cap memory on one device so it can be shared ----
gpu_index = 0
gpus = tf.config.experimental.list_physical_devices('GPU')
if not gpus:
    print("Got no GPUs")
else:
    try:
        tf.config.experimental.set_virtual_device_configuration(
            gpus[gpu_index],
            [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=3072)]  # limit the size of GPU memory
        )
    except RuntimeError as e:
        print(e)

# ---- data: CIFAR-10 images, pixel values scaled into [0, 1] ----
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train, x_test = x_train / 255, x_test / 255

# ---- model construction ----
model = MyVGG16(10, fc1=256, fc2=128, dp1=0.2, dp2=0.2, input_shape=(32, 32, 3))
model.build((None, 32, 32, 3))
model.compile(
    optimizer=keras.optimizers.Adam(learning_rate=0.001),
    loss=keras.losses.SparseCategoricalCrossentropy(),
    metrics=['accuracy'],
)
model.summary()

# ---- training and evaluation ----
model.fit(x_train, y_train, epochs=100, batch_size=32)
y_eva = model.evaluate(x_test, y_test, return_dict=True)

github: GitHub - VAMPIREONETWO/VGG16: Encapsulated VGG16 with Adjustable Fully Connected Layers

  • 7
    点赞
  • 5
    收藏
    觉得还不错? 一键收藏
  • 打赏
    打赏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

VAMOT

您的鼓励将是我创作的最大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值