import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import Flatten, Dense

class MnistModel(Model):
    def __init__(self):
        super(MnistModel, self).__init__()
        self.flatten = Flatten()                    # flatten 28x28 images into 784-dim vectors
        self.d1 = Dense(128, activation='relu')     # hidden fully connected layer
        self.d2 = Dense(10, activation='softmax')   # 10-way class probabilities

    def call(self, x):
        x = self.flatten(x)
        x = self.d1(x)
        y = self.d2(x)
        return y

model = MnistModel()
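A minimal training sketch for this subclassed model; the MNIST loading and hyperparameters below are illustrative assumptions, not taken from the original text:

# Illustrative training sketch (assumed setup, not from the original post).
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0   # scale pixels to [0, 1]

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train, y_train, epochs=5, validation_data=(x_test, y_test))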
from tensorflow.keras.layers import Input, Conv2D, MaxPool2D, concatenate
from tensorflow.keras.models import Model
# Define the model input; the shape excludes the batch dimension
inputs = Input(shape=(28, 28, 192))
# Functional-API style: calling Conv2D(...)(inputs) feeds the inputs tensor through the layer
tower_1 = Conv2D(filters=64, kernel_size=(1, 1), strides=(1, 1), padding='same', activation='relu')(inputs)
# Branch 2: 1x1 bottleneck, then a 3x3 convolution applied to tower_2
tower_2 = Conv2D(filters=96, kernel_size=(1, 1), strides=(1, 1), padding='same', activation='relu')(inputs)
tower_2 = Conv2D(filters=128, kernel_size=(3, 3), strides=(1, 1), padding='same', activation='relu')(tower_2)
# Branch 3: 1x1 bottleneck, then a 5x5 convolution applied to tower_3
tower_3 = Conv2D(filters=16, kernel_size=(1, 1), strides=(1, 1), padding='same', activation='relu')(inputs)
tower_3 = Conv2D(filters=32, kernel_size=(5, 5), strides=(1, 1), padding='same', activation='relu')(tower_3)
# Branch 4: 3x3 max pooling, then a 1x1 convolution applied to the pooled tensor
pooling = MaxPool2D(pool_size=(3, 3), strides=(1, 1), padding='same')(inputs)
pooling = Conv2D(filters=32, kernel_size=(1, 1), strides=(1, 1), padding='same', activation='relu')(pooling)
# concatenate merges the 4 branches; axis=3 concatenates along the channel dimension
outputs = concatenate([tower_1, tower_2, tower_3, pooling], axis=3)
# Build the model by specifying its input and output tensors
model = Model(inputs=inputs, outputs=outputs)
# Print a summary of the model
model.summary()
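As a quick sanity check (the dummy tensor below is an illustrative addition, not from the original post), the concatenated output should carry 64 + 128 + 32 + 32 = 256 channels:

# Illustrative check: feed a random batch through the functional model.
dummy = tf.random.normal((1, 28, 28, 192))
print(model(dummy).shape)   # expected: (1, 28, 28, 256)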
Inspect the shape of the input dataset:
print(tf.shape(x_train))
For a subclassed model, specify the size of the model input with build() before building it:
model.build(input_shape=(None, 28, 28, 1))
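For example, a minimal sketch with the MnistModel defined above: build() creates the weights, so summary() can be printed before any data has passed through the model:

# Illustrative sketch: build() materializes the weights of a subclassed model,
# so summary() can report parameter counts before the first forward pass.
mnist_model = MnistModel()
mnist_model.build(input_shape=(None, 28, 28, 1))
mnist_model.summary()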
Models can also be nested inside other models:
from tensorflow.keras.layers import BatchNormalization, Activation, GlobalAveragePooling2D

class ConvBNRelu(Model):
    def __init__(self, ch, kernelsz=3, strides=1, padding='same'):
        super(ConvBNRelu, self).__init__()
        self.model = tf.keras.models.Sequential([
            Conv2D(ch, kernelsz, strides=strides, padding=padding),
            BatchNormalization(),
            Activation('relu')
        ])

    def call(self, x):
        # With training=False, BN normalizes with the moving mean/variance
        # accumulated during training (an estimate over the whole training set);
        # with training=True, it uses the current batch's mean/variance.
        # training=False usually gives better results at inference time.
        x = self.model(x, training=False)
        return x
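The effect of this flag can be demonstrated on a standalone BatchNormalization layer; this is an illustrative sketch, not part of the original code:

# Illustrative sketch: the training flag changes which statistics BN uses.
bn = BatchNormalization()
x = tf.random.normal((4, 8))
y_batch_stats = bn(x, training=True)     # normalized with this batch's mean/variance
y_moving_stats = bn(x, training=False)   # normalized with the moving statistics
# With batch statistics the output mean is ~0; the moving statistics start at
# mean 0 / variance 1, so the second call roughly returns x unchanged.
print(tf.reduce_mean(y_batch_stats).numpy(), tf.reduce_mean(y_moving_stats).numpy())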
class InceptionBlk(Model):
    def __init__(self, ch, strides=1):
        super(InceptionBlk, self).__init__()
        self.ch = ch
        self.strides = strides
        self.c1 = ConvBNRelu(ch, kernelsz=1, strides=strides)
        self.c2_1 = ConvBNRelu(ch, kernelsz=1, strides=strides)
        self.c2_2 = ConvBNRelu(ch, kernelsz=3, strides=1)
        self.c3_1 = ConvBNRelu(ch, kernelsz=1, strides=strides)
        self.c3_2 = ConvBNRelu(ch, kernelsz=5, strides=1)
        self.p4_1 = MaxPool2D(3, strides=1, padding='same')
        self.c4_2 = ConvBNRelu(ch, kernelsz=1, strides=strides)

    def call(self, x):
        x1 = self.c1(x)
        x2_1 = self.c2_1(x)
        x2_2 = self.c2_2(x2_1)
        x3_1 = self.c3_1(x)
        x3_2 = self.c3_2(x3_1)
        x4_1 = self.p4_1(x)
        x4_2 = self.c4_2(x4_1)
        # concatenate the four branches along the channel axis
        x = tf.concat([x1, x2_2, x3_2, x4_2], axis=3)
        return x
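Each of the four branches emits ch channels, so an InceptionBlk outputs 4 * ch channels; a quick illustrative check (the dummy input is an assumption):

# Illustrative check: with ch=16, the block should output 4 * 16 = 64 channels.
blk = InceptionBlk(ch=16, strides=1)
print(blk(tf.random.normal((1, 32, 32, 3))).shape)   # expected: (1, 32, 32, 64)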
class Inception10(Model):
    def __init__(self, num_blocks, num_classes, init_ch=16, **kwargs):
        super(Inception10, self).__init__(**kwargs)
        self.in_channels = init_ch
        self.out_channels = init_ch
        self.num_blocks = num_blocks
        self.init_ch = init_ch
        self.c1 = ConvBNRelu(init_ch)
        self.blocks = tf.keras.models.Sequential()
        for block_id in range(num_blocks):
            for layer_id in range(2):
                if layer_id == 0:
                    block = InceptionBlk(self.out_channels, strides=2)
                else:
                    block = InceptionBlk(self.out_channels, strides=1)
                self.blocks.add(block)
            # double out_channels after each block
            self.out_channels *= 2
        self.p1 = GlobalAveragePooling2D()
        self.f1 = Dense(num_classes, activation='softmax')

    def call(self, x):
        x = self.c1(x)
        x = self.blocks(x)
        x = self.p1(x)
        y = self.f1(x)
        return y

model = Inception10(num_blocks=2, num_classes=10)
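A minimal training sketch for this network; the CIFAR-10 dataset and hyperparameters are illustrative assumptions, since the original text does not train the model here:

# Illustrative training sketch (assumed dataset and hyperparameters).
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=128, epochs=5,
          validation_data=(x_test, y_test))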