LeNet+AlexNet+ResNet+Inception+VGG

LeNet

All five models below are written as tf.keras subclassed models and share the following imports.

import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import (Conv2D, BatchNormalization, Activation,
                                     MaxPool2D, Dropout, Flatten, Dense,
                                     GlobalAveragePooling2D)


class LeNet(Model):
    def __init__(self):
        super(LeNet, self).__init__()
        # Two conv + pool stages followed by three fully connected layers
        self.c1 = Conv2D(filters=6, kernel_size=(5, 5),
                         activation='sigmoid')
        self.p1 = MaxPool2D(pool_size=(2, 2), strides=2)

        self.c2 = Conv2D(filters=16, kernel_size=(5, 5),
                         activation='sigmoid')
        self.p2 = MaxPool2D(pool_size=(2, 2), strides=2)

        self.flatten = Flatten()
        self.f1 = Dense(120, activation='sigmoid')
        self.f2 = Dense(84, activation='sigmoid')
        self.f3 = Dense(10, activation='softmax')

    def call(self, x):
        x = self.c1(x)
        x = self.p1(x)

        x = self.c2(x)
        x = self.p2(x)

        x = self.flatten(x)
        x = self.f1(x)
        x = self.f2(x)
        y = self.f3(x)
        return y
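
The model can be built and inspected like any subclassed Keras model. A minimal sketch, assuming 32x32 single-channel inputs (the classic LeNet input size); the compile settings are illustrative, not from the original:

model = LeNet()
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['sparse_categorical_accuracy'])
model.build(input_shape=(None, 32, 32, 1))  # assumed input size
model.summary()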

AlexNet

class AlexNet(Model):
    def __init__(self):
        super(AlexNet, self).__init__()
        # Conv -> BN -> ReLU -> pool stages
        self.c1 = Conv2D(filters=96, kernel_size=(3, 3))
        self.b1 = BatchNormalization()
        self.a1 = Activation('relu')
        self.p1 = MaxPool2D(pool_size=(3, 3), strides=2)

        self.c2 = Conv2D(filters=256, kernel_size=(3, 3))
        self.b2 = BatchNormalization()
        self.a2 = Activation('relu')
        self.p2 = MaxPool2D(pool_size=(3, 3), strides=2)

        self.c3 = Conv2D(filters=384, kernel_size=(3, 3), padding='same',
                         activation='relu')
        self.c4 = Conv2D(filters=384, kernel_size=(3, 3), padding='same',
                         activation='relu')
        self.c5 = Conv2D(filters=256, kernel_size=(3, 3), padding='same',
                         activation='relu')
        self.p3 = MaxPool2D(pool_size=(3, 3), strides=2)

        # Classifier head with dropout between the fully connected layers
        self.flatten = Flatten()
        self.f1 = Dense(2048, activation='relu')
        self.d1 = Dropout(0.5)
        self.f2 = Dense(2048, activation='relu')
        self.d2 = Dropout(0.5)
        self.f3 = Dense(10, activation='softmax')

    def call(self, x):
        x = self.c1(x)
        x = self.b1(x)
        x = self.a1(x)
        x = self.p1(x)

        x = self.c2(x)
        x = self.b2(x)
        x = self.a2(x)
        x = self.p2(x)

        x = self.c3(x)
        x = self.c4(x)
        x = self.c5(x)
        x = self.p3(x)

        x = self.flatten(x)
        x = self.f1(x)
        x = self.d1(x)
        x = self.f2(x)
        x = self.d2(x)
        y = self.f3(x)
        return y
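
This variant keeps AlexNet's five-conv/three-dense layout but shrinks the kernels to 3x3 and the head to a 10-way softmax, so it targets small images rather than 224x224 ImageNet crops. A hedged training sketch; the CIFAR-10 dataset and hyperparameters are assumptions, not part of the original:

# Illustrative training run on CIFAR-10 (dataset choice is an assumption)
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

model = AlexNet()
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['sparse_categorical_accuracy'])
model.fit(x_train, y_train, batch_size=32, epochs=5,
          validation_data=(x_test, y_test))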

ResNet

class ResnetBlock(Model):
    def __init__(self, filters, strides=1, residual_path=False):
        super(ResnetBlock, self).__init__()
        self.filters = filters
        self.strides = strides
        self.residual_path = residual_path

        self.c1 = Conv2D(filters, (3, 3), strides=strides, padding='same', use_bias=False)
        self.b1 = BatchNormalization()
        self.a1 = Activation('relu')

        self.c2 = Conv2D(filters, (3, 3), strides=1, padding='same', use_bias=False)
        self.b2 = BatchNormalization()
        # When residual_path is True, downsample the input with a 1x1 convolution
        # so that x matches the dimensions of F(x) and the two can be added.
        if residual_path:
            self.down_c1 = Conv2D(filters, (1, 1), strides=strides, padding='same', use_bias=False)
            self.down_b1 = BatchNormalization()

        self.a2 = Activation('relu')

    def call(self, inputs):
        residual = inputs  # the shortcut carries the input itself, i.e. residual = x
        # Pass the input through conv, BN and activation layers to compute F(x)
        x = self.c1(inputs)
        x = self.b1(x)
        x = self.a1(x)

        x = self.c2(x)
        y = self.b2(x)

        if self.residual_path:
            residual = self.down_c1(inputs)
            residual = self.down_b1(residual)
        # The output is the sum of the two paths, F(x) + x (or F(x) + Wx), passed through ReLU
        out = self.a2(y + residual)
        return out
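
With strides=2 and residual_path=True the block halves the spatial size and changes the channel count, while the defaults preserve the input shape. A quick shape check on a dummy tensor (the shapes are arbitrary, for illustration only):

dummy = tf.random.normal((1, 32, 32, 64))
same = ResnetBlock(64)(dummy)                                   # identity shortcut
down = ResnetBlock(128, strides=2, residual_path=True)(dummy)   # 1x1-conv shortcut
print(same.shape)  # (1, 32, 32, 64)
print(down.shape)  # (1, 16, 16, 128)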



class ResNet18(Model):
    def __init__(self, block_list, initial_filters=64):  # block_list gives the number of residual blocks in each stage
        super(ResNet18, self).__init__()
        self.num_blocks = len(block_list)  # number of stages
        self.block_list = block_list
        self.out_filters = initial_filters
        self.c1 = Conv2D(self.out_filters, (3, 3), strides=1, padding='same', use_bias=False,
                         kernel_initializer='he_normal')
        self.b1 = BatchNormalization()
        self.a1 = Activation('relu')
        self.blocks = tf.keras.models.Sequential()
        # Build the stacked residual stages
        for block_id in range(len(block_list)):  # which stage
            for layer_id in range(block_list[block_id]):  # which block within the stage
                if block_id != 0 and layer_id == 0:  # downsample at the start of every stage except the first
                    block = ResnetBlock(self.out_filters, strides=2, residual_path=True)
                else:
                    block = ResnetBlock(self.out_filters, residual_path=False)
                self.blocks.add(block)  # append the finished block to the network
            self.out_filters *= 2  # double the channel count for the next stage
        self.p1 = GlobalAveragePooling2D()
        self.f1 = Dense(10)

    def call(self, inputs):
        x = self.c1(inputs)
        x = self.b1(x)
        x = self.a1(x)
        x = self.blocks(x)
        x = self.p1(x)
        y = self.f1(x)
        return y


model = ResNet18([2, 2, 2, 2])  # four stages of two blocks each: 16 convs + c1 + f1 = 18 layers
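
Because f1 is Dense(10) with no activation, the model outputs raw logits, so pairing it with a from_logits loss keeps training consistent. A minimal sketch (the compile settings and 32x32x3 input shape are assumptions):

model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['sparse_categorical_accuracy'])
logits = model(tf.random.normal((1, 32, 32, 3)))  # assumed input size
print(logits.shape)  # (1, 10) raw logits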

InceptionNet

class ConvBNRelu(Model):
    def __init__(self, ch, kernelsz=3, strides=1, padding='same'):
        super(ConvBNRelu, self).__init__()
        # Reusable Conv -> BN -> ReLU building block
        self.model = tf.keras.models.Sequential([
            Conv2D(ch, kernelsz, strides=strides, padding=padding),
            BatchNormalization(),
            Activation('relu')
        ])

    def call(self, x):
        x = self.model(x)
        return x

class InceptionBlk(Model):
    def __init__(self, ch, strides=1):
        super(InceptionBlk, self).__init__()
        self.ch = ch
        self.strides = strides
        # Four parallel branches: 1x1, 1x1 -> 3x3, 1x1 -> 5x5, and 3x3 pool -> 1x1
        self.c1 = ConvBNRelu(ch, kernelsz=1, strides=strides)
        self.c2_1 = ConvBNRelu(ch, kernelsz=1, strides=strides)
        self.c2_2 = ConvBNRelu(ch, kernelsz=3, strides=1)
        self.c3_1 = ConvBNRelu(ch, kernelsz=1, strides=strides)
        self.c3_2 = ConvBNRelu(ch, kernelsz=5, strides=1)
        self.p4_1 = MaxPool2D(3, strides=1, padding='same')
        self.c4_2 = ConvBNRelu(ch, kernelsz=1, strides=strides)

    def call(self, x):
        x1 = self.c1(x)
        x2_1 = self.c2_1(x)
        x2_2 = self.c2_2(x2_1)
        x3_1 = self.c3_1(x)
        x3_2 = self.c3_2(x3_1)
        x4_1 = self.p4_1(x)
        x4_2 = self.c4_2(x4_1)
        # Concatenate the four branch outputs along the channel axis
        x = tf.concat([x1, x2_2, x3_2, x4_2], axis=3)
        return x
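
Every branch emits ch channels, so the concatenated output has 4 * ch channels. A quick check on a dummy tensor (shapes chosen arbitrarily for illustration):

dummy = tf.random.normal((1, 28, 28, 16))
out = InceptionBlk(ch=16, strides=1)(dummy)
print(out.shape)  # (1, 28, 28, 64): four branches of 16 channels each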

class Inception(Model):
    def __init__(self, num_blocks, num_classes, init_ch=16, **kwargs):
        super(Inception, self).__init__(**kwargs)
        self.in_channels = init_ch
        self.out_channels = init_ch
        self.num_blocks = num_blocks
        self.init_ch = init_ch
        self.c1 = ConvBNRelu(init_ch)
        self.blocks = tf.keras.models.Sequential()
        for block_id in range(num_blocks):
            for layer_id in range(2):
                # The first block of each pair downsamples with strides=2
                if layer_id == 0:
                    block = InceptionBlk(self.out_channels, strides=2)
                else:
                    block = InceptionBlk(self.out_channels, strides=1)
                self.blocks.add(block)
            # Double the output channels after each pair of blocks
            self.out_channels *= 2
        self.p1 = GlobalAveragePooling2D()
        self.f1 = Dense(num_classes, activation='softmax')

    def call(self, x):
        x = self.c1(x)
        x = self.blocks(x)
        x = self.p1(x)
        y = self.f1(x)
        return y

model = Inception(num_blocks=2, num_classes=10)
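
A quick forward pass on random data confirms the head's output shape (the 32x32x3 input size is an assumption):

dummy = tf.random.normal((4, 32, 32, 3))
probs = model(dummy)
print(probs.shape)  # (4, 10): softmax probabilities over 10 classes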

VGG

class VGG16(Model):
    def __init__(self):
        super(VGG16, self).__init__()
        # Block 1: two 64-filter conv layers
        self.c1 = Conv2D(filters=64, kernel_size=(3, 3), padding='same')
        self.b1 = BatchNormalization()
        self.a1 = Activation('relu')

        self.c2 = Conv2D(filters=64, kernel_size=(3, 3), padding='same')
        self.b2 = BatchNormalization()
        self.a2 = Activation('relu')
        self.p1 = MaxPool2D(pool_size=(2, 2), strides=2, padding='same')
        self.d1 = Dropout(0.2)

        # Block 2: two 128-filter conv layers
        self.c3 = Conv2D(filters=128, kernel_size=(3, 3), padding='same')
        self.b3 = BatchNormalization()
        self.a3 = Activation('relu')

        self.c4 = Conv2D(filters=128, kernel_size=(3, 3), padding='same')
        self.b4 = BatchNormalization()
        self.a4 = Activation('relu')
        self.p2 = MaxPool2D(pool_size=(2, 2), strides=2, padding='same')
        self.d2 = Dropout(0.2)

        # Block 3: three 256-filter conv layers
        self.c5 = Conv2D(filters=256, kernel_size=(3, 3), padding='same')
        self.b5 = BatchNormalization()
        self.a5 = Activation('relu')

        self.c6 = Conv2D(filters=256, kernel_size=(3, 3), padding='same')
        self.b6 = BatchNormalization()
        self.a6 = Activation('relu')

        self.c7 = Conv2D(filters=256, kernel_size=(3, 3), padding='same')
        self.b7 = BatchNormalization()
        self.a7 = Activation('relu')
        self.p3 = MaxPool2D(pool_size=(2, 2), strides=2, padding='same')
        self.d3 = Dropout(0.2)

        # Block 4: three 512-filter conv layers
        self.c8 = Conv2D(filters=512, kernel_size=(3, 3), padding='same')
        self.b8 = BatchNormalization()
        self.a8 = Activation('relu')

        self.c9 = Conv2D(filters=512, kernel_size=(3, 3), padding='same')
        self.b9 = BatchNormalization()
        self.a9 = Activation('relu')

        self.c10 = Conv2D(filters=512, kernel_size=(3, 3), padding='same')
        self.b10 = BatchNormalization()
        self.a10 = Activation('relu')
        self.p4 = MaxPool2D(pool_size=(2, 2), strides=2, padding='same')
        self.d4 = Dropout(0.2)

        # Block 5: three more 512-filter conv layers
        self.c11 = Conv2D(filters=512, kernel_size=(3, 3), padding='same')
        self.b11 = BatchNormalization()
        self.a11 = Activation('relu')

        self.c12 = Conv2D(filters=512, kernel_size=(3, 3), padding='same')
        self.b12 = BatchNormalization()
        self.a12 = Activation('relu')

        self.c13 = Conv2D(filters=512, kernel_size=(3, 3), padding='same')
        self.b13 = BatchNormalization()
        self.a13 = Activation('relu')
        self.p5 = MaxPool2D(pool_size=(2, 2), strides=2, padding='same')
        self.d5 = Dropout(0.2)

        # Classifier head
        self.flatten = Flatten()
        self.f1 = Dense(512, activation='relu')
        self.d6 = Dropout(0.2)
        self.f2 = Dense(512, activation='relu')
        self.d7 = Dropout(0.2)
        self.f3 = Dense(10, activation='softmax')

    def call(self, x):
        x = self.c1(x)
        x = self.b1(x)
        x = self.a1(x)

        x = self.c2(x)
        x = self.b2(x)
        x = self.a2(x)
        x = self.p1(x)
        x = self.d1(x)

        x = self.c3(x)
        x = self.b3(x)
        x = self.a3(x)

        x = self.c4(x)
        x = self.b4(x)
        x = self.a4(x)
        x = self.p2(x)
        x = self.d2(x)

        x = self.c5(x)
        x = self.b5(x)
        x = self.a5(x)

        x = self.c6(x)
        x = self.b6(x)
        x = self.a6(x)

        x = self.c7(x)
        x = self.b7(x)
        x = self.a7(x)
        x = self.p3(x)
        x = self.d3(x)

        x = self.c8(x)
        x = self.b8(x)
        x = self.a8(x)

        x = self.c9(x)
        x = self.b9(x)
        x = self.a9(x)

        x = self.c10(x)
        x = self.b10(x)
        x = self.a10(x)
        x = self.p4(x)
        x = self.d4(x)

        x = self.c11(x)
        x = self.b11(x)
        x = self.a11(x)

        x = self.c12(x)
        x = self.b12(x)
        x = self.a12(x)

        x = self.c13(x)
        x = self.b13(x)
        x = self.a13(x)
        x = self.p5(x)
        x = self.d5(x)

        x = self.flatten(x)
        x = self.f1(x)
        x = self.d6(x)
        x = self.f2(x)
        x = self.d7(x)
        y = self.f3(x)
        return y
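
All thirteen conv layers use 'same' padding, so only the five pooling layers shrink the feature map: a 32x32 input reaches the classifier head at 1x1x512. A quick check (the input size is an assumption):

model = VGG16()
model.build(input_shape=(None, 32, 32, 3))  # assumed input size
model.summary()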