Inception10网络在python下TensorFlow2的实现

参考资料: 北京大学, 软微学院, 曹健老师, 《人工智能实践:TensorFlow2.0笔记》
运行环境:
python3.7
tensorflow 2.1.0
numpy 1.17.4
matplotlib 3.2.1

# inception10
import tensorflow as tf
import os
import numpy as np
from matplotlib import pyplot as plt
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Dropout, Flatten, Dense,\
    GlobalAveragePooling2D
from tensorflow.keras import Model

# Print full numpy arrays (no truncation) when the weights are dumped to a text file below.
np.set_printoptions(threshold=np.inf)

cifar10 = tf.keras.datasets.cifar10
# CIFAR-10: 50k train / 10k test RGB images of shape (32, 32, 3), integer labels 0-9.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()

# Scale pixel values from [0, 255] to [0.0, 1.0].
x_train, x_test = x_train / 255.0, x_test / 255.0


class ConvBNRelu(Model):
    """A Conv2D -> BatchNormalization -> ReLU stack, bundled as one reusable unit."""

    def __init__(self, ch, kernelsz=3, strides=1, padding='same'):
        super(ConvBNRelu, self).__init__()
        stack = [
            Conv2D(ch, kernelsz, strides=strides, padding=padding),
            BatchNormalization(),
            Activation('relu'),
        ]
        self.model = tf.keras.models.Sequential(stack)

    def call(self, x):
        # training=False: BatchNormalization normalizes with its moving
        # (population) statistics instead of per-batch statistics.
        return self.model(x, training=False)


# 一个inception块的定义
class InceptionBlk(Model):
    """One Inception block: four parallel branches concatenated on the channel axis.

    Each branch outputs `ch` feature maps, so the block emits 4 * ch channels:
      1) 1x1 conv
      2) 1x1 conv -> 3x3 conv
      3) 1x1 conv -> 5x5 conv
      4) 3x3 max-pool -> 1x1 conv
    """

    def __init__(self, ch, strides=1):
        super(InceptionBlk, self).__init__()
        self.ch = ch
        self.strides = strides
        # Branch 1: single 1x1 convolution.
        self.c1 = ConvBNRelu(ch, kernelsz=1, strides=strides)
        # Branch 2: 1x1 convolution followed by a 3x3 convolution.
        self.c2_1 = ConvBNRelu(ch, kernelsz=1, strides=strides)
        self.c2_2 = ConvBNRelu(ch, kernelsz=3, strides=1)
        # Branch 3: 1x1 convolution followed by a 5x5 convolution.
        self.c3_1 = ConvBNRelu(ch, kernelsz=1, strides=strides)
        self.c3_2 = ConvBNRelu(ch, kernelsz=5, strides=1)
        # Branch 4: 3x3 max-pool (stride 1) followed by a strided 1x1 convolution.
        self.p4_1 = MaxPool2D(3, strides=1, padding='same')
        self.c4_2 = ConvBNRelu(ch, kernelsz=1, strides=strides)

    def call(self, x):
        branch1 = self.c1(x)
        branch2 = self.c2_2(self.c2_1(x))
        branch3 = self.c3_2(self.c3_1(x))
        branch4 = self.c4_2(self.p4_1(x))
        # Stack the four branches along the channel axis (NHWC -> axis 3).
        return tf.concat([branch1, branch2, branch3, branch4], axis=3)


class Inception10(Model):
    """A compact Inception-style classifier.

    Layout: one ConvBNRelu stem, then `num_blocks` stages of two InceptionBlk
    layers each (the first block of every stage downsamples with stride 2),
    finished by global average pooling and a softmax Dense head. The per-branch
    channel width doubles after each stage.
    """

    def __init__(self, num_blocks, num_classes, init_ch=16, **kwargs):
        super(Inception10, self).__init__(**kwargs)
        self.in_channels = init_ch
        self.out_channels = init_ch
        self.num_blocks = num_blocks
        self.init_ch = init_ch
        # Stem: one Conv-BN-ReLU with init_ch filters.
        self.c1 = ConvBNRelu(init_ch)
        # Body: a sequential stack of Inception blocks.
        self.blocks = tf.keras.models.Sequential()
        for block_id in range(num_blocks):
            for layer_id in range(2):
                # First block of each stage halves the spatial resolution.
                stride = 2 if layer_id == 0 else 1
                self.blocks.add(InceptionBlk(self.out_channels, strides=stride))
            # Double the per-branch channel width for the next stage.
            self.out_channels *= 2
        # Head: global average pooling + softmax classifier.
        self.p1 = GlobalAveragePooling2D()
        self.f1 = Dense(num_classes, activation='softmax')

    def call(self, x):
        x = self.c1(x)
        x = self.blocks(x)
        x = self.p1(x)
        return self.f1(x)


# 10 classes; 2 stages with 2 Inception blocks each; every block has 4 branches.
model = Inception10(num_blocks=2, num_classes=10)
# Labels are integer class ids, so use the sparse categorical loss and metric.
# from_logits=False because the final Dense layer already applies softmax.
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=['sparse_categorical_accuracy']
              )

checkpoint_save_path = './checkpoint/Inception10.ckpt'
# Resume from an earlier run if a checkpoint exists (TF writes an .index file
# next to the weight shards).
if os.path.exists(checkpoint_save_path + '.index'):
    print('-------------load the model-----------------')
    model.load_weights(checkpoint_save_path)

# During training, keep only the weights of the best model seen so far.
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_save_path,
                                                 save_weights_only=True,
                                                 save_best_only=True)

# Train for 5 epochs, validating on the test set once per epoch and
# checkpointing the best weights via cp_callback.
history = model.fit(x_train, y_train, batch_size=128, epochs=5,
                    validation_data=(x_test, y_test), validation_freq=1,
                    callbacks=[cp_callback])
model.summary()

# Dump every trainable variable (name, shape, values) to a text file.
# Use a context manager so the file is closed even if a write fails,
# instead of the bare open()/close() pair that leaked the handle on error.
with open('./weights_inception10.txt', 'w') as file:
    for v in model.trainable_variables:
        file.write(str(v.name) + '\n')
        file.write(str(v.shape) + '\n')
        file.write(str(v.numpy()) + '\n')

###############################################    show   ###############################################

# Plot the training/validation accuracy and loss curves side by side.
acc = history.history['sparse_categorical_accuracy']
val_acc = history.history['val_sparse_categorical_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

# (subplot index, training series, training label, validation series,
#  validation label, panel title)
panels = (
    (1, acc, 'Training Accuracy', val_acc, 'Validation Accuracy',
     'Training and Validation Accuracy'),
    (2, loss, 'Training Loss', val_loss, 'Validation Loss',
     'Training and Validation Loss'),
)
for idx, train_series, train_label, valid_series, valid_label, title in panels:
    plt.subplot(1, 2, idx)
    plt.plot(train_series, label=train_label)
    plt.plot(valid_series, label=valid_label)
    plt.title(title)
    plt.legend()
plt.show()

  • 0
    点赞
  • 3
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
好的,以下是使用TensorFlow在CIFAR数据集上验证GoogLeNet神经网络性能的步骤:

1. 下载CIFAR数据集并解压缩,可以使用以下代码:

```python
import tensorflow as tf
from tensorflow.keras.datasets import cifar10

(x_train, y_train), (x_test, y_test) = cifar10.load_data()

# Normalize pixel values to be between 0 and 1
x_train, x_test = x_train / 255.0, x_test / 255.0
```

2. 构建GoogLeNet模型,可以使用以下代码:

```python
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, AveragePooling2D, Dropout, Flatten, concatenate

def inception_module(prev_layer, filters):
    # 1x1 convolution
    conv1 = Conv2D(filters[0], (1, 1), padding='same', activation='relu')(prev_layer)
    # 3x3 convolution
    conv3 = Conv2D(filters[1], (3, 3), padding='same', activation='relu')(prev_layer)
    # 5x5 convolution
    conv5 = Conv2D(filters[2], (5, 5), padding='same', activation='relu')(prev_layer)
    # Max pooling
    pool = MaxPooling2D((3, 3), strides=(1, 1), padding='same')(prev_layer)
    pool_conv = Conv2D(filters[3], (1, 1), padding='same', activation='relu')(pool)
    # Concatenate all filters
    concat = concatenate([conv1, conv3, conv5, pool_conv], axis=-1)
    return concat

def googlenet(input_shape, num_classes):
    inputs = Input(shape=input_shape)
    # First convolutional layer
    x = Conv2D(64, (7, 7), strides=(2, 2), padding='same', activation='relu')(inputs)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    # Second convolutional layer
    x = Conv2D(192, (3, 3), strides=(1, 1), padding='same', activation='relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    # First inception module
    x = inception_module(x, filters=[64, 96, 128, 16, 32, 32])
    # Second inception module
    x = inception_module(x, filters=[128, 128, 192, 32, 96, 64])
    # Third inception module
    x = inception_module(x, filters=[192, 96, 208, 16, 48, 64])
    # Max pooling layer
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    # Fourth inception module
    x = inception_module(x, filters=[160, 112, 224, 24, 64, 64])
    # Fifth inception module
    x = inception_module(x, filters=[128, 128, 256, 24, 64, 64])
    # Sixth inception module
    x = inception_module(x, filters=[112, 144, 288, 32, 64, 64])
    # Seventh inception module
    x = inception_module(x, filters=[256, 160, 320, 32, 128, 128])
    # Max pooling layer
    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    # Eighth inception module
    x = inception_module(x, filters=[256, 160, 320, 32, 128, 128])
    # Ninth inception module
    x = inception_module(x, filters=[384, 192, 384, 48, 128, 128])
    # Dropout layer
    x = Dropout(0.4)(x)
    # Flatten layer
    x = Flatten()(x)
    # Fully connected layer
    outputs = Dense(num_classes, activation='softmax')(x)
    model = Model(inputs=inputs, outputs=outputs)
    return model
```

(注意:这个简化版 `inception_module` 只读取 `filters[0]`–`filters[3]`,而上面各次调用传入的是 6 元素列表,其余元素会被忽略——与标准 GoogLeNet 的 6 路 filter 配置并不一致,使用前请核对。)

3. 编译和训练模型,可以使用以下代码:

```python
model = googlenet(input_shape=(32, 32, 3), num_classes=10)

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

history = model.fit(x_train, y_train, epochs=10, validation_data=(x_test, y_test))
```

4. 评估模型性能,可以使用以下代码:

```python
test_loss, test_acc = model.evaluate(x_test, y_test)
print('Test accuracy:', test_acc)
```

以上就是使用tensorflow框架在cifar数据集上验证googlenet神经网络性能的步骤。
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值