Building SqueezeNet with TensorFlow 2

model (squeezenet_v2.py)

import tensorflow as tf

NUM_CLASSES = 10

class FireModule(tf.keras.layers.Layer):
    """SqueezeNet Fire module: a 1x1 "squeeze" conv followed by parallel 1x1 and 3x3 "expand" convs."""
    def __init__(self, s1, e1, e3):
        super(FireModule, self).__init__()
        # squeeze: reduce the channel count to s1 with 1x1 convolutions
        self.squeeze_layer = tf.keras.layers.Conv2D(filters=s1,
                                                    kernel_size=(1, 1),
                                                    strides=1,
                                                    padding="same")
        # expand: two parallel branches whose outputs are concatenated on the channel axis
        self.expand_1x1 = tf.keras.layers.Conv2D(filters=e1,
                                                 kernel_size=(1, 1),
                                                 strides=1,
                                                 padding="same")
        self.expand_3x3 = tf.keras.layers.Conv2D(filters=e3,
                                                 kernel_size=(3, 3),
                                                 strides=1,
                                                 padding="same")

    def call(self, inputs, **kwargs):
        x = self.squeeze_layer(inputs)
        x = tf.nn.relu(x)
        y1 = self.expand_1x1(x)
        y1 = tf.nn.relu(y1)
        y2 = self.expand_3x3(x)
        y2 = tf.nn.relu(y2)
        # the output carries e1 + e3 channels
        return tf.concat(values=[y1, y2], axis=-1)


class SqueezeNet(tf.keras.Model):
    """SqueezeNet v1.0 (Iandola et al., 2016): conv1, fire2-fire9 with interleaved max-pools, then a 1x1 conv classifier head."""
    def __init__(self):
        super(SqueezeNet, self).__init__()
        self.conv1 = tf.keras.layers.Conv2D(filters=96,
                                            kernel_size=(7, 7),
                                            strides=2,
                                            padding="same")
        self.maxpool1 = tf.keras.layers.MaxPool2D(pool_size=(3, 3),
                                                  strides=2)
        self.fire2 = FireModule(s1=16, e1=64, e3=64)
        self.fire3 = FireModule(s1=16, e1=64, e3=64)
        self.fire4 = FireModule(s1=32, e1=128, e3=128)
        self.maxpool4 = tf.keras.layers.MaxPool2D(pool_size=(3, 3),
                                                  strides=2)
        self.fire5 = FireModule(s1=32, e1=128, e3=128)
        self.fire6 = FireModule(s1=48, e1=192, e3=192)
        self.fire7 = FireModule(s1=48, e1=192, e3=192)
        self.fire8 = FireModule(s1=64, e1=256, e3=256)
        self.maxpool8 = tf.keras.layers.MaxPool2D(pool_size=(3, 3),
                                                  strides=2)
        self.fire9 = FireModule(s1=64, e1=256, e3=256)
        self.dropout = tf.keras.layers.Dropout(rate=0.5)
        self.conv10 = tf.keras.layers.Conv2D(filters=NUM_CLASSES,
                                             kernel_size=(1, 1),
                                             strides=1,
                                             padding="same")
        self.avgpool10 = tf.keras.layers.GlobalAveragePooling2D()

    def call(self, inputs, training=None, mask=None):
        x = self.conv1(inputs)
        x = self.maxpool1(x)
        x = self.fire2(x)
        x = self.fire3(x)
        x = self.fire4(x)
        x = self.maxpool4(x)
        x = self.fire5(x)
        x = self.fire6(x)
        x = self.fire7(x)
        x = self.fire8(x)
        x = self.maxpool8(x)
        x = self.fire9(x)
        x = self.dropout(x, training=training)
        x = self.conv10(x)
        x = self.avgpool10(x)

        return tf.nn.softmax(x)
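
As a quick sanity check (not in the original post), a dummy forward pass confirms that for 224x224 inputs the head produces one probability per class:

# minimal sketch: verify the forward pass yields (batch, NUM_CLASSES)
if __name__ == "__main__":
    net = SqueezeNet()
    dummy = tf.random.normal((2, 224, 224, 3))
    print(net(dummy, training=False).shape)  # expected: (2, 10)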

main

import os
from squeezenet_v2 import SqueezeNet

import tensorflow as tf
# note: a plain string comparison mis-orders versions like "2.10.0" < "2.4.0",
# so compare the numeric (major, minor) components instead
assert tuple(int(v) for v in tf.version.VERSION.split(".")[:2]) >= (2, 4), \
    "tf version must be >= 2.4.0"

import numpy as np
from random import shuffle
import cv2 as cv

name_dict = {"BF":0,"BK":1,"BL":2,"BR":3,"CF":4,"CL":5,"CV":6,"CXK":7,"S":8,"XF":9}

data_root_path = "C:/my_all_data_download/ZCB/color_part_data_processing/"
test_file_path = "C:/my_all_data_download/ZCB/TXT_doc/test.txt"  # test-set list file
trainer_file_path = "C:/my_all_data_download/ZCB/TXT_doc/trainer.txt"  # training-set list file

name_data_list = {}  # maps each class name to the list of its image paths

trainer_list = []
test_list = []
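
The traversal below assumes one sub-directory per class under data_root_path, named with the keys of name_dict (the file names here are illustrative):

color_part_data_processing/
├── BF/
│   ├── 0001.jpg
│   └── 0002.jpg
├── BK/
│   └── ...
└── XF/
    └── ...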

# store each image's full path in the dictionary, keyed by class name
def save_train_test_file(path, name):
    if name not in name_data_list:
        name_data_list[name] = [path]
    else:
        name_data_list[name].append(path)

# walk the dataset directory and record every image path under its class
dirs = os.listdir(data_root_path)
for d in dirs:
    full_path = data_root_path + d
    if os.path.isdir(full_path):
        imgs = os.listdir(full_path)  # every image in the class sub-directory
        for img in imgs:
            save_train_test_file(full_path + "/" + img, d)

# split the data: every 10th image of each class goes to the test set (a 90/10 split)
for name, img_list in name_data_list.items():
    i = 0
    num = len(img_list)
    print(f"{name}: {num} images")
    for img in img_list:
        if i % 10 == 0:
            test_list.append(f"{img}\t{name_dict[name]}\n")
        else:
            trainer_list.append(f"{img}\t{name_dict[name]}\n")
        i += 1
# write the shuffled training list and the test list to their .txt files
# (opening with "w" truncates any previous contents)
with open(trainer_file_path, "w") as f:
    shuffle(trainer_list)
    f.writelines(trainer_list)

with open(test_file_path, "w") as f:
    f.writelines(test_list)

print("---------------------------------------------------之前的代码主要是生成.txt文件便于找到图片和对应的标签-------------------------------------------------")

def generateds(train_list):
    x, y_ = [], []  # x holds the image data, y_ the labels
    with open(train_list, 'r') as f:
        lines = [line.strip() for line in f]  # read all lines, stripping whitespace
        for line in lines:
            img_path, lab = line.split("\t")
            img = cv.imread(img_path)  # load the image (BGR, uint8)
            img = cv.resize(img, (224, 224))  # resize to the network's 224x224 input
            img = img / 255  # normalize pixel values to [0, 1]
            x.append(img)
            y_.append(lab)

    x = np.array(x)
    y_ = np.array(y_).astype(np.int64)
    return x, y_

x_train, y_train = generateds(trainer_file_path)
x_test, y_test = generateds(test_file_path)
x_train = tf.convert_to_tensor(x_train, dtype=tf.float32)
y_train = tf.convert_to_tensor(y_train, dtype=tf.int32)
x_test = tf.convert_to_tensor(x_test, dtype=tf.float32)
y_test = tf.convert_to_tensor(y_test, dtype=tf.int32)
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))  # build the dataset objects
train_dataset = train_dataset.batch(32)  # mini-batches of 32
test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))
test_dataset = test_dataset.batch(32)
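
Not part of the original script, but re-shuffling every epoch and prefetching are cheap additions that usually help; a minimal sketch:

# optional: re-shuffle each epoch and overlap preprocessing with training
train_dataset = (tf.data.Dataset.from_tensor_slices((x_train, y_train))
                 .shuffle(buffer_size=x_train.shape[0], reshuffle_each_iteration=True)
                 .batch(32)
                 .prefetch(tf.data.AUTOTUNE))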

# if not os.path.exists("./save_weights"):
#     os.makedirs("./save_weights")

im_height = 224
im_width = 224
batch_size = 32
epochs = 1000
num_classes = 10

# create model
model = SqueezeNet()
model.build((32,224,224,3))
model.summary()

# using keras low level api for training
# the model's call() already applies softmax, hence from_logits=False
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)
optimizer = tf.keras.optimizers.Adam(learning_rate=0.0005)

train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')

val_loss = tf.keras.metrics.Mean(name='val_loss')
val_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='val_accuracy')

@tf.function
def train_step(train_images, train_labels):
    with tf.GradientTape() as tape:
        output = model(train_images, training=True)
        loss = loss_object(train_labels, output)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))

    train_loss(loss)
    train_accuracy(train_labels, output)

@tf.function
def val_step(val_images, val_labels):
    output = model(val_images, training=False)
    loss = loss_object(val_labels, output)

    val_loss(loss)
    val_accuracy(val_labels, output)

best_val_acc = 0.
for epoch in range(epochs):
    train_loss.reset_states()  # clear history info
    train_accuracy.reset_states()  # clear history info
    val_loss.reset_states()  # clear history info
    val_accuracy.reset_states()  # clear history info

    # train
    for step, (images, labels) in enumerate(train_dataset):
        train_step(images, labels)

    # validate
    for step, (images_val, labels_val) in enumerate(test_dataset):
        val_step(images_val, labels_val)

    template = 'Epoch {}, Loss: {:.4f}, Accuracy: {:.2f}, Test Loss: {:.4f}, Test Accuracy: {:.2f}'
    print(template.format(epoch,
                          float(train_loss.result()),
                          float(train_accuracy.result() * 100),
                          float(val_loss.result()),
                          float(val_accuracy.result() * 100)))
    # track (and optionally save) the best validation accuracy
    if val_accuracy.result() > best_val_acc:
        best_val_acc = val_accuracy.result()
        print("new best validation accuracy:", float(best_val_acc))
        # model.save_weights("./save_weights/resMobileNetV3.ckpt", save_format="tf")

print("best validation accuracy:", float(best_val_acc))
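
If the save_weights line is enabled (together with the commented-out save_weights directory creation earlier), the best weights can later be restored into a fresh model for inference. A minimal sketch; the checkpoint path simply mirrors the commented line:

# sketch: rebuild the model and load the saved best weights (assumes the save line was enabled)
inference_model = SqueezeNet()
inference_model.build((None, 224, 224, 3))
inference_model.load_weights("./save_weights/resMobileNetV3.ckpt")
pred = inference_model(x_test[:1], training=False)
print("predicted class:", int(tf.argmax(pred, axis=-1)[0]))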

On the test set, accuracy is mostly around 72%; the best run reached 79.3478%.
