TensorFlow 2: writing your own network architecture, training, saving, and predicting

Defining the network architecture, training, and saving (test1.py)

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/7/16 14:33
# @Author : wutiande

import tensorflow as tf #tf_version: 2.5.0


class MyModel(tf.keras.Model):
    """构建网络模型"""
    def __init__(self,num_classes):
        super(MyModel, self).__init__()
        self.conv1 = tf.keras.layers.Conv2D(filters=32,kernel_size=3,strides=(1,1),
                                            padding="same")
        self.b1 = tf.keras.layers.BatchNormalization()
        self.a1 = tf.keras.activations.relu
        self.pool1 = tf.keras.layers.MaxPooling2D(pool_size=(2,2),strides=2)

        self.conv2 = tf.keras.layers.Conv2D(filters=32, kernel_size=3, strides=(1, 1),
                                            padding="same")
        self.b2 = tf.keras.layers.BatchNormalization()
        self.a2 = tf.keras.activations.relu
        self.pool2 = tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=2)

        self.flatten = tf.keras.layers.Flatten()
        self.dense1 = tf.keras.layers.Dense(units=num_classes)

    def call(self,inputs):
        x = self.conv1(inputs)
        x = self.b1(x)
        x = self.a1(x)
        x = self.pool1(x)
        x = self.conv2(x)
        x = self.b2(x)
        x = self.a2(x)
        x = self.pool2(x)

        x = self.flatten(x)
        x = self.dense1(x)
        output = tf.nn.softmax(x,name="output")
        return output


(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()  # load the data

x_train = x_train / 255.0  # normalize to [0, 1]
x_test = x_test / 255.0

x_train = tf.expand_dims(x_train, axis=-1)  # add a channel dimension
x_test = tf.expand_dims(x_test, axis=-1)
print(x_train.shape, y_train.shape)

model = MyModel(num_classes=10)  # instantiate the model
# Compile
model.compile(optimizer=tf.keras.optimizers.Adam(),
              metrics=['accuracy'],
              loss=tf.keras.losses.sparse_categorical_crossentropy)
# Train
model.fit(x_train, y_train, batch_size=32, epochs=50, validation_data=(x_test, y_test))
# Save
tf.saved_model.save(model, "model1")

   This automatically creates a model1 folder and saves the model inside it.
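To check what was actually written to disk, a quick listing of the folder (a minimal sketch; exact file names can vary slightly between TensorFlow versions) should show saved_model.pb together with a variables/ subdirectory holding the weights:

import os

# List everything tf.saved_model.save(model, "model1") wrote to disk.
# A SavedModel typically contains saved_model.pb (graph and signatures)
# and a variables/ folder with the checkpointed weights.
for root, _, files in os.walk("model1"):
    for name in files:
        print(os.path.join(root, name))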

Loading the model and making predictions (test2.py)

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/7/16 15:56
# @Author : wutiande

import tensorflow as tf
import cv2
import numpy as np

"""导入待预测图片并进行数据预处理"""
inputs = cv2.imread("five.jpg",0)
inputs = ~inputs
cv2.imshow('w',inputs)
cv2.waitKey(0)
inputs = (cv2.resize(inputs,(28,28))/255.0).astype(np.float32) # 使符合保存的网络的输入
inputs = tf.expand_dims(inputs,axis=0)
inputs = tf.expand_dims(inputs,axis=-1)

model = tf.saved_model.load("model1")  # load the saved model

# (_,_),(inputs,label)= tf.keras.datasets.mnist.load_data()
# inputs = (inputs[0]/255.0).astype(np.float32)
# label = label[0]
# inputs = tf.expand_dims(inputs,axis=0)
# inputs = tf.expand_dims(inputs,axis=-1)
# print(inputs.shape,label)

# The commented-out lines above load the official MNIST data, used to verify the code is correct

# Predict
outputs = model(inputs)
outputs = list(outputs)[0]  # take the first (and only) sample in the batch
# Print the predicted class
print(np.argmax(outputs))

   This is five.jpg, the image used for testing.

 This is the final output.
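As an aside, the object returned by tf.saved_model.load also exposes the exported signatures. Below is a minimal sketch of running inference through the default serving signature instead of calling the model directly, reusing model, inputs, tf and np from test2.py; it assumes such a signature was exported, and the output key is looked up dynamically because its name depends on how the model was traced:

infer = model.signatures["serving_default"]  # assumes the default signature exists
result = infer(tf.constant(inputs))          # inputs preprocessed as in test2.py
output_key = list(result.keys())[0]          # key name depends on the traced model
print(np.argmax(result[output_key].numpy()))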

The following code reproduces the ResNet50 network in TensorFlow. It includes binary-classification training on your own dataset, plus a measure against overfitting: every layer except the final fully-connected layer is frozen.

```python
import tensorflow as tf
from tensorflow.keras import layers, models, optimizers
from tensorflow.keras.preprocessing.image import ImageDataGenerator


# Define the ResNet50 network structure
def resnet50_model(input_shape=(224, 224, 3), num_classes=2):
    input_tensor = layers.Input(shape=input_shape)

    # Stage 1
    x = layers.Conv2D(filters=64, kernel_size=(7, 7), strides=(2, 2),
                      padding='same', name='conv1')(input_tensor)
    x = layers.BatchNormalization(name='bn_conv1')(x)
    x = layers.Activation('relu')(x)
    x = layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding='same')(x)

    # Stage 2
    x = conv_block(x, filters=[64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, filters=[64, 64, 256], stage=2, block='b')
    x = identity_block(x, filters=[64, 64, 256], stage=2, block='c')

    # Stage 3
    x = conv_block(x, filters=[128, 128, 512], stage=3, block='a')
    x = identity_block(x, filters=[128, 128, 512], stage=3, block='b')
    x = identity_block(x, filters=[128, 128, 512], stage=3, block='c')
    x = identity_block(x, filters=[128, 128, 512], stage=3, block='d')

    # Stage 4
    x = conv_block(x, filters=[256, 256, 1024], stage=4, block='a')
    x = identity_block(x, filters=[256, 256, 1024], stage=4, block='b')
    x = identity_block(x, filters=[256, 256, 1024], stage=4, block='c')
    x = identity_block(x, filters=[256, 256, 1024], stage=4, block='d')
    x = identity_block(x, filters=[256, 256, 1024], stage=4, block='e')
    x = identity_block(x, filters=[256, 256, 1024], stage=4, block='f')

    # Stage 5
    x = conv_block(x, filters=[512, 512, 2048], stage=5, block='a')
    x = identity_block(x, filters=[512, 512, 2048], stage=5, block='b')
    x = identity_block(x, filters=[512, 512, 2048], stage=5, block='c')

    x = layers.GlobalAveragePooling2D()(x)
    x = layers.Dense(units=num_classes, activation='softmax',
                     name='fc' + str(num_classes))(x)

    # Create model.
    model = models.Model(inputs=input_tensor, outputs=x, name='resnet50')
    return model


# Define the identity block
def identity_block(input_tensor, filters, stage, block):
    filters1, filters2, filters3 = filters
    block_name = 'res' + str(stage) + block + '_branch'

    x = layers.Conv2D(filters=filters1, kernel_size=(1, 1),
                      name=block_name + '2a')(input_tensor)
    x = layers.BatchNormalization(name=block_name + '2b')(x)
    x = layers.Activation('relu')(x)

    x = layers.Conv2D(filters=filters2, kernel_size=(3, 3), padding='same',
                      name=block_name + '2c')(x)
    x = layers.BatchNormalization(name=block_name + '2d')(x)
    x = layers.Activation('relu')(x)

    x = layers.Conv2D(filters=filters3, kernel_size=(1, 1),
                      name=block_name + '2e')(x)
    x = layers.BatchNormalization(name=block_name + '2f')(x)

    x = layers.add([input_tensor, x])
    x = layers.Activation('relu')(x)
    return x


# Define the conv block
def conv_block(input_tensor, filters, stage, block, strides=(2, 2)):
    filters1, filters2, filters3 = filters
    block_name = 'res' + str(stage) + block + '_branch'

    x = layers.Conv2D(filters=filters1, kernel_size=(1, 1), strides=strides,
                      name=block_name + '2a')(input_tensor)
    x = layers.BatchNormalization(name=block_name + '2b')(x)
    x = layers.Activation('relu')(x)

    x = layers.Conv2D(filters=filters2, kernel_size=(3, 3), padding='same',
                      name=block_name + '2c')(x)
    x = layers.BatchNormalization(name=block_name + '2d')(x)
    x = layers.Activation('relu')(x)

    x = layers.Conv2D(filters=filters3, kernel_size=(1, 1),
                      name=block_name + '2e')(x)
    x = layers.BatchNormalization(name=block_name + '2f')(x)

    shortcut = layers.Conv2D(filters=filters3, kernel_size=(1, 1), strides=strides,
                             name=block_name + '1')(input_tensor)
    shortcut = layers.BatchNormalization(name=block_name + '2')(shortcut)

    x = layers.add([shortcut, x])
    x = layers.Activation('relu')(x)
    return x


# Load data
train_datagen = ImageDataGenerator(
    rescale=1./255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True)

test_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
    'train',
    target_size=(224, 224),
    batch_size=32,
    class_mode='categorical')

validation_generator = test_datagen.flow_from_directory(
    'test',
    target_size=(224, 224),
    batch_size=32,
    class_mode='categorical')

# Create the ResNet50 model
model = resnet50_model(input_shape=(224, 224, 3), num_classes=2)

# Freeze every layer except the final fully-connected (classifier) layer
for layer in model.layers[:-1]:
    layer.trainable = False

# Compile the model
model.compile(optimizer=optimizers.Adam(learning_rate=0.001),
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# Train the model
history = model.fit(
    train_generator,
    steps_per_epoch=train_generator.samples // train_generator.batch_size,
    epochs=10,
    validation_data=validation_generator,
    validation_steps=validation_generator.samples // validation_generator.batch_size)

# Save the model
model.save('resnet50_model.h5')

# Predict
from tensorflow.keras.preprocessing import image
import numpy as np

img_path = 'example.jpg'
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = x / 255.0

preds = model.predict(x)
print('Predicted:', preds[0])
```

With this code you can reproduce the ResNet50 architecture and train a binary classifier on your own dataset. Freezing all layers except the final fully-connected layer reduces overfitting and improves generalization. Finally, the trained model is saved and used for prediction.
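If the goal is binary classification on a small dataset rather than rebuilding the architecture itself, a common alternative (a sketch, not part of the original post, assuming ImageNet weights and a 224x224 input size are acceptable) is to reuse the pre-trained ResNet50 from tf.keras.applications and train only a new classifier head:

import tensorflow as tf

# Pre-trained convolutional backbone, without the original ImageNet classifier.
base = tf.keras.applications.ResNet50(weights="imagenet", include_top=False,
                                      input_shape=(224, 224, 3))
base.trainable = False  # freeze the backbone so only the new head is trained

model = tf.keras.Sequential([
    base,
    tf.keras.layers.GlobalAveragePooling2D(),
    tf.keras.layers.Dense(2, activation="softmax"),  # two-class head
])
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3),
              loss="categorical_crossentropy",
              metrics=["accuracy"])
# model.fit(train_generator, validation_data=validation_generator, epochs=10)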