深度学习之路(一):LeNet(TensorFlow 2)

LeNet模型结构

(图:LeNet 模型结构示意图 —— 原文此处为图片,未随文本导出)

导入库

import tensorboard
import tensorflow as tf
import os
from tensorflow.keras.layers import Conv2D,BatchNormalization,Activation,MaxPool2D,Dropout,Flatten,Dense
from tensorflow.keras import Model

训练、测试数据集分割

# Load CIFAR-10 and scale pixel values from [0, 255] into [0.0, 1.0].
cifar = tf.keras.datasets.cifar10
(x_train, y_train), (x_test, y_test) = cifar.load_data()
x_train = x_train / 255.0
x_test = x_test / 255.0

搭建LeNet模型 baseline


#方法1:可读性强,较方法二繁琐
%load_ext tensorboard
class LeNet(Model):
    """LeNet-5-style CNN for 10-class image classification (CIFAR-10 here).

    Architecture: Conv(6, 5x5) -> MaxPool -> Conv(16, 5x5) -> MaxPool ->
    Flatten -> Dense(120) -> Dense(84) -> Dense(10, softmax).
    """

    def __init__(self):
        super().__init__()
        # CONV1: 6 filters of 5x5; 'same' padding preserves spatial size.
        self.c1 = Conv2D(filters=6, kernel_size=(5, 5), padding='same', activation='sigmoid')
        self.p1 = MaxPool2D(pool_size=(2, 2), strides=2, padding='same')
        # CONV2: 16 filters of 5x5.
        self.c2 = Conv2D(filters=16, kernel_size=(5, 5), padding='same', activation='sigmoid')
        self.p2 = MaxPool2D(pool_size=(2, 2), strides=2, padding='same')
        # Fully connected classifier head.
        self.flatten = Flatten()
        self.f1 = Dense(120, activation='sigmoid')
        self.f2 = Dense(84, activation='sigmoid')
        # BUGFIX: the output layer must use softmax (not sigmoid) so the 10
        # outputs form a probability distribution, as required by the
        # SparseCategoricalCrossentropy(from_logits=False) loss used at
        # compile time. Per-class sigmoids are not normalized across classes.
        self.f3 = Dense(10, activation='softmax')

    def call(self, x):
        """Forward pass; returns (batch, 10) class probabilities."""
        x = self.c1(x)
        x = self.p1(x)
        x = self.c2(x)
        x = self.p2(x)
        x = self.flatten(x)
        x = self.f1(x)
        x = self.f2(x)
        y = self.f3(x)
        return y

##方法二:call函数简单
from tensorflow.keras import layers
%load_ext tensorboard
class LeNet(Model):
    """LeNet-5-style CNN (alternative layer-list construction).

    Same architecture as method 1, but the middle layers are kept in a list
    so `call` reduces to a short loop.
    """

    def __init__(self):
        super().__init__()
        # CONV1: 6 filters of 5x5; 'same' padding preserves spatial size.
        self.input_layer=layers.Conv2D(filters=6,kernel_size=(5,5),padding='same',activation='sigmoid')
        self.middle_layers=[
        layers.MaxPool2D(pool_size=(2,2),strides=2,padding='same'),
        # CONV2: 16 filters of 5x5.
        layers.Conv2D(filters=16,kernel_size=(5,5),padding='same',activation='sigmoid'),
        layers.MaxPool2D(pool_size=(2,2),strides=2,padding='same'),
        # Flatten + fully connected head.
        layers.Flatten(),
        layers.Dense(120,activation='sigmoid'),
        layers.Dense(84,activation='sigmoid')]
        # BUGFIX: output layer must be softmax (not sigmoid) so outputs form a
        # probability distribution, matching the
        # SparseCategoricalCrossentropy(from_logits=False) loss used later.
        self.output_layer=layers.Dense(10,activation='softmax')

    def call(self, x):
        """Forward pass; returns (batch, 10) class probabilities."""
        x = self.input_layer(x)
        for layer in self.middle_layers:
            x = layer(x)
        y = self.output_layer(x)
        return y

编译和训练模型

import datetime

# Build and compile the model. Labels are integer class ids, hence the
# sparse categorical cross-entropy loss and matching accuracy metric.
model = LeNet()
model.compile(
    optimizer="adam",
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
    metrics=['sparse_categorical_accuracy'],
)

# Timestamped log directory so each run gets its own TensorBoard trace.
log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)

# Train for 5 epochs, validating on the test split after every epoch.
history = model.fit(
    x_train,
    y_train,
    batch_size=32,
    epochs=5,
    validation_data=(x_test, y_test),
    validation_freq=1,
    callbacks=[tensorboard_callback],
)

tensorboard展示训练过程

%tensorboard --logdir logs/scalars

保存模型权重

# Persist the trained weights as a TF checkpoint under save_path.
# FIX: `== False` is a PEP 8 (E712) anti-idiom; use `not ...` instead.
# NOTE(review): the guard means weights are only written on the first run —
# subsequent (possibly better) weights are never saved; confirm this is
# intentional.
save_path = './save_weights/'
if not os.path.exists(save_path):
    model.save_weights(save_path)

加载模型权重并预测

# Restore the checkpointed weights, then run a single-sample sanity prediction.
model.load_weights(save_path)
sample = x_train[1].reshape(1, 32, 32, 3)  # prepend a batch dimension
model.predict(sample)

保存模型结构和参数

# Save the full model (architecture + weights) in SavedModel format.
# FIX: `== False` is a PEP 8 (E712) anti-idiom; use `not ...` instead.
model_path = './model/net_model'
if not os.path.exists(model_path):
    model.save(model_path)

加载模型结构和参数

# Reload the saved model from disk and verify it can still predict.
new_model = tf.keras.models.load_model('./model/net_model')
sample = x_train[1].reshape(1, 32, 32, 3)  # prepend a batch dimension
new_model.predict(sample)
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值