Deep Learning Training Template

Import the required modules

import tensorflow as tf
import numpy as np
from tensorflow.keras.optimizers import SGD

Load the data

(train_images, train_labels), (valid_images, valid_labels) = tf.keras.datasets.mnist.load_data()
train_images = train_images.reshape(-1, 28, 28, 1)
valid_images = valid_images.reshape(-1, 28, 28, 1)
train_images, valid_images = train_images / 255.0, valid_images / 255.0

print(train_images.shape)
# (60000, 28, 28, 1)  -- after the reshape above, a channel dimension is present
print(train_labels.shape)
# (60000,)
print(train_images.dtype)
# float64 here; float32 also works
print(train_labels.dtype)
# uint8 here; int16 / int32 / int64 etc. also work
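
The NumPy arrays can also be wrapped in a tf.data pipeline, which makes shuffling, batching, and prefetching explicit. A minimal sketch (assumes TF 2.4+, where tf.data.AUTOTUNE is available; older releases use tf.data.experimental.AUTOTUNE):

train_ds = (tf.data.Dataset.from_tensor_slices((train_images, train_labels))
            .shuffle(10000)
            .batch(32)
            .prefetch(tf.data.AUTOTUNE))
valid_ds = tf.data.Dataset.from_tensor_slices((valid_images, valid_labels)).batch(32)
# model.fit(train_ds, validation_data=valid_ds, epochs=2) accepts these datasets directly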

Build the model

model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(6, (5,5), activation='tanh', input_shape=(28,28,1)),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Conv2D(16, (5,5), activation='tanh'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(120, activation='tanh'),
    tf.keras.layers.Dense(84, activation='tanh'),
    tf.keras.layers.Dense(10, activation='softmax')
])
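
Before compiling, model.summary() is a quick way to confirm the layer output shapes and parameter counts:

model.summary()    # the last Dense layer should report an output shape of (None, 10)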

# Recent TF versions use `learning_rate` instead of the legacy `lr`;
# the old `decay` argument was replaced by learning-rate schedules.
model.compile(loss='sparse_categorical_crossentropy',    # loss function
              optimizer=SGD(learning_rate=0.05, momentum=0.9, nesterov=True),    # optimizer
              metrics=['acc'])    # evaluation metrics
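
The string shortcuts above map to Keras classes; spelling them out is equivalent and makes the hyperparameters explicit. A sketch with the class-based API (same SGD settings, no decay):

model.compile(
    loss=tf.keras.losses.SparseCategoricalCrossentropy(),
    optimizer=tf.keras.optimizers.SGD(learning_rate=0.05, momentum=0.9, nesterov=True),
    metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name='acc')])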

Train the model

history = model.fit(train_images, train_labels, batch_size=32, epochs=2,
                    verbose=1, shuffle=True, validation_data=(valid_images, valid_labels))
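
fit() returns a History object whose history dict stores the per-epoch metrics under the names used in compile() ('loss', 'acc', 'val_loss', 'val_acc'). A minimal sketch for plotting them (assumes matplotlib is installed):

import matplotlib.pyplot as plt

plt.plot(history.history['acc'], label='train acc')
plt.plot(history.history['val_acc'], label='valid acc')
plt.xlabel('epoch')
plt.legend()
plt.show()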

Evaluate the model

model.evaluate(valid_images, valid_labels, verbose=2)
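evaluate() only reports the aggregate loss and accuracy; for individual predictions, predict() returns the softmax probabilities and argmax picks the class:

probs = model.predict(valid_images[:5])    # shape (5, 10), one softmax row per image
preds = np.argmax(probs, axis=1)           # predicted digit for each image
print(preds, valid_labels[:5])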

Save the model

model.save("./test-model.h5")
new_model = tf.keras.models.load_model("./test-model.h5")
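
Besides a single HDF5 file, TF 2.x Keras can also save in the SavedModel format (pass a directory instead of a .h5 path; Keras 3 expects a .keras extension instead), and a ModelCheckpoint callback can keep the best weights seen during training. A sketch (the paths are illustrative):

model.save("./test-model")                                  # SavedModel directory
restored = tf.keras.models.load_model("./test-model")

ckpt = tf.keras.callbacks.ModelCheckpoint("./best-model.h5",
                                          monitor='val_acc', save_best_only=True)
# pass callbacks=[ckpt] to model.fit() to save only the best epoch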

Transfer learning

Load pretrained weights

base_model = tf.keras.models.load_model("./test-model.h5")
# The Flatten layer's name depends on how many models were built in the session
# (e.g. "flatten" or "flatten_1"); check base_model.summary() if unsure.
flatten = base_model.get_layer("flatten").output
dense = tf.keras.layers.Dense(256, activation='relu')(flatten)
dense = tf.keras.layers.Dense(128, activation='relu')(dense)
pred = tf.keras.layers.Dense(10, activation='softmax')(dense)
fine_tune_model = tf.keras.models.Model(inputs=base_model.input, outputs=pred)
fine_tune_model.summary()

Freeze

for i, layer in enumerate(fine_tune_model.layers):    # print the index and name of each layer
    print(i, layer.name)
for layer in fine_tune_model.layers[:6]:    # freeze the convolutional base (the first 6 layers)
    layer.trainable = False
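
After freezing, it is worth confirming what is still trainable; the "Trainable params" line in summary() should now exclude the frozen layers:

print(len(fine_tune_model.trainable_variables))    # only the new Dense heads' weights remain
fine_tune_model.summary()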

Fine-tune

fine_tune_model.compile(loss='sparse_categorical_crossentropy',
                        optimizer=SGD(learning_rate=0.05, momentum=0.9, nesterov=True),
                        metrics=['acc'])

# Fine-tune
fine_tune_model.fit(train_images, train_labels, batch_size=32, epochs=1, verbose=1, shuffle=True, validation_data=(valid_images, valid_labels))
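
A common second stage is to unfreeze the base layers and keep training with a much smaller learning rate, so the pretrained weights are only nudged. A hedged sketch (the 1e-3 rate is an illustrative choice; recompiling is needed for the trainable change to take effect):

for layer in fine_tune_model.layers:
    layer.trainable = True    # unfreeze everything

fine_tune_model.compile(loss='sparse_categorical_crossentropy',
                        optimizer=SGD(learning_rate=1e-3, momentum=0.9, nesterov=True),
                        metrics=['acc'])
fine_tune_model.fit(train_images, train_labels, batch_size=32, epochs=1,
                    validation_data=(valid_images, valid_labels))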

Parallel training

strategy = tf.distribute.MirroredStrategy()
print('Number of devices: {}'.format(strategy.num_replicas_in_sync))

with strategy.scope():
    model = tf.keras.models.Sequential([
        tf.keras.layers.Conv2D(6, (5,5), activation='tanh', input_shape=(28,28,1)),
        tf.keras.layers.MaxPooling2D(2,2),
        tf.keras.layers.Conv2D(16, (5,5), activation='tanh'),
        tf.keras.layers.MaxPooling2D(2,2),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(120, activation='tanh'),
        tf.keras.layers.Dense(84, activation='tanh'),
        tf.keras.layers.Dense(10, activation='softmax')
    ])

    # compile() creates the optimizer, so run it inside strategy.scope() as well
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer=SGD(learning_rate=0.05, momentum=0.9, nesterov=True),
                  metrics=['acc'])

model.fit(train_images, train_labels, batch_size=32, epochs=1, verbose=1, shuffle=True,
          validation_data=(valid_images, valid_labels))
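
With MirroredStrategy, the batch_size passed to fit() is the global batch, which is split across the replicas, so a common convention is to scale a per-replica batch size by the replica count. A sketch (64 per replica is an arbitrary choice):

per_replica_batch = 64
global_batch = per_replica_batch * strategy.num_replicas_in_sync
model.fit(train_images, train_labels, batch_size=global_batch, epochs=1,
          validation_data=(valid_images, valid_labels))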
