# Overview of _main():
# 1. Read inputs: the training annotation txt, class names, and anchors
# 2. Decide whether to build the tiny model (based on anchor count)
# 3. Configure how weight checkpoints are saved
# 4. Split the sample data into training/validation sets by ratio
# 5. Two-stage training (frozen backbone first, then full fine-tuning)
# 6. Save the trained weights
def _main():
    """Train YOLOv3 (or tiny-YOLOv3) from a VOC-style annotation file.

    Reads annotations, class names and anchors; builds the (tiny) model
    with a partially frozen body; sets up TensorBoard/checkpoint/LR
    callbacks; splits the data 90/10 into train/validation; then runs
    staged training (continues beyond this excerpt).
    """
    annotation_path = 'train.txt'
    log_dir = 'logs/000/'
    classes_path = 'model_data/voc_classes.txt'
    anchors_path = 'model_data/yolo_anchors.txt'
    class_names = get_classes(classes_path)
    num_classes = len(class_names)
    anchors = get_anchors(anchors_path)
    input_shape = (416,416) # multiple of 32, hw
    # Tiny-YOLOv3 uses 6 anchors (full YOLOv3 uses 9), so the anchor
    # count selects which model variant to build.
    is_tiny_version = len(anchors)==6 # default setting
    if is_tiny_version:
        model = create_tiny_model(input_shape, anchors, num_classes,
            freeze_body=2, weights_path='model_data/tiny_yolo_weights.h5')
    else:
        model = create_model(input_shape, anchors, num_classes,
            freeze_body=2, weights_path='model_data/yolo_weights.h5') # make sure you know what you freeze
    # TensorBoard callback: writes log events under log_dir so training
    # and validation metrics (and layer histograms) can be inspected live.
    logging = TensorBoard(log_dir=log_dir)
    # ModelCheckpoint: save_weights_only stores weights only (not the
    # full model); save_best_only keeps only the best val_loss result;
    # period=3 checks/saves every 3 epochs.
    checkpoint = ModelCheckpoint(log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
        monitor='val_loss', save_weights_only=True, save_best_only=True, period=3)
    # Multiply LR by 0.1 when val_loss has not improved for 3 epochs;
    # verbose=1 prints a message on each reduction.
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1)
    # Stop training when val_loss has not improved (by more than
    # min_delta=0) for 10 consecutive epochs.
    early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)
    # Hold out 1 part in 10 of the samples for validation.
    val_split = 0.1 # train/validation split ratio
    with open(annotation_path) as f:
        lines = f.readlines()
    # Fixed seed gives a reproducible shuffle; reseeding with None
    # restores nondeterminism for later random use.
    np.random.seed(10101)
    np.random.shuffle(lines)
    np.random.seed(None)
    num_val = int(len(lines)*val_split) # number of validation samples
    num_train = len(lines) - num_val # number of training samples
    # Train with frozen layers first, to get a stable loss.
    # Adjust num epochs to your dataset. This step is enough to obtain a not bad model.