keras-yolov3 Source Code Annotations

Note: the coordinate origin for a box's top-left corner (xmin, ymin) and bottom-right corner (xmax, ymax) is the top-left of the image, as in the figure below.
[Figure: image coordinate system with the origin at the top-left corner]

model.py

"""YOLO_v3 Model Defined in Keras."""

from functools import wraps

import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.layers import Conv2D, Add, ZeroPadding2D, UpSampling2D, Concatenate, MaxPooling2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.regularizers import l2

from yolo3.utils import compose


@wraps(Conv2D)
def DarknetConv2D(*args, **kwargs):
    """Wrapper to set Darknet parameters for Convolution2D."""
    darknet_conv_kwargs = {'kernel_regularizer': l2(5e-4)}  # L2-regularize the weight parameters
    darknet_conv_kwargs['padding'] = 'valid' if kwargs.get(
        'strides') == (2, 2) else 'same'
    darknet_conv_kwargs.update(kwargs)
    return Conv2D(*args, **darknet_conv_kwargs)
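
# A quick illustration of the padding rule above (a sketch, not from the original
# repo): only an exact strides=(2, 2) switches to 'valid'; everything else,
# including the default stride, keeps 'same'.
#   DarknetConv2D(32, (3, 3)).padding                   # >>> 'same'
#   DarknetConv2D(64, (3, 3), strides=(2, 2)).padding   # >>> 'valid'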


def DarknetConv2D_BN_Leaky(*args, **kwargs):
    """Darknet Convolution2D followed by BatchNormalization and LeakyReLU."""
    # *args: positional arguments passed through, e.g. 32, (3, 3).
    # **kwargs: keyword arguments; anything passed by keyword is captured in this dict.
    no_bias_kwargs = {'use_bias': False}
    no_bias_kwargs.update(kwargs)
    return compose(
        DarknetConv2D(*args, **no_bias_kwargs),
        BatchNormalization(),
        LeakyReLU(alpha=0.1))


def resblock_body(x, num_filters, num_blocks):
    '''A series of resblocks starting with a downsampling Convolution2D'''
    # Darknet uses left and top padding instead of 'same' mode

    # Zero-pad the borders of x, turning (?, 416, 416, 32) into (?, 417, 417, 32).
    # The next convolution uses stride 2, so the side length needs to be odd.
    x = ZeroPadding2D(((1, 0), (1, 0)))(x)  # pad one row of zeros on top and one column on the left
    # 'valid' padding (p=0) shrinks the feature map: (417 - 3 + 2*0)/2 + 1 = 208
    x = DarknetConv2D_BN_Leaky(num_filters, (3, 3), strides=(2, 2))(x)
    for i in range(num_blocks):
        # compose: a 1x1 conv first (halves the filters), then a 3x3 conv (restores
        # them), so the output has as many channels as the input (e.g. 64)
        y = compose(
            DarknetConv2D_BN_Leaky(num_filters//2, (1, 1)),
            DarknetConv2D_BN_Leaky(num_filters, (3, 3)))(x)
        # Residual connection: add x and y. Residual connections mitigate the
        # vanishing-gradient problem that arises in deep networks.
        x = Add()([x, y])
    return x


def darknet_body(x):
    '''Darknet body having 52 Convolution2D layers'''
    # DarknetConv2D_BN_Leaky: Conv2D-> BatchNormalization-> LeakyReLU
    x = DarknetConv2D_BN_Leaky(32, (3, 3))(x)  # (416,416,32)
    # 5 resblocks
    x = resblock_body(x, 64, 1)  # (208,208,64)
    x = resblock_body(x, 128, 2)  # (104,104,128)
    x = resblock_body(x, 256, 8)  # (52,52,256)
    x = resblock_body(x, 512, 8)  # (26,26,512)
    x = resblock_body(x, 1024, 4)  # (13,13,1024)
    return x
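
# Shape sanity check for darknet_body (a sketch; the shapes above assume a 416x416 input):
#   from keras.layers import Input
#   inputs = Input(shape=(416, 416, 3))
#   print(Model(inputs, darknet_body(inputs)).output_shape)
#   >>> (None, 13, 13, 1024)   # 416 / 2**5 = 13 after the 5 stride-2 downsamples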


def make_last_layers(x, num_filters, out_filters):
    '''6 Conv2D_BN_Leaky layers followed by a Conv2D_linear layer'''
    x = compose(
        DarknetConv2D_BN_Leaky(num_filters, (1, 1)),
        DarknetConv2D_BN_Leaky(num_filters*2, (3, 3)),
        DarknetConv2D_BN_Leaky(num_filters, (1, 1)),
        DarknetConv2D_BN_Leaky(num_filters*2, (3, 3)),
        DarknetConv2D_BN_Leaky(num_filters, (1, 1)))(x)
    y = compose(
        DarknetConv2D_BN_Leaky(num_filters*2, (3, 3)),
        DarknetConv2D(out_filters, (1, 1)))(x)
    return x, y


def yolo_body(inputs, num_anchors, num_classes):
    """Create YOLO_V3 model CNN body in Keras."""
    darknet = Model(inputs, darknet_body(inputs))

    # predict1  input: (?,13,13,1024) -> x (?,13,13,512), `y1 (?,13,13,num_anchors*(num_classes+5) )`
    # num_anchors: the number of boxes per cell; each box predicts num_classes + 5 values
    x, y1 = make_last_layers(darknet.output, 512, num_anchors*(num_classes+5))

    x = compose(
        DarknetConv2D_BN_Leaky(256, (1, 1)),
        UpSampling2D(2))(x)  # (?,26,26,256)
    # darknet.layers[152].output: the output of layer 152, (?, 26, 26, 512)
    x = Concatenate()([x, darknet.layers[152].output])  # (?,26,26,768)

    # predict2  input: (?,26,26,768) -> x (?,26,26,256), `y2 (?,26,26,num_anchors*(num_classes+5) )`
    x, y2 = make_last_layers(x, 256, num_anchors*(num_classes+5))

    x = compose(
        DarknetConv2D_BN_Leaky(128, (1, 1)),
        UpSampling2D(2))(x)  # (?,52,52,128)
    # darknet.layers[92].output  (?,52,52,256)
    x = Concatenate()([x, darknet.layers[92].output])  # (?,52,52,384)

    # predict3  input: (?,52,52,384) -> x (?,52,52,128), `y3 (?,52,52,num_anchors*(num_classes+5) )`
    x, y3 = make_last_layers(x, 128, num_anchors*(num_classes+5))

    # Model  input:(?,416,416,3) => output:[y1, y2, y3]
    # The output is prediction layers at 3 scales; y2 and y3 are better suited than y1 to detecting `smaller` objects.
    return Model(inputs, [y1, y2, y3])
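
# Usage sketch (illustrative; assumes COCO with 80 classes and 3 anchors per scale):
#   from keras.layers import Input
#   model = yolo_body(Input(shape=(416, 416, 3)), num_anchors=3, num_classes=80)
#   # model.output is [y1, y2, y3], each with 3*(80+5) = 255 channels:
#   # (None, 13, 13, 255), (None, 26, 26, 255), (None, 52, 52, 255)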


def tiny_yolo_body(inputs, num_anchors, num_classes):
    '''Create Tiny YOLO_v3 model CNN body in keras.'''
    x1 = compose(
        DarknetConv2D_BN_Leaky(16, (3, 3)),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
        DarknetConv2D_BN_Leaky(32, (3, 3)),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
        DarknetConv2D_BN_Leaky(64, (3, 3)),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
        DarknetConv2D_BN_Leaky(128, (3, 3)),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
        DarknetConv2D_BN_Leaky(256, (3, 3)))(inputs)
    x2 = compose(
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'),
        DarknetConv2D_BN_Leaky(512, (3, 3)),
        MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='same'),
        DarknetConv2D_BN_Leaky(1024, (3, 3)),
        DarknetConv2D_BN_Leaky(256, (1, 1)))(x1)
    y1 = compose(
        DarknetConv2D_BN_Leaky(512, (3, 3)),
        DarknetConv2D(num_anchors*(num_classes+5), (1, 1)))(x2)

    x2 = compose(
        DarknetConv2D_BN_Leaky(128, (1, 1)),
        UpSampling2D(2))(x2)
    y2 = compose(
        Concatenate(),
        DarknetConv2D_BN_Leaky(256, (3, 3)),
        DarknetConv2D(num_anchors*(num_classes+5), (1, 1)))([x2, x1])

    return Model(inputs, [y1, y2])


def yolo_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    """将预测的feats转化为box参数(xywh,相对于grid cell的尺度).
    Argument
        # feats: 某一种feature maps, (m,13,13,255) or (m,26,26,255) or (m,52,52,255)
            从feats最后一维中取出预测的tx,y,w,h, 然后按照下式计算边框的相对gridcell的几何值.
                bx = sigmoid(tx) + cx
                by = sigmoid(ty) + cy
                bw = pw*exp(tw)
                bh = ph*exp(th)
            以及sigmoid计算目标框置信度和类别置信度 box_confidence和box_class_probs
            在算loss的时候并不需要返回box_confidence和box_class_probs, 因为loss是直接用logits来算的,
            不能加sigmoid, 算loss直接取网络输出就好了.
        # anchors: 某一种feature maps对应的anchors.
        # input_shape: (416,416).
        # calc_loss: 是否计算loss.

    Returns:
        # box_xy: (m,13,13,3,2), 目标框的中心坐标, 相对于grid cell.
        # box_wh: (m,13,13,3,2), 目标框的宽高, 相对于grid cell.
        # box_confidence: (m,13,13,3,1) 目标框的置信度box_confidence 85维向量的第5个值
        # box_class_probs: (m,13,13,3,80) 预测类别置信度的概率
        训练时返回: grid, feats, box_xy, box_wh, 计算loss.
        # grid: (13,13,1,2) or (26,26,1,2) or (52,52,1,2).
        # feats: (m,13,13,3,85) or (m,26,26,3,85) or (m,52,52,3,85).
    """

    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    # (1,1,1,3,2); num_anchors is 3 for each kind of feature map.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    # (13,13) or (26,26) or (52,52)
    grid_shape = K.shape(feats)[1:3]  # height, width
    # grid_y,grid_x (13,13,1,1) or (26,26,1,1) or (52,52,1,1)
    """
    K.arange(0, stop=grid_shape[0])
    >>> [ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12]
    K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1])
    >>> array([
                [[[ 0]]],
                [[[ 1]]],
                [[[ 2]]],
                  ...
                [[[12]]]
              ])
    K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
                    [1, grid_shape[1], 1, 1])
    >>> grid_y (13,13,1,1)
        array([
                [[[ 0]],
                [[ 0]],
                [[ 0]],
                [[ 0]],
                [[ 0]],
                [[ 0]],
                [[ 0]],
                [[ 0]],
                [[ 0]],
                [[ 0]],
                [[ 0]],
                [[ 0]],
                [[ 0]]],
                  ...
                [[[12]],
                [[12]],
                [[12]],
                [[12]],
                [[12]],
                [[12]],
                [[12]],
                [[12]],
                [[12]],
                [[12]],
                [[12]],
                [[12]],
                [[12]]]
              ])
    grid_x (13,13,1,1)
    >>> array([
            [[[ 0]],
            [[ 1]],
            [[ 2]],
            [[ 3]],
            [[ 4]],
            [[ 5]],
            [[ 6]],
            [[ 7]],
            [[ 8]],
            [[ 9]],
            [[10]],
            [[11]],
            [[12]]]

            [[[ 0]],
            [[ 1]],
            [[ 2]],
            [[ 3]],
            [[ 4]],
            [[ 5]],
            [[ 6]],
            [[ 7]],
            [[ 8]],
            [[ 9]],
            [[10]],
            [[11]],
            [[12]]]
        ])
    """
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
                    [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
                    [grid_shape[0], 1, 1, 1])
    """
    grid = K.concatenate([grid_x, grid_y])
    >>> <tf.Tensor: id=370, shape=(13, 13, 1, 2), dtype=int32, numpy=
        array([[[[ 0,  0]],
                [[ 1,  0]],
                [[ 2,  0]],
                [[ 3,  0]],
                [[ 4,  0]],
                [[ 5,  0]],
                [[ 6,  0]],
                [[ 7,  0]],
                [[ 8,  0]],
                [[ 9,  0]],
                [[10,  0]],
                [[11,  0]],
                [[12,  0]]],
                    ...    
                [[[ 0, 12]],
                [[ 1, 12]],
                [[ 2, 12]],
                [[ 3, 12]],
                [[ 4, 12]],
                [[ 5, 12]],
                [[ 6, 12]],
                [[ 7, 12]],
                [[ 8, 12]],
                [[ 9, 12]],
                [[10, 12]],
                [[11, 12]],
                [[12, 12]]]]
    """
    grid = K.concatenate([grid_x, grid_y])  # (13,13,1,2)
    grid = K.cast(grid, K.dtype(feats))  # float32

    """将预测结果分离到最后一个维度, 85维的向量代表一个anchor box的预测结果, 每个网格共有3个anchor box.
    (bath, 13,13,3,85) or (batch,26,26,3,85) or (batch,52,52,3,85)  """
    feats = K.reshape(feats,
                      [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    # Adjust predictions to each spatial grid point and anchor size.
    """
    box_xy: box centers (batch,13,13,3,2), computed with sigmoid.
            cx, cy are the coordinates of each grid cell's top-left corner on the
            feature map. For example, the 3 boxes predicted by the first grid cell
            have centers (sigmoid(tx)+0, sigmoid(ty)+0) in the 13x13 grid, and the
            3 boxes of one grid cell share the same cx, cy,
            so (m,13,13,3,2) + (13,13,1,2) relies on broadcasting.
            bx = sigmoid(tx) + cx
            by = sigmoid(ty) + cy
    box_wh: box width and height (batch,13,13,3,2), computed with exp; pw, ph come
            from anchors_tensor, i.e. the clustering results.
            bw = pw*exp(tw)
            bh = ph*exp(th)
    box_xy and box_wh are both relative to the current cell.
    t_* are the raw network predictions.

    box_confidence (batch,13,13,3,1): objectness, the 5th value of the 85-dim vector.
    box_class_probs (batch,13,13,3,80): the last 80 values, per-class confidence probabilities.
    """
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / \
        K.cast(grid_shape[::-1], K.dtype(feats))  # grid_shape[::-1] (13,13)
    # The predicted width/height incorporate the anchor priors; this is where the anchor boxes come in
    box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / \
        K.cast(input_shape[::-1], K.dtype(feats))
    box_confidence = K.sigmoid(feats[..., 4:5])  # probability that this anchor box contains an object
    box_class_probs = K.sigmoid(feats[..., 5:])  # per-class probabilities for the contained object

    if calc_loss:
        # Used during training: grid (13,13,1,2), feats (batch,13,13,3,85),
        # box_xy (batch,13,13,3,2), box_wh (batch,13,13,3,2)
        return grid, feats, box_xy, box_wh
    return box_xy, box_wh, box_confidence, box_class_probs
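
"""A worked example of the decode equations above (illustrative, not part of the
original code). For the cell at (cx, cy) = (6, 6) on the 13x13 grid, the anchor
(pw, ph) = (116, 90), and raw predictions t* = 0:
    bx = (sigmoid(0) + 6) / 13 = 0.5      # box_xy: center x as a fraction of the image
    by = (sigmoid(0) + 6) / 13 = 0.5      # center y
    bw = 116 * exp(0) / 416 ~= 0.279      # box_wh: width as a fraction of the image
    bh =  90 * exp(0) / 416 ~= 0.216      # height
"""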


def yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape):
    """将box的中心宽高 xywh 转换为原始输入图片(例如500,375的图片)上的真实坐标
    Argument
        # box_xy, box_wh: (m,13,13,3,2), 相对于grid cell尺度的box参数.
        # input_shape: (416,416).
        # image_shape: 图像尺寸,例如(500,375).

    Returns
        # boxes: (m,13,13,3,4): 尺寸和原图(image_shape)一致, 为box的左上角和右下角坐标(y_min, x_min, y_max, x_max).
    """

    # box_yx (batch,13,13,3,2)
    # box_hw (batch,13,13,3,2)
    box_yx = box_xy[..., ::-1]  # reverse the last axis (xy -> yx)
    box_hw = box_wh[..., ::-1]
    input_shape = K.cast(input_shape, K.dtype(box_yx))  # (416,416)
    image_shape = K.cast(image_shape, K.dtype(box_yx))  # e.g. (500,375)

    # K.round: element-wise rounding to the nearest integer.
    # K.min: the minimum value in a tensor.
    new_shape = K.round(
        image_shape * K.min(input_shape/image_shape))  # e.g. (416,312)
    offset = (input_shape-new_shape)/2./input_shape  # letterbox offset, e.g. (0,0.125)
    scale = input_shape/new_shape  # e.g. (1,1.333)
    box_yx = (box_yx - offset) * scale
    box_hw *= scale

    # (ymin,xmin): the box's top-left corner, (ymax,xmax): its bottom-right corner
    box_mins = box_yx - (box_hw / 2.)
    box_maxes = box_yx + (box_hw / 2.)
    boxes = K.concatenate([
        box_mins[..., 0:1],  # y_min
        box_mins[..., 1:2],  # x_min
        box_maxes[..., 0:1],  # y_max
        box_maxes[..., 1:2]  # x_max
    ])

    # Scale boxes back to original image shape.
    boxes *= K.concatenate([image_shape, image_shape])
    # (batch,13,13,3,4)
    return boxes
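
"""A worked example of the offset/scale math above (illustrative). With
input_shape = (416, 416) and image_shape = (500, 375) (height, width):
    K.min(input_shape/image_shape) = min(416/500, 416/375) = 0.832
    new_shape = round((500, 375) * 0.832) = (416, 312)
    offset    = ((416-416)/2/416, (416-312)/2/416) = (0, 0.125)
    scale     = (416/416, 416/312) = (1, ~1.333)
So the x-coordinates are shifted by the letterbox padding and stretched back,
then everything is multiplied by image_shape to land on the original image.
"""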


def yolo_boxes_and_scores(feats, anchors, num_classes, input_shape, image_shape):
    """
    Arguments
        # feats: 第l层feature map, 
        #        eg (m,13,13,255) or (m,26,26,255) or (m,52,52,255).
        # anchors: 第l层feature map对应的锚框 featuremap 越小对应的锚框越大 检测更大的物体.
        # num_classes: 类别数, coco 80.
        # input_shape: (416,416).
        # image_shape: 输入image size, 例如(500,375).

    Returns
        # boxes: (m*13*13*3, 4) 所有box的左上角和右下角坐标(原图尺度).
        # box_scores: (m*13*13*3, 80) 所有box得分.
    """

    """
    先调用`yolo_head`将第l种feats转换为box的参数(xywh的尺度是相对于grid cell的)
    # box_xy: 预测box的中心. (m,13,13,3,2)
    # box_wh: 预测box的宽高. (m,13,13,3,2)
    # box_confidence: box包含object的概率. (m,13,13,3,1)
    # box_class_probs: object属于某个类的概率. (m,13,13,3,80)
    """
    box_xy, box_wh, box_confidence, box_class_probs = yolo_head(feats,
                                                                anchors, num_classes, input_shape)
    """
    再调用`yolo_correct_boxes`相对于grid cell尺寸的xywh转换为相对于原始图片(image_shape)的box参数.
    # boxes: (m,13,13,3,4), (ymin, xmin, ymax, xmax)
    """
    boxes = yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape)
    # (batch*13*13*3, 4) 所有box的左下角和右上角坐标
    boxes = K.reshape(boxes, [-1, 4])
    # (batch,13,13,3,1) * (batch,13,13,3,80) => (batch,13,13,3,80) 广播机制.
    box_scores = box_confidence * box_class_probs
    # (batch*13*13*3, 80) 所有box预测的object类别概率.
    box_scores = K.reshape(box_scores, [-1, num_classes])
    return boxes, box_scores


def yolo_eval(yolo_outputs,
              anchors,
              num_classes,
              image_shape,
              max_boxes=20,  # at most 20 detected boxes per class per image
              score_threshold=.6,
              iou_threshold=.5):
    """ 
    Arguments
        # yolo_outputs: 模型输出, [y1, y2, y3] 
                        => [(m,13,13,n),(m,26,26,n),(m,52,52,n)], n = 3*(num_classes+5).
        # anchors: (N,2) array, yolo中为9个先验anchors box, 用来加入人工经验修正预测box.
        # num_classes: 类别数.
        # image_shape: 输入图片实际大小, 不一定是(416,416).
        # score_threshold: 第一次过滤参数.
        # max_boxes: 每个类最多有max_boxes个box, NMS算法的参数, 第二次过滤.
        # iou_threshold: NMS算法的参数, 第二次过滤.

    Returns
        所有类经过两次过滤后剩下X个框
        # boxes_ 所有两次过滤后框的坐标(`ymin, xmin, ymax, xmax`,原图尺度), (X, 4)
        # scores_ 所有两次过滤后框的置信度, (X, 1)
        # classes_ 所有两次过滤后框的类别, (X, 1)
    """

    num_layers = len(yolo_outputs)
    # Assign anchor boxes to each kind of feature map: the smaller the feature map,
    # the larger its receptive field and the more sensitive it is to large objects,
    # so it uses the large anchor boxes
    anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]] if num_layers == 3 else [
        [3, 4, 5], [1, 2, 3]]  # default setting
    input_shape = K.shape(yolo_outputs[0])[1:3] * 32  # (416,416)
    boxes = []
    box_scores = []
    # Decode each of the 3 kinds of feature maps:
    # (batch,13,13,num_anchors*(num_classes+5))
    # (batch,26,26,num_anchors*(num_classes+5))
    # (batch,52,52,num_anchors*(num_classes+5))
    for l in range(num_layers):
        """
        调用`yolo_boxes_and_scores`函数将第l种feature转换为box的参数
        # _boxes: (m*13*13*3, 4) 所有box的(ymin, xmin, ymax, xmax), 原图尺度.
        # _box_scores: (m*13*13*3, 80) 所有box得分.
        """
        # 传入第l种feature map的输出和它对应的anchor_box anchors[anchor_mask[l]]
        _boxes, _box_scores = yolo_boxes_and_scores(yolo_outputs[l],
                                                    anchors[anchor_mask[l]], num_classes, input_shape, image_shape)
        boxes.append(_boxes)
        box_scores.append(_box_scores)
    """
    # boxes为所有特征图的box参数(m*10647, 4)
    # box_scores为所有特征图的boxbox得分(m*10647, 80), 
    10647:每张图片会预测 13*13*3+26*26*3+52*52*3 = 10647 个box
    """
    boxes = K.concatenate(boxes, axis=0)
    box_scores = K.concatenate(box_scores, axis=0)

    """
    进行两次过滤, 第一次利用阈值scores过滤, 第二次利用NMS过滤
    """
    mask = box_scores >= score_threshold
    max_boxes_tensor = K.constant(max_boxes, dtype='int32')  # 20
    boxes_ = []
    scores_ = []
    classes_ = []
    # Iterate over the num_classes (e.g. 80) classes
    for c in range(num_classes):
        # boolean_mask keeps only the rows of boxes whose mask[:, c] entry is True
        # num1 boxes survive the first filtering pass
        class_boxes = tf.boolean_mask(boxes, mask[:, c])  # (num1, 4)
        class_box_scores = tf.boolean_mask(box_scores[:, c],
                                           mask[:, c])  # (num1,)
        # Second filtering pass: non-maximum suppression (NMS), keeping at most
        # max_boxes_tensor (20) boxes per class; num2 boxes survive
        nms_index = tf.image.non_max_suppression(
            class_boxes, class_box_scores, max_boxes_tensor, iou_threshold=iou_threshold)
        # gather extracts the entries of a tensor at the indices in nms_index
        class_boxes = K.gather(class_boxes, nms_index)  # (num2, 4)
        class_box_scores = K.gather(class_box_scores, nms_index)  # (num2,)
        # The num2 boxes all belong to class c.
        classes = K.ones_like(class_box_scores, 'int32') * c  # (num2,)
        boxes_.append(class_boxes)
        scores_.append(class_box_scores)
        classes_.append(classes)

    """ 
    所有类经过两次过滤后剩下X个框
    # boxes_ 所有两次过滤后框的坐标, (X, 4)
    # scores_ 所有两次过滤后框的置信度, (X, 1)
    # classes_ 所有两次过滤后框的类别, (X, 1)
    """
    boxes_ = K.concatenate(boxes_, axis=0)
    scores_ = K.concatenate(scores_, axis=0)
    classes_ = K.concatenate(classes_, axis=0)

    return boxes_, scores_, classes_
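
# A minimal numpy sketch of the same two-stage filtering for one class
# (illustrative; the real code above uses tf.boolean_mask and
# tf.image.non_max_suppression, and `iou` here is a hypothetical pairwise helper):
#   keep = scores >= score_threshold              # stage 1: score filter
#   boxes, scores = boxes[keep], scores[keep]
#   order, picked = scores.argsort()[::-1], []    # stage 2: greedy NMS
#   while order.size and len(picked) < max_boxes:
#       i, order = order[0], order[1:]
#       picked.append(i)
#       order = order[iou(boxes[i], boxes[order]) < iou_threshold]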


def preprocess_true_boxes(true_boxes, input_shape, anchors, num_classes):
    '''Preprocess true boxes to training input format

    Arguments
        # true_boxes: array, shape=(m, max_boxes, 5). Absolute x_min, y_min, x_max, y_max, class_id relative to input_shape.
                      m images, each with max_boxes boxes; each box has 5 values
                      (x_min, y_min, x_max, y_max, class_id): top-left corner, bottom-right corner, class.
        # input_shape: e.g. (416, 416); hw, multiples of 32.
        # anchors: (N, 2); N (9) anchor boxes, each (width, height).
        # num_classes: integer.
    Returns
        # y_true: list of arrays, shaped like yolo_outputs; xywh are relative values.

    '''

    assert (true_boxes[..., 4] < num_classes).all(
    ), 'class id must be less than num_classes'
    # 3 kinds of feature maps: (13,13,255), (26,26,255), (52,52,255)
    num_layers = len(anchors)//3  # default setting
    anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]
                   ] if num_layers == 3 else [[3, 4, 5], [1, 2, 3]]

    true_boxes = np.array(true_boxes, dtype='float32')
    input_shape = np.array(input_shape, dtype='int32')  # (416,416)

    # Compute the center coordinates and width/height:
    # (top-left corner + bottom-right corner)/2 = center  (m,T,2)
    boxes_xy = (true_boxes[..., 0:2] + true_boxes[..., 2:4]) // 2
    # (bottom-right corner - top-left corner) = width/height  (m,T,2)
    boxes_wh = true_boxes[..., 2:4] - true_boxes[..., 0:2]

    # Turn the centers and widths/heights into fractions of input_shape (relative values)
    true_boxes[..., 0:2] = boxes_xy/input_shape[::-1]
    true_boxes[..., 2:4] = boxes_wh/input_shape[::-1]

    """现在true_boxes 中的数据从 `左下角右上角坐标` 变成了 `相对原始图片的` `中心点 和 宽高 x,y,w,h` """
    m = true_boxes.shape[0]  # batch 输入图片的个数
    grid_shapes = [input_shape//{0: 32, 1: 16, 2: 8}[l]
                   for l in range(num_layers)]  # {0: 32, 1: 16, 2: 8} 临时字典
    """grid_shapes
    >>> [array([13, 13], dtype=int32), array([26, 26], dtype=int32), array([52, 52], dtype=int32)] """

    y_true = [np.zeros((m, grid_shapes[l][0], grid_shapes[l][1], len(anchor_mask[l]), 5+num_classes),
                       dtype='float32') for l in range(num_layers)]
    """y_true
    print(y_true[0].shape, y_true[1].shape, y_true[2].shape)  # m images
    >>> (m, 13, 13, 3, 85)
        (m, 26, 26, 3, 85)
        (m, 52, 52, 3, 85) """

    # Expand dim to apply broadcasting.
    anchors = np.expand_dims(anchors, 0)  # (1,9,2)
    # Treating each anchor's center as the origin, compute its min and max corners:
    anchor_maxes = anchors / 2.  # (1,9,2)
    anchor_mins = -anchor_maxes  # (1,9,2)
    # valid_mask: (m, T), whether the t-th box of image b is valid; valid_mask[b]
    # marks which of image b's ground truths are valid.
    valid_mask = boxes_wh[..., 0] > 0

    # Iterate over all m images in the batch.
    """
    # YOLOv3 training-sample assignment: https://www.jianshu.com/p/67163d52946f
    # The loop below finds, for each image's annotated boxes, the best anchor to be
    # responsible for predicting the corresponding object.
    """
    for b in range(m):
        # Discard zero rows.
        # Extract image b's valid boxes: (validnum, 2), validnum = number of valid boxes
        wh = boxes_wh[b, valid_mask[b]]
        if len(wh) == 0:
            continue
        # Expand dim to apply broadcasting.
        wh = np.expand_dims(wh, -2)  # (validnum, 1, 2): expand the second-to-last dim
        box_maxes = wh / 2.  # max corner
        box_mins = -box_maxes  # min corner

        """计算ground_true与anchor的 IOU.
        对于当前图片的所有标记的ground truth框, 计算它们和9个anchor box的IOU, 
        计算IOU只需要用到框的 `宽和高` 即可,不需要用到框的位置, 并且anchor box本身也没有位置信息 只有wh.
        然后在9个anchor box里找出能用来检测这张图片中的物体的anchor box(IOU最大的), 因为一些小的anchor box很难检测大的物体
        而太大的anchor box又很难检测小的物体. 因此对于这张图片中的所有object 都找到一个最好的anchor box(IOU和该object最大)负责预测它, 将该anchor置信度设为1
        其它的anchor为0, 并且该anchor的预测类别设置为该object的类, 即类别的one-hot编码设为1. 
        """

        """下面是批量计算validnum个ground truth box和9个anchor的IOU. """
        # maximum(X, Y) X和Y逐位进行比较,选择最大值.
        intersect_mins = np.maximum(box_mins, anchor_mins)  # (validnum,9,2)
        intersect_maxes = np.minimum(box_maxes, anchor_maxes)  # (validnum,9,2)
        intersect_wh = np.maximum(
            intersect_maxes - intersect_mins, 0.)  # (validnum,9,2)

        # intersect_area 两个box的交集, box_area anchor_area 两个box的面积.
        intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
        box_area = wh[..., 0] * wh[..., 1]  # (validnum, 1)
        anchor_area = anchors[..., 0] * anchors[..., 1]  # (1,9)
        # 交集/并集
        # (validnum, 9) 每个ground truth与anchor的IOU
        iou = intersect_area / (box_area + anchor_area - intersect_area)

        # Find best anchor for each true box:
        # argmax over the last axis of the (validnum, 9) matrix returns a
        # (validnum,) array holding each ground truth's best anchor
        best_anchor = np.argmax(iou, axis=-1)  # (validnum, )

        # Iterate over the best anchors:
        # t is the ground-truth id, n is the anchor id
        for t, n in enumerate(best_anchor):
            for l in range(num_layers):  # the 3 kinds of feature maps
                if n in anchor_mask[l]:
                    """
                    # true_boxes (m,T,5)
                    # grid_shape [(13,13), (26,26), (52,52)]
                    # np.floor 返回不大于输入参数的最大整数。 即对于输入值 x ,将返回最大的整数 i ,使得 i <= x。
                    # true_boxes[b, t, 0],1 第b张图片第t个ground truth相对原始图片的中心点(x,y)
                    # grid_shape[l][1],0  13,13 or 26,26 or 52,52.
                    # 二者相乘将中心点放大到和第l个特征图相匹配的尺寸 找到该ground truth的中心位于哪个cell中心, 
                    # 由这个cell(i,j)负责预测.
                    """
                    i = np.floor(true_boxes[b, t, 0] *
                                 grid_shapes[l][1]).astype('int32')
                    j = np.floor(true_boxes[b, t, 1] *
                                 grid_shapes[l][0]).astype('int32')

                    """ 
                    anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
                    第l个feature map的anchor_mask, 如[6,7,8] 
                    anchor_mask[0].index(7) >>> 1
                    """
                    # 找到n在 anchor_box的索引位置
                    k = anchor_mask[l].index(n)
                    # 得到groundtruth的类别id
                    c = true_boxes[b, t, 4].astype('int32')
                    """
                    y_true是一个list, list中是zero array
                    shape是这样的, 对照着看就能理解下面的操作了.
                    print(y_true[0].shape, y_true[1].shape, y_true[2].shape) # m张图片
                    >>> (m, 13, 13, 3, 85)
                        (m, 26, 26, 3, 85)
                        (m, 52, 52, 3, 85)
                    """
                    # 找到 第l种特征图 第b张图像 第j行 i列 第k个anchor
                    # 并将 x,y,w,h (relative 0~1), confindence, 类别概率 分别赋值给它
                    y_true[l][b, j, i, k, 0:4] = true_boxes[b, t, 0:4]  # xywh
                    y_true[l][b, j, i, k, 4] = 1  # 置信度为1,包含object
                    y_true[l][b, j, i, k, 5+c] = 1  # 类别的one-hot编码
    return y_true
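
"""A worked example of the wh-only IOU above (illustrative). Both boxes are
centered at the origin, so the intersection is simply the overlap of widths and
heights. One ground-truth box with wh = (120, 100) against the anchor (116, 90):
    intersect = min(120, 116) * min(100, 90) = 116 * 90 = 10440
    union     = 120*100 + 116*90 - 10440    = 12000
    iou       = 10440 / 12000 = 0.87
np.argmax over the 9 such IOUs then picks the anchor responsible for this box.
"""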


def box_iou(b1, b2):
    '''Return iou tensor

    Parameters
    ----------
    b1: tensor, shape=(i1,...,iN, 4), xywh
    b2: tensor, shape=(j, 4), xywh

    Returns
    -------
    iou: tensor, shape=(i1,...,iN, j)

    '''

    # Expand dim to apply broadcasting.
    b1 = K.expand_dims(b1, -2)
    b1_xy = b1[..., :2]
    b1_wh = b1[..., 2:4]
    b1_wh_half = b1_wh/2.
    b1_mins = b1_xy - b1_wh_half
    b1_maxes = b1_xy + b1_wh_half

    # Expand dim to apply broadcasting.
    b2 = K.expand_dims(b2, 0)
    b2_xy = b2[..., :2]
    b2_wh = b2[..., 2:4]
    b2_wh_half = b2_wh/2.
    b2_mins = b2_xy - b2_wh_half
    b2_maxes = b2_xy + b2_wh_half

    intersect_mins = K.maximum(b1_mins, b2_mins)
    intersect_maxes = K.minimum(b1_maxes, b2_maxes)
    intersect_wh = K.maximum(intersect_maxes - intersect_mins, 0.)
    intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]
    b1_area = b1_wh[..., 0] * b1_wh[..., 1]
    b2_area = b2_wh[..., 0] * b2_wh[..., 1]
    iou = intersect_area / (b1_area + b2_area - intersect_area)

    return iou


def yolo_loss(args, anchors, num_classes, ignore_thresh=.5, print_loss=False):
    '''Return yolo_loss tensor

    Parameters
    ----------
    yolo_outputs: list of tensor, the output of yolo_body or tiny_yolo_body
    y_true: list of array, the output of preprocess_true_boxes
    anchors: array, shape=(N, 2), wh
    num_classes: integer
    ignore_thresh: float, the iou threshold whether to ignore object confidence loss

    Returns
    -------
    loss: tensor, shape=(1,)

    '''
    num_layers = len(anchors)//3  # default setting
    # yolo_outputs: [y1, y2, y3]
    # y1 (m,13,13,3*85), y2 (m,26,26,3*85), y3 (m,52,52,3*85)
    yolo_outputs = args[:num_layers]  # model outputs
    # y_true: [(m, 13, 13, 3, 85), (m, 26, 26, 3, 85), (m, 52, 52, 3, 85)]
    y_true = args[num_layers:]  # ground truth
    anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]
                   ] if num_layers == 3 else [[3, 4, 5], [1, 2, 3]]
    input_shape = K.cast(K.shape(yolo_outputs[0])[
                         1:3] * 32, K.dtype(y_true[0]))  # (416, 416)
    # grid_shapes[[13,13],[26,26],[52,52]]
    grid_shapes = [K.cast(K.shape(yolo_outputs[l])[1:3],
                          K.dtype(y_true[0])) for l in range(num_layers)]
    loss = 0
    m = K.shape(yolo_outputs[0])[0]  # batch size, tensor
    mf = K.cast(m, K.dtype(yolo_outputs[0]))

    for l in range(num_layers):  # the l-th kind of feature map
        # object_mask: the confidences of all anchors over the m images on feature map l,
        # e.g. (m, 13, 13, 3, 1). Anchors assigned a ground truth hold its values;
        # unassigned anchors are still 0.
        object_mask = y_true[l][..., 4:5]
        # the classes of all anchors over the m images on feature map l, e.g. (m, 13, 13, 3, 80)
        true_class_probs = y_true[l][..., 5:]
        """grid (13,13,1,2): grid-cell coordinates; raw_pred (m,13,13,3,85);
        pred_xy (m,13,13,3,2), pred_wh (m,13,13,3,2): both the positions and sizes
        are relative to the grid (13,13).
        """
        grid, raw_pred, pred_xy, pred_wh = yolo_head(yolo_outputs[l],
                                                     anchors[anchor_mask[l]],
                                                     num_classes, input_shape, calc_loss=True)

        pred_box = K.concatenate([pred_xy, pred_wh])  # (m,13,13,3,4)

        # Darknet raw box to calculate loss.
        """raw_true_xy and raw_true_wh below first scale the ground truth's xy/wh up to
        grid-cell scale, then invert the decoding so they live in the same space as the
        predicted txywh, where the loss is computed.
        (The inverse of the sigmoid is awkward to compute, so instead of inverting it,
        the xy loss uses binary_crossentropy with from_logits=True on the raw output.)
        # y_true[0] (m, 13, 13, 3, 85)
        # y_true[l][..., :2] is xy, y_true[l][..., 2:4] is wh
        # y_true's xy and wh are stored relative to the original image (416,416), in 0~1
        # grid_shapes: [[13,13],[26,26],[52,52]]
        # raw_true_xy (m,13,13,3,2), raw_true_wh (m,13,13,3,2): converted into offsets
        # relative to the current cell, the same scale as pred_xy and pred_wh.
        """
        raw_true_xy = y_true[l][..., :2]*grid_shapes[l][::-1] - grid
        raw_true_wh = K.log(y_true[l][..., 2:4] /
                            anchors[anchor_mask[l]] * input_shape[::-1])

        # y_true is initialized to zeros in preprocess_true_boxes, so only the chosen
        # anchors hold values; the xywh of unchosen anchors are all zero, and taking
        # log of a zero wh would give -inf, so K.switch replaces those entries
        raw_true_wh = K.switch(object_mask,
                               raw_true_wh,
                               K.zeros_like(raw_true_wh))  # avoid log(0)=-inf
        """
        85维向量的2:3,3:4是wh 相乘就是box的大小
        大框给小权重,小框给大权重,因为大框的xywh不需要学得那么好,而小框则对xywh很敏感
        为了调整不同大小的预测框所占损失的比重,真值框越小,
        box_loss_scale越大,这样越小的框的损失占比越大,和yolov1,yolov2里采用sqrt(w)的目的一样
        """
        box_loss_scale = 2 - y_true[l][..., 2:3]*y_true[l][..., 3:4]
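        # Worked numbers (illustrative): a box covering the whole image (w = h = 1)
        # gets scale 2 - 1*1 = 1, while a small 0.1 x 0.1 box gets 2 - 0.01 = 1.99,
        # nearly doubling its xy/wh loss weight.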

        # Find ignore mask, iterate over each of batch.
        ignore_mask = tf.TensorArray(
            K.dtype(y_true[0]), size=1, dynamic_size=True)
        object_mask_bool = K.cast(object_mask, 'bool')  # cast the confidences to bool

        def loop_body(b, ignore_mask):
            # Extract the boxes with confidence 1 (i.e. containing an object).
            # y_true[l][b, ..., 0:4]: image b's xywh on feature map l, (13,13,3,4).
            # true_box: (num,4) after extraction; the num object-containing boxes
            # selected out of the 13*13*3 boxes by object_mask_bool.
            true_box = tf.boolean_mask(y_true[l][b, ..., 0:4],
                                       object_mask_bool[b, ..., 0])
            # pred_box (m,13,13,3,4)
            # pred_box[b] (13,13,3,4): all boxes predicted for image b
            # true_box (num,4)

            """iou (13,13,3,num): the IOU of every predicted box in every cell with the
            num boxes that really contain objects. Viewed as a (507, num) matrix, row i
            holds the IOU of predicted box i with the num ground truths; best_iou is
            then a (507,) array with each predicted box's highest IOU over all ground truths. """
            iou = box_iou(pred_box[b], true_box)
            # best_iou (13,13,3): for each of this image's 507 (13*13*3) predicted
            # boxes, the best IOU against the num object-containing boxes
            best_iou = K.max(iou, axis=-1)
            # boxes whose best IOU is below the threshold get 1 (they will be trained
            # as background); boxes above it get 0 and drop out of the no-object loss
            ignore_mask = ignore_mask.write(b,
                                            K.cast(best_iou < ignore_thresh, K.dtype(true_box)))
            return b+1, ignore_mask  # b+1: next image

        # m is the batch size
        _, ignore_mask = K.control_flow_ops.while_loop(lambda b, *args: b < m,
                                                       loop_body,
                                                       [0, ignore_mask])
        ignore_mask = ignore_mask.stack()  # stack the TensorArray into a Tensor
        ignore_mask = K.expand_dims(ignore_mask, -1)

        # K.binary_crossentropy is helpful to avoid exp overflow.
        """ 
        # object_mask 第l种特征图的m张图片所有anchor的置信度 eg.(m, 13, 13, 3, 1) 1或0,负责预测物体的anchor是1,其它0
        # raw_pred[...,0:2] (m,13,13,3,2)
        # row_true_xy (m,13,13,3,2)
        # from_logits=True, 先对raw_pred做sigmoid 
        # object_mask只计算负责预测object的box的xy和wh损失
        """
        xy_loss = object_mask * box_loss_scale * \
            K.binary_crossentropy(raw_true_xy,
                                  raw_pred[..., 0:2], from_logits=True)

        wh_loss = object_mask * box_loss_scale * 0.5 * \
            K.square(raw_true_wh-raw_pred[..., 2:4])
        """ 
        # (1-object_mask): 是不负责预测object的anchor
        # ignore_mask: 与所有ground truth IOU都小于阈值的box
        # 两个叠加一起就是: 如果某个anchor不负责预测ground truth 且该anchor预测的框与图中所有ground truth的IOU都小于阈值
        # 则让它预测背景, 如果大于阈值则不参与损失计算(大于阈值的很可能是跟负责预测object的box比较接近的, `不要让这些box和预测背景的box相似`)
        """
        confidence_loss = object_mask * K.binary_crossentropy(object_mask,
                                                              raw_pred[..., 4:5], from_logits=True) + \
            (1-object_mask) * K.binary_crossentropy(object_mask,
                                                    raw_pred[..., 4:5], from_logits=True) * ignore_mask
        """object_mask: 取出负责预测object的box, 让他们预测的类别和真实类别接近. """
        class_loss = object_mask * K.binary_crossentropy(true_class_probs,
                                                         raw_pred[..., 5:], from_logits=True)
        # / mf: average the losses over the batch size
        xy_loss = K.sum(xy_loss) / mf
        wh_loss = K.sum(wh_loss) / mf
        confidence_loss = K.sum(confidence_loss) / mf
        class_loss = K.sum(class_loss) / mf
        loss += xy_loss + wh_loss + confidence_loss + class_loss
        if print_loss:
            loss = tf.Print(loss, [loss, xy_loss, wh_loss, confidence_loss, class_loss, K.sum(
                ignore_mask)], message='loss: ')
    return loss

utils.py

"""Miscellaneous utility functions."""

from functools import reduce

from PIL import Image
import numpy as np
from matplotlib.colors import rgb_to_hsv, hsv_to_rgb


def compose(*funcs):
    """Compose arbitrarily many functions, evaluated left to right.

    Reference: https://mathieularose.com/function-composition-in-python/
    """
    # return lambda x: reduce(lambda v, f: f(v), funcs, x)
    if funcs:
        return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)
    else:
        raise ValueError('Composition of empty sequence not supported.')
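
# Usage sketch: compose evaluates left to right, i.e. compose(f, g)(x) == g(f(x)):
#   f = lambda x: x + 1
#   g = lambda x: x * 2
#   compose(f, g)(3)  # >>> 8, i.e. g(f(3))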


def letterbox_image(image, size):
    '''resize image with unchanged aspect ratio using padding'''
    iw, ih = image.size
    w, h = size
    scale = min(w/iw, h/ih)
    nw = int(iw*scale)
    nh = int(ih*scale)

    image = image.resize((nw, nh), Image.BICUBIC)
    new_image = Image.new('RGB', size, (128, 128, 128))  # a solid gray (416,416) canvas
    # Paste the proportionally resized image in the center, with equal gray
    # padding on the two opposite sides
    new_image.paste(image, ((w-nw)//2, (h-nh)//2))
    return new_image
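
# Worked example (illustrative): letterboxing a 500x375 (w, h) image to (416, 416):
#   scale = min(416/500, 416/375) = 0.832, so the image is resized to 416x312 and
#   pasted at ((416-416)//2, (416-312)//2) = (0, 52), leaving equal 52-pixel gray
#   bars at the top and bottom.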


def rand(a=0, b=1):
    return np.random.rand()*(b-a) + a


def get_random_data(annotation_line, input_shape, random=True, max_boxes=20, jitter=.3, hue=.1, sat=1.5, val=1.5, proc_img=True):
    '''random preprocessing for real-time data augmentation'''
    # random=True: apply data augmentation; otherwise keep the original image.
    # Read one image and its boxes; input_shape is (416,416).
    # annotation_line format: imagepath x_min,y_min,x_max,y_max,class_id
    # e.g.: path/to/img1.jpg 50,100,150,200,0 30,50,200,120,3
    line = annotation_line.split()
    image = Image.open(line[0])
    iw, ih = image.size
    h, w = input_shape

    box = np.array([np.array(list(map(int, box.split(','))))
                    for box in line[1:]])

    if not random:
        # resize image
        # scaling factor
        scale = min(w/iw, h/ih)
        nw = int(iw*scale)
        nh = int(ih*scale)
        # paste offset that centers the resized image
        dx = (w-nw)//2
        dy = (h-nh)//2
        image_data = 0
        if proc_img:
            image = image.resize((nw, nh), Image.BICUBIC)  # BICUBIC: bicubic interpolation
            new_image = Image.new('RGB', (w, h), (128, 128, 128))  # gray background
            new_image.paste(image, (dx, dy))  # paste the resized image
            image_data = np.array(new_image)/255.  # normalize to 0~1

        # correct boxes
        box_data = np.zeros((max_boxes, 5))
        if len(box) > 0:
            np.random.shuffle(box)
            if len(box) > max_boxes:
                box = box[:max_boxes]  # keep at most max_boxes ground truths per image
            # Map the box coordinates into the resized, padded image
            box[:, [0, 2]] = box[:, [0, 2]]*scale + dx
            box[:, [1, 3]] = box[:, [1, 3]]*scale + dy
            box_data[:len(box)] = box

        return image_data, box_data

    """数据增强.
    ref:https://blog.csdn.net/yangchengtest/article/details/80723366 """
    # resize image
    new_ar = w/h * rand(1-jitter, 1+jitter)/rand(1-jitter, 1+jitter)
    scale = rand(.25, 2)
    if new_ar < 1:
        nh = int(scale*h)
        nw = int(nh*new_ar)
    else:
        nw = int(scale*w)
        nh = int(nw/new_ar)
    image = image.resize((nw, nh), Image.BICUBIC)

    # place image
    dx = int(rand(0, w-nw))
    dy = int(rand(0, h-nh))
    new_image = Image.new('RGB', (w, h), (128, 128, 128))
    new_image.paste(image, (dx, dy))
    image = new_image

    # flip image or not
    flip = rand() < .5
    if flip:
        image = image.transpose(Image.FLIP_LEFT_RIGHT)

    # distort image
    hue = rand(-hue, hue)
    sat = rand(1, sat) if rand() < .5 else 1/rand(1, sat)
    val = rand(1, val) if rand() < .5 else 1/rand(1, val)
    x = rgb_to_hsv(np.array(image)/255.)
    x[..., 0] += hue
    x[..., 0][x[..., 0] > 1] -= 1
    x[..., 0][x[..., 0] < 0] += 1
    x[..., 1] *= sat
    x[..., 2] *= val
    x[x > 1] = 1
    x[x < 0] = 0
    image_data = hsv_to_rgb(x)  # numpy array, 0 to 1

    # correct boxes
    box_data = np.zeros((max_boxes, 5))
    if len(box) > 0:
        np.random.shuffle(box)
        box[:, [0, 2]] = box[:, [0, 2]]*nw/iw + dx
        box[:, [1, 3]] = box[:, [1, 3]]*nh/ih + dy
        if flip:
            box[:, [0, 2]] = w - box[:, [2, 0]]
        box[:, 0:2][box[:, 0:2] < 0] = 0
        box[:, 2][box[:, 2] > w] = w
        box[:, 3][box[:, 3] > h] = h
        box_w = box[:, 2] - box[:, 0]
        box_h = box[:, 3] - box[:, 1]
        box = box[np.logical_and(box_w > 1, box_h > 1)]  # discard invalid box
        if len(box) > max_boxes:
            box = box[:max_boxes]
        box_data[:len(box)] = box

    return image_data, box_data

train.py

"""
Retrain the YOLO model for your own dataset.
"""

import numpy as np
import keras.backend as K
from keras.layers import Input, Lambda
from keras.models import Model
from keras.optimizers import Adam
from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping

from yolo3.model import preprocess_true_boxes, yolo_body, tiny_yolo_body, yolo_loss
from yolo3.utils import get_random_data


def _main():
    annotation_path = 'train.txt'
    log_dir = 'logs/000/'
    classes_path = 'model_data/voc_classes.txt'
    anchors_path = 'model_data/yolo_anchors.txt'
    class_names = get_classes(classes_path)  # the class names (20 for VOC)
    num_classes = len(class_names)  # e.g. 20 for VOC
    anchors = get_anchors(anchors_path)  # (9,2)

    input_shape = (416, 416)  # multiple of 32, hw

    is_tiny_version = len(anchors) == 6  # default setting
    if is_tiny_version:
        model = create_tiny_model(input_shape, anchors, num_classes,
                                  freeze_body=2, weights_path='model_data/tiny_yolo_weights.h5')
    else:
        model = create_model(input_shape, anchors, num_classes,
                             freeze_body=2, weights_path='model_data/yolo_weights.h5')  # make sure you know what you freeze

    logging = TensorBoard(log_dir=log_dir)
    checkpoint = ModelCheckpoint(log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
                                 monitor='val_loss', save_weights_only=True, save_best_only=True, period=3)
    reduce_lr = ReduceLROnPlateau(
        monitor='val_loss', factor=0.1, patience=3, verbose=1)
    early_stopping = EarlyStopping(
        monitor='val_loss', min_delta=0, patience=10, verbose=1)

    val_split = 0.1
    with open(annotation_path) as f:
        lines = f.readlines()
    np.random.seed(10101)
    np.random.shuffle(lines)
    np.random.seed(None)
    num_val = int(len(lines)*val_split)
    num_train = len(lines) - num_val

    # Train with frozen layers first, to get a stable loss.
    # Adjust num epochs to your dataset. This step is enough to obtain a not bad model.
    if True:
        model.compile(optimizer=Adam(lr=1e-3), loss={
            # use custom yolo_loss Lambda layer.
            'yolo_loss': lambda y_true, y_pred: y_pred})

        batch_size = 32
        print('Train on {} samples, val on {} samples, with batch size {}.'.format(
            num_train, num_val, batch_size))
        model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
                            steps_per_epoch=max(1, num_train//batch_size),
                            validation_data=data_generator_wrapper(
                                lines[num_train:], batch_size, input_shape, anchors, num_classes),
                            validation_steps=max(1, num_val//batch_size),
                            epochs=50,
                            initial_epoch=0,
                            callbacks=[logging, checkpoint])
        model.save_weights(log_dir + 'trained_weights_stage_1.h5')

    # Unfreeze and continue training, to fine-tune.
    # Train longer if the result is not good.
    if True:
        for i in range(len(model.layers)):
            model.layers[i].trainable = True
        # recompile to apply the change
        model.compile(optimizer=Adam(lr=1e-4),
                      loss={'yolo_loss': lambda y_true, y_pred: y_pred})
        print('Unfreeze all of the layers.')

        batch_size = 32  # note that more GPU memory is required after unfreezing the body
        print('Train on {} samples, val on {} samples, with batch size {}.'.format(
            num_train, num_val, batch_size))
        model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
                            steps_per_epoch=max(1, num_train//batch_size),
                            validation_data=data_generator_wrapper(
                                lines[num_train:], batch_size, input_shape, anchors, num_classes),
                            validation_steps=max(1, num_val//batch_size),
                            epochs=100,
                            initial_epoch=50,
                            callbacks=[logging, checkpoint, reduce_lr, early_stopping])
        model.save_weights(log_dir + 'trained_weights_final.h5')

    # Further training if needed.


def get_classes(classes_path):
    '''loads the classes'''
    with open(classes_path) as f:
        class_names = f.readlines()
    class_names = [c.strip() for c in class_names]
    return class_names


def get_anchors(anchors_path):
    '''loads the anchors from a file'''
    with open(anchors_path) as f:
        anchors = f.readline()
    anchors = [float(x) for x in anchors.split(',')]
    return np.array(anchors).reshape(-1, 2)
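
# Example anchors file content (the widely used YOLOv3 COCO defaults; your file
# may differ):
#   10,13,  16,30,  33,23,  30,61,  62,45,  59,119,  116,90,  156,198,  373,326
# get_anchors parses this single line into a (9, 2) array of (width, height) pairs.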


def create_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,
                 weights_path='model_data/yolo_weights.h5'):
    '''create the training model'''

    K.clear_session()  # get a new session
    image_input = Input(shape=(None, None, 3))
    h, w = input_shape  # 416,416
    num_anchors = len(anchors)  # 9

    """y_true: 把ground truth作为模型的输入, 用来在loss层计算loss, 模型的输出就是loss
    [<tf.Tensor 'input_1:0' shape=(None, 13, 13, 3, 25) dtype=float32>,
     <tf.Tensor 'input_2:0' shape=(None, 26, 26, 3, 25) dtype=float32>, 
     <tf.Tensor 'input_3:0' shape=(None, 52, 52, 3, 25) dtype=float32>] 
    """
    y_true = [Input(shape=(h//{0: 32, 1: 16, 2: 8}[l], w//{0: 32, 1: 16, 2: 8}[l],
                           num_anchors//3, num_classes+5)) for l in range(3)]

    # num_anchors//3: each cell predicts 3 boxes
    model_body = yolo_body(image_input, num_anchors//3, num_classes)
    print('Create YOLOv3 model with {} anchors and {} classes.'.format(
        num_anchors, num_classes))

    if load_pretrained:
        model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
        print('Load weights {}.'.format(weights_path))
        if freeze_body in [1, 2]:  # freeze some layers
            # Freeze darknet53 body or freeze all but 3 output layers.
            num = (185, len(model_body.layers)-3)[freeze_body-1]
            for i in range(num):
                model_body.layers[i].trainable = False
            print('Freeze the first {} layers of total {} layers.'.format(
                num, len(model_body.layers)))

    """
    这里再加一层 loss层,直接把loss作为模型的一个层,最后输出output_shape是一个值
    loss层的输入是 yolo的输出[y1,y2,y3](model_body.output) 和 ground truth(y_true)
    arguments: optional dictionary of keyword arguments to be passed to the function(yolo_loss).
    """
    model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
                        arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5})([*model_body.output,
                                                                                                           *y_true])

    model = Model([model_body.input, *y_true], model_loss)

    return model


def create_tiny_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,
                      weights_path='model_data/tiny_yolo_weights.h5'):
    '''create the training model, for Tiny YOLOv3'''
    K.clear_session()  # get a new session
    image_input = Input(shape=(None, None, 3))
    h, w = input_shape
    num_anchors = len(anchors)

    y_true = [Input(shape=(h//{0: 32, 1: 16}[l], w//{0: 32, 1: 16}[l],
                           num_anchors//2, num_classes+5)) for l in range(2)]

    model_body = tiny_yolo_body(image_input, num_anchors//2, num_classes)
    print('Create Tiny YOLOv3 model with {} anchors and {} classes.'.format(
        num_anchors, num_classes))

    if load_pretrained:
        model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
        print('Load weights {}.'.format(weights_path))
        if freeze_body in [1, 2]:
            # Freeze the darknet body or freeze all but 2 output layers.
            num = (20, len(model_body.layers)-2)[freeze_body-1]
            for i in range(num):
                model_body.layers[i].trainable = False
            print('Freeze the first {} layers of total {} layers.'.format(
                num, len(model_body.layers)))

    model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
                        arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.7})(
        [*model_body.output, *y_true])
    model = Model([model_body.input, *y_true], model_loss)

    return model


def data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes):
    '''data generator for fit_generator'''

    n = len(annotation_lines)
    i = 0
    while True:
        image_data = []
        box_data = []
        for b in range(batch_size):
            if i == 0:
                np.random.shuffle(annotation_lines)  # shuffle the training set at the start and after each full pass
            image, box = get_random_data(annotation_lines[i],
                                         input_shape, random=True)
            image_data.append(image)
            box_data.append(box)
            i = (i+1) % n
        # one batch data
        image_data = np.array(image_data)
        box_data = np.array(box_data)
        y_true = preprocess_true_boxes(
            box_data, input_shape, anchors, num_classes)
        # *y_true unpacks the list: [image_data, y_true[0], y_true[1], y_true[2]]
        yield [image_data, *y_true], np.zeros(batch_size)
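
# One yielded batch (illustrative shapes, batch_size = 32, 80 classes):
#   inputs  = [image_data (32,416,416,3),
#              y_true[0] (32,13,13,3,85),
#              y_true[1] (32,26,26,3,85),
#              y_true[2] (32,52,52,3,85)]
#   targets = np.zeros(32)  # dummy; the model's output *is* the loss, and the
#                           # compiled loss is `lambda y_true, y_pred: y_pred`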


def data_generator_wrapper(annotation_lines, batch_size, input_shape, anchors, num_classes):
    n = len(annotation_lines)
    if n == 0 or batch_size <= 0:
        return None
    return data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes)

if __name__ == '__main__':
    _main()