CV-1-Object Detection-03-RCNN-02-data_utils

# -*- coding: utf-8 -*-


import config as cfg
from selectivesearch import selective_search

from collections import defaultdict
import os
import pickle
import cv2 as cv
import numpy as np
from sklearn.preprocessing import OneHotEncoder


def check_directory(path, created=True, error=False):
    """
    Check whether the file or directory `path` exists; if it does not, act according to the
    `created` and `error` flags (only one of them should be True).
    :param path:
    :param created:  create the directory when it does not exist
    :param error:  raise an error when `path` does not exist
    :return:
    """
    flag = os.path.exists(path)
    if not flag:
        if created:
            os.makedirs(path)
            flag = True
        elif error:
            raise Exception("Path must exist!! {}".format(path))
    return flag


def resize_image(in_image, new_width, new_height, out_image=None, resize_mode=cv.INTER_CUBIC):
    """
    Resize an image.
    :param in_image:  input image
    :param new_width:  new width
    :param new_height:  new height
    :param out_image:  optional output file path; the resized image is written there if given
    :param resize_mode:  interpolation mode used for resizing
    :return:
    """
    # interpolation must be passed by keyword: the third positional argument of cv.resize is dst
    image = cv.resize(in_image, (new_width, new_height), interpolation=resize_mode)
    if out_image:
        cv.imwrite(out_image, image)
    return image


def iou(box1, box2):
    """
    Compute the IoU of box1 and box2.
    :param box1:  box coordinates [top-left x, top-left y, bottom-right x, bottom-right y]
    :param box2:  box coordinates [top-left x, top-left y, bottom-right x, bottom-right y]
    :return:
    """
    # 1. Collect the coordinates, tagged with the box they belong to, and sort them
    x = [(box1[0], 1), (box1[2], 1), (box2[0], 2), (box2[2], 2)]
    y = [(box1[1], 1), (box1[3], 1), (box2[1], 2), (box2[3], 2)]
    x = sorted(x, key=lambda t: t[0])
    y = sorted(y, key=lambda t: t[0])

    # 2. Compute the intersection area (if the two smallest x or y values come from the same
    #    box, the boxes do not overlap on that axis and the intersection is empty)
    if x[0][1] == x[1][1] or y[0][1] == y[1][1]:
        intersect_area = 0.0
    else:
        intersect_area = (x[2][0] - x[1][0]) * (y[2][0] - y[1][0])

    # 3. Compute the union area
    box1_area = (box1[2] - box1[0]) * (box1[3] - box1[1])
    box2_area = (box2[2] - box2[0]) * (box2[3] - box2[1])
    union_area = box1_area + box2_area - intersect_area

    # 4. Return IoU = intersection / union
    return 1.0 * intersect_area / union_area

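# A minimal, commented-out sanity check for iou() (hypothetical boxes, not taken from the
# training data): the two boxes below intersect in a 5x5 = 25 pixel region and their union
# covers 100 + 100 - 25 = 175 pixels, so the expected value is 25 / 175 ≈ 0.1429.
# print(iou(box1=[0, 0, 10, 10], box2=[5, 5, 15, 15]))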

def fetch_selective_search_images(file_path, image_width=227, image_height=227):
    """
    Run Selective Search on the given image and return the proposal crops together with
    their coordinates.
    :param file_path:
    :param image_width:
    :param image_height:
    :return:
    """
    try:
        result = []
        # 1. Read the image into an ndarray
        image = cv.imread(file_path)

        # 2. Generate ROI proposal boxes with Selective Search
        _, regions = selective_search(image, scale=500, sigma=0.9, min_size=10)

        # 3. Iterate over all proposals and collect the crops and their coordinates
        candidate = set()
        for idx, region in enumerate(regions):
            # a. Extract the attributes of the current proposal
            rect = region['rect']
            size = region['size']
            # Coordinates: top-left corner plus width/height -> bottom-right corner
            lr_x, lr_y, pw, ph = rect
            rr_x = lr_x + pw
            rr_y = lr_y + ph

            # b. Filter proposals
            if rect in candidate:
                continue
            if size < 200:
                continue
            if pw * ph < 500:
                continue

            # c. Mark the current region as processed
            candidate.add(rect)

            # d. Extract the data of the current proposal
            # 1. Crop the region proposal from the image
            region_proposal = image[lr_y:rr_y, lr_x:rr_x]

            # 2. Resize the proposal crop
            region_proposal = resize_image(region_proposal,
                                           new_width=image_width,
                                           new_height=image_height)

            # 3. Append the sample
            result.append([region_proposal, [lr_x, lr_y, rr_x, rr_y]])

        # 4. Convert to an object array (rows mix an image and a coordinate list) and return
        result = np.asarray(result, dtype=object)
        return result
    except Exception:
        return None

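# A commented-out usage sketch for fetch_selective_search_images() (the image path below is
# hypothetical): each returned row holds a resized proposal crop and its [x1, y1, x2, y2]
# box in the original image; None is returned if reading the image or Selective Search fails.
# proposals = fetch_selective_search_images("../datas/rcnn/example.jpg")
# if proposals is not None:
#     crop, box = proposals[0]
#     print(np.shape(crop), box)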

def make_training_data(in_file, output_data_file, output_label_file, image_width=227, image_height=227):
    """
    Build the raw training set: extract the ROI proposals and their attributes from the
    original training images.
    :param in_file: txt file listing the original training samples
    :param output_data_file: output path for the extracted features + targets
    :param output_label_file:  output path for the class-name-to-id mapping
    :param image_width:  final width of each ROI crop
    :param image_height:  final height of each ROI crop
    :return:
    """
    # 0. Check that the input file exists and that the output directories exist
    check_directory(in_file, created=False, error=True)
    check_directory(os.path.dirname(output_data_file))
    check_directory(os.path.dirname(output_label_file))

    # 1. Get the root directory of the fine-tune data
    root_dir = os.path.dirname(os.path.abspath(in_file))

    # 2. Iterate over the lines of the fine-tune data file
    class_name_2_index_dict = {}
    current_class_index = 1     # class ids start at 1; 0 is reserved for the background class
    with open(in_file, 'r', encoding='utf-8') as reader:
        datas = []
        for line in reader:
            # a. Split the line
            values = line.strip().split(" ")

            # b. Skip malformed lines
            if len(values) != 3:
                continue

            # c. Process a valid line

            # 1. Parse the fields (image path, class id, box coordinates)
            #    a. Path of the original image
            image_file_path = os.path.join(root_dir, values[0])
            #    b. Class id (a new id is assigned the first time a class name is seen)
            class_name = values[1].strip()
            try:
                image_label = class_name_2_index_dict[class_name]
            except KeyError:
                image_label = current_class_index
                class_name_2_index_dict[class_name] = image_label
                current_class_index += 1
            #    c. Box coordinates; the file stores [left_x, left_y, width, height] (as unpacked
            #       below), converted here to the bottom-right corner and the center point
            l_x, l_y, gw, gh = list(map(int, values[2].split(",")))
            r_x = l_x + gw
            r_y = l_y + gh
            gx = (l_x + r_x) // 2
            gy = (l_y + r_y) // 2

            # 2. Read the image into an ndarray
            image = cv.imread(image_file_path)

            # 3. Crop the Ground Truth region from the image
            ground_truth_image = image[l_y:r_y, l_x:r_x]

            # 4. Generate ROI proposal boxes with Selective Search
            _, regions = selective_search(image, scale=500, sigma=0.9, min_size=10)

            # 5. Iterate over all proposals and compute their IoU with the GT box and other attributes
            candidate = set()
            for idx, region in enumerate(regions):
                # a. Extract the attributes of the current proposal
                rect = region['rect']  # box as (x, y, w, h)
                size = region['size']  # number of pixels in the region
                # Coordinates: top-left corner, bottom-right corner, width, height, center point
                lr_x, lr_y, pw, ph = rect
                rr_x = lr_x + pw
                rr_y = lr_y + ph
                px = (lr_x + rr_x) // 2
                py = (lr_y + rr_y) // 2

                # b. Filter proposals
                if rect in candidate:
                    # Selective Search returns many duplicate rects, so skip repeats
                    continue
                if size < 200:
                    continue
                if pw * ph < 500:  # box area
                    continue

                # c. Mark the current region as processed
                candidate.add(rect)

                # d. Extract the data of the current proposal
                # 1. Crop the region proposal (ROI) from the image
                region_proposal = image[lr_y:rr_y, lr_x:rr_x]

                # 2. Compute the IoU between the proposal and the GT box
                region_iou = iou(
                    box1=[l_x, l_y, r_x, r_y],
                    box2=[lr_x, lr_y, rr_x, rr_y]
                )

                # 3. Resize the proposal crop
                region_proposal = resize_image(region_proposal,
                                               new_width=image_width,
                                               new_height=image_height)

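                # These offsets are the standard R-CNN bounding-box regression targets: the
                # center shift is normalized by the proposal size and the size ratio is
                # log-scaled, i.e. tx = (Gx - Px) / Pw, ty = (Gy - Py) / Ph,
                # tw = log(Gw / Pw), th = log(Gh / Ph); calc_ground_truth_predict_bounding_box()
                # below applies the inverse transform at prediction time.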
                # 4. Compute the offsets (the supervision signal for box regression)
                tx = (gx - px) / pw
                ty = (gy - py) / ph
                tw = np.log(gw / pw)
                th = np.log(gh / ph)
                offset_box = [tx, ty, tw, th]

                # e. Append the sample
                # Row layout: proposal image, class label of the source image, box type
                # (0 = ground truth, 1 = region proposal), IoU, offset_box (regression targets)
                data = []
                # proposal image
                data.append(region_proposal)
                # class label (background is not handled here)
                data.append(image_label)
                # box type (0 = ground truth, 1 = region proposal)
                data.append(1)
                # IoU with the GT box
                data.append(region_iou)
                # regression targets (supervision signal): offset box
                data.append(offset_box)
                datas.append(data)

            # fixme 6. Append the Ground Truth box itself as a sample
            # Row layout: region image, class label, box type (0 = ground truth, 1 = region proposal), IoU, offset_box
            data = []
            # region image (the resized GT crop)
            ground_truth_image = resize_image(ground_truth_image,
                                              new_width=image_width,
                                              new_height=image_height)
            data.append(ground_truth_image)
            # class label
            data.append(image_label)
            # box type (0 = ground truth, 1 = region proposal)
            data.append(0)
            # IoU (the GT box fully overlaps itself)
            data.append(1.0)
            # regression targets: offset box (zero offsets for the GT box)
            data.append([0, 0, 0, 0])
            datas.append(data)

    # 3. Persist the data
    # Build an object array explicitly: rows mix image arrays with scalars and lists,
    # and newer NumPy versions refuse to save such ragged lists implicitly.
    np.save(output_data_file, np.asarray(datas, dtype=object))
    with open(output_label_file, 'wb') as writer:
        pickle.dump(class_name_2_index_dict, writer)

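# A commented-out usage sketch, mirroring the call made inside FlowerDataLoader below
# (the path constants come from config.py):
# make_training_data(in_file=cfg.ORIGINAL_FINE_TUNE_DATA_FILE_PATH,
#                    output_data_file=cfg.TRAIN_DATA_FILE_PATH,
#                    output_label_file=cfg.TRAIN_LABEL_DICT_FILE_PATH)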

def calc_ground_truth_predict_bounding_box(predict_bounding_boxs, box_offsets, as_int=True):
    """
    Compute the final box coordinates from the predicted boxes and the predicted offsets.
    :param predict_bounding_boxs:
    :param box_offsets:
    :param as_int:
    :return:
    """
    final_boxs = []
    for (x1, y1, x2, y2), (tx, ty, tw, th) in zip(predict_bounding_boxs, box_offsets):
        # Center point, width and height of the predicted (proposal) box
        px = (x1 + x2) // 2
        py = (y1 + y2) // 2
        pw = (x2 - x1)
        ph = (y2 - y1)

        # Center point, width and height after applying the offsets
        gx = tx * pw + px
        gy = ty * ph + py
        gw = np.exp(tw) * pw
        gh = np.exp(th) * ph

        # New top-left and bottom-right corner coordinates
        new_x1 = gx - (gw // 2)
        new_y1 = gy - (gh // 2)
        new_x2 = new_x1 + gw
        new_y2 = new_y1 + gh
        final_boxs.append([new_x1, new_y1, new_x2, new_y2])

    if as_int:
        # Cast the box coordinates to integers (np.int was removed from newer NumPy, so use np.int64)
        final_boxs = np.asarray(final_boxs).astype(np.int64)
    else:
        final_boxs = np.asarray(final_boxs)
    return final_boxs

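# A commented-out sanity check (hypothetical numbers): all-zero offsets must reproduce the
# input box, since the center shift is 0 and exp(0) = 1 leaves the size unchanged.
# print(calc_ground_truth_predict_bounding_box(predict_bounding_boxs=[[10, 20, 110, 220]],
#                                              box_offsets=[[0.0, 0.0, 0.0, 0.0]]))
# # -> [[ 10  20 110 220]]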

def nms(boxes, probs, boxes_labels, prob_threshold=0.5):
    """
    Non-maximum suppression (NMS).
    :param boxes:
    :param probs:
    :param boxes_labels:
    :param prob_threshold:
    :return:
    """
    # 1. Number of boxes, a working copy of the probabilities and the result containers
    n = np.shape(boxes)[0]
    tmp_probs = np.copy(probs)
    ious = np.zeros((n, n))
    result_indexs = []
    result_probs = []

    # 2. Compute the pairwise IoU of all boxes
    for i in range(n):
        ious[i, i] = 1.0
        label_i = boxes_labels[i]
        for j in range(i + 1, n):
            label_j = boxes_labels[j]
            if label_i == label_j:
                # Only compare boxes of the same class
                region_iou = iou(box1=boxes[i], box2=boxes[j])
                ious[i, j] = region_iou
                ious[j, i] = region_iou

    # 3. Greedily select boxes
    while True:
        # a. Index of the box with the highest remaining probability
        max_prob_index = np.argmax(tmp_probs)

        # b. Probability of that box
        max_prob = tmp_probs[max_prob_index]

        # c. Once the highest probability drops below the threshold, no boxes are left to keep
        if max_prob < prob_threshold:
            break

        # d. IoU of every box with the current box
        tmp_ious = ious[max_prob_index]

        # e. Find the boxes whose IoU exceeds the NMS threshold
        tmp_indexs = np.where(tmp_ious >= cfg.SVM_PREDICT_PROBABILITY_WITH_NMS_IOU_THRESHOLD)[0]

        # f. Suppress them (this also zeroes the current box, since its IoU with itself is 1.0)
        for index in tmp_indexs:
            tmp_probs[index] = 0.0

        # g. Keep the current box
        result_indexs.append(max_prob_index)
        result_probs.append(max_prob)
    return result_indexs, result_probs

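# A commented-out usage sketch for nms() (hypothetical boxes and scores; the IoU cut-off
# itself comes from cfg.SVM_PREDICT_PROBABILITY_WITH_NMS_IOU_THRESHOLD): with a typical
# threshold of 0.3-0.5, the two heavily overlapping class-1 boxes collapse to the
# higher-scoring one, while the class-2 box is kept on its own.
# keep_idx, keep_prob = nms(boxes=np.array([[0, 0, 10, 10], [1, 1, 11, 11], [50, 50, 60, 60]]),
#                           probs=np.array([0.9, 0.8, 0.7]),
#                           boxes_labels=np.array([1, 1, 2]))
# print(keep_idx, keep_prob)   # e.g. [0, 2] and [0.9, 0.7]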

class FlowerDataLoader(object):
    def __init__(self, one_hot=True):
        # Configuration-related attributes
        self.image_width = cfg.IMAGE_WIDTH
        self.image_height = cfg.IMAGE_HEIGHT

        self.fine_tune_positive_batch_size = cfg.FINE_TUNE_POSITIVE_BATCH_SIZE
        self.fine_tune_negative_batch_size = cfg.FINE_TUNE_NEGATIVE_BATCH_SIZE
        self.fine_tune_iou_threshold = cfg.FINE_TUNE_IOU_THRESHOLD

        self.higher_features_iou_threshold = cfg.HIGHER_FEATURES_IOU_THRESHOLD
        self.higher_features_batch_size = cfg.HIGHER_FEATURES_BATCH_SIZE

        self.regression_box_iou_threshold = cfg.REGRESSION_BOX_IOU_THRESHOLD
        self.regression_box_batch_size = cfg.REGRESSION_BOX_BATCH_SIZE

        # Variables used for fine-tune training
        fine_tune_X = []
        fine_tune_Y = []
        total_fine_tune_samples = 0     # total number of samples
        fine_tune_positive_samples = 0      # number of positive samples
        fine_tune_negative_samples = 0      # number of negative samples
        fine_tune_positive_samples_index = []   # indices of the positive samples
        fine_tune_negative_samples_index = []   # indices of the negative samples

        higher_features_Y = []
        higher_features_label_2_samples_index = defaultdict(list)
        higher_features_label_2_negative_samples = defaultdict(int)
        higher_features_label_2_positive_samples = defaultdict(int)

        regression_box_higher_features_Y = []
        regression_box_higher_features_samples_index = []

        # I. Load the X/Y NumPy arrays used for fine-tune training
        # 1. Get the Selective Search boxes for the training data (if the cached file does not exist
        #    on disk, build and save it; otherwise load it directly)
        print("Start load training data.....")
        if not check_directory(cfg.TRAIN_DATA_FILE_PATH, False, False):
            # The training data file does not exist, so generate it.
            print("Training data file does not exist, so build training data and save it to file.....")
            make_training_data(in_file=cfg.ORIGINAL_FINE_TUNE_DATA_FILE_PATH,
                               output_data_file=cfg.TRAIN_DATA_FILE_PATH,
                               output_label_file=cfg.TRAIN_LABEL_DICT_FILE_PATH,
                               image_width=self.image_width, image_height=self.image_height)
        datas = np.load(cfg.TRAIN_DATA_FILE_PATH, allow_pickle=True)
        # print(datas)  # quick sanity check of the generated data
        # print(pickle.load(open(cfg.TRAIN_LABEL_DICT_FILE_PATH, 'rb')))

        # 2. Iterate over the samples and build the usable X and Y values
        for idx, (image, label, box_type, region_iou, box) in enumerate(datas):
            # Image data
            fine_tune_X.append(image)
            total_fine_tune_samples += 1

            # Label data for fine-tuning
            if region_iou > self.fine_tune_iou_threshold:
                # Positive sample
                fine_tune_Y.append(label)
                fine_tune_positive_samples_index.append(idx)
                fine_tune_positive_samples += 1
            else:
                # Negative sample
                fine_tune_Y.append(0)  # 0 means background
                fine_tune_negative_samples_index.append(idx)
                fine_tune_negative_samples += 1

            # Data for training the per-class SVMs on the higher-level features (split by class)
            if region_iou < self.higher_features_iou_threshold:
                # Negative sample: IoU < 0.3
                higher_features_label_2_negative_samples[label] += 1
                higher_features_Y.append(0)
                higher_features_label_2_samples_index[label].append(idx)
            else:
                # idx indexes the full sample list, so the label must be appended here rather than
                # inside the inner if; otherwise higher_features_Y would fall out of sync with idx.
                higher_features_Y.append(label)
                if int(box_type) == 0:    # positive sample (ground-truth boxes only)
                    higher_features_label_2_positive_samples[label] += 1
                    higher_features_label_2_samples_index[label].append(idx)

            # Data for the higher-level features used by the box regression model
            regression_box_higher_features_Y.append(box)
            if region_iou >= self.regression_box_iou_threshold:
                regression_box_higher_features_samples_index.append(idx)

        print("Finished loading training data! Total samples: {}".format(total_fine_tune_samples))
        print("Fine-tune positive samples: {}, negative samples: {}".format(fine_tune_positive_samples, fine_tune_negative_samples))
        print("Higher features positive samples: {}, negative samples: {}".format(higher_features_label_2_positive_samples,
                                                                                   higher_features_label_2_negative_samples))
        print("Regression box samples: {}".format(len(regression_box_higher_features_samples_index)))

        # Assign the fine-tune related attributes
        self.fine_tune_x = np.asarray(fine_tune_X)
        if one_hot:
            # One-hot encode the labels when requested
            one_hot_encoder = OneHotEncoder(sparse=False, categories='auto')
            self.fine_tune_y = np.asarray(one_hot_encoder.fit_transform(np.reshape(fine_tune_Y, (-1, 1))))
        else:
            self.fine_tune_y = np.asarray(fine_tune_Y).reshape((-1, 1))
        self.total_fine_tune_samples = total_fine_tune_samples
        self.fine_tune_positive_samples = fine_tune_positive_samples
        self.fine_tune_negative_samples = fine_tune_negative_samples
        self.fine_tune_positive_cursor = 0
        self.fine_tune_negative_cursor = 0
        self.fine_tune_positive_samples_index = np.asarray(fine_tune_positive_samples_index)
        self.fine_tune_negative_samples_index = np.asarray(fine_tune_negative_samples_index)
        np.random.shuffle(self.fine_tune_positive_samples_index)
        np.random.shuffle(self.fine_tune_negative_samples_index)

        # Assign the data used for extracting SVM (higher-level) features
        # Total number of samples per class
        total_higher_features_label_2_samples = higher_features_label_2_negative_samples.copy()
        for key, value in higher_features_label_2_positive_samples.items():
            total_higher_features_label_2_samples[key] += value
        self.total_higher_features_label_2_samples = total_higher_features_label_2_samples
        # Sample labels
        self.higher_features_y = np.asarray(higher_features_Y)
        # Selectable sample indices per class
        self.higher_features_label_2_samples_index = higher_features_label_2_samples_index
        # Whether each class still has a next batch
        self._has_next_higher_features_batch_of_label = {}
        # Per-class batch cursor
        self.higher_features_label_2_cursor = {}
        for label in higher_features_label_2_samples_index.keys():
            self._has_next_higher_features_batch_of_label[label] = True
            self.higher_features_label_2_cursor[label] = 0
            np.random.shuffle(self.higher_features_label_2_samples_index[label])

        # Attributes for extracting the higher-level features used by the box regressor
        # (everything batch fetching needs: batch size, total samples, selectable indices,
        #  sample list, batch cursor and the has-next flag)
        self.total_regression_box_higher_features_samples = len(regression_box_higher_features_samples_index)
        self.regression_box_higher_features_samples_index = regression_box_higher_features_samples_index
        self.regression_box_higher_features_Y = np.asarray(regression_box_higher_features_Y)
        self.regression_box_higher_features_cursor = 0
        self._has_next_regression_box_batch = True

        # Attributes for training the regression model itself
        self.regression_batch_size = cfg.REGRESSION_BATCH_SIZE
        self.regression_x = None
        self.regression_y = None
        self.regression_cursor = 0
        self.total_regression_samples = 0
        self.regression_index = None
        self.reload_regression_box_train_data()

    def __fetch_batch(self, batch_size, cursor, total_samples, x, y, index):
        """
        Fetch the current batch (X, Y) from the given data and report whether the data has to be
        reset (reshuffled) before the next batch is fetched.
        """
        need_reset_data = False
        # 1. Compute the start and end indices of the batch
        start_idx = cursor * batch_size
        end_idx = start_idx + batch_size

        # 2. If the end of the data has been reached, the data must be reset before the next fetch
        if end_idx >= total_samples:
            need_reset_data = True

        # 3. Sample indices of the batch
        sample_index = index[start_idx:end_idx]

        # 4. Gather the samples by index
        images = x[sample_index]
        labels = y[sample_index]

        # 5. Return the batch
        return images, labels, need_reset_data

    def get_fine_tune_batch(self):
        """
        Fetch positive and negative samples according to the configured batch sizes and return
        them concatenated.
        :return:
        """
        # I. Fetch positive samples
        positive_images, positive_labels, flag = self.__fetch_batch(
            batch_size=self.fine_tune_positive_batch_size,
            cursor=self.fine_tune_positive_cursor,
            total_samples=self.fine_tune_positive_samples,
            x=self.fine_tune_x,
            y=self.fine_tune_y,
            index=self.fine_tune_positive_samples_index)

        if flag:
            print("Reset fine tune positive samples!!!")
            self.fine_tune_positive_cursor = 0
            np.random.shuffle(self.fine_tune_positive_samples_index)
        else:
            self.fine_tune_positive_cursor += 1

        # II. Fetch negative samples
        negative_images, negative_labels, flag = self.__fetch_batch(
            batch_size=self.fine_tune_negative_batch_size,
            cursor=self.fine_tune_negative_cursor,
            total_samples=self.fine_tune_negative_samples,
            x=self.fine_tune_x,
            y=self.fine_tune_y,
            index=self.fine_tune_negative_samples_index)
        if flag:
            print("Reset fine tune negative samples!!!")
            self.fine_tune_negative_cursor = 0
            np.random.shuffle(self.fine_tune_negative_samples_index)
        else:
            self.fine_tune_negative_cursor += 1

        # III. Concatenate the two parts
        images = np.concatenate([positive_images, negative_images], axis=0)
        labels = np.concatenate([positive_labels, negative_labels], axis=0)

        return images, labels

    def has_next_structure_higher_features_batch(self, label):
        """
        Whether the given class label still has a next batch of data.
        :param label:
        :return:
        """
        if label in self._has_next_higher_features_batch_of_label:
            return self._has_next_higher_features_batch_of_label[label]
        else:
            return False

    def reset_structure_higher_features_batch(self):
        """
        Reset the batch reading state.
        :return:
        """
        for label in self._has_next_higher_features_batch_of_label.keys():
            self._has_next_higher_features_batch_of_label[label] = True
            self.higher_features_label_2_cursor[label] = 0
            np.random.shuffle(self.higher_features_label_2_samples_index[label])

    def get_structure_higher_features(self, label):
        """
        Fetch the raw data used to train the SVM for the given class label.
        :param label:
        :return:
        """
        if self.has_next_structure_higher_features_batch(label):
            images, labels, flag = self.__fetch_batch(
                batch_size=self.higher_features_batch_size,
                cursor=self.higher_features_label_2_cursor[label],
                total_samples=self.total_higher_features_label_2_samples[label],
                x=self.fine_tune_x,
                y=self.higher_features_y,
                index=self.higher_features_label_2_samples_index[label])
            if flag:
                self._has_next_higher_features_batch_of_label[label] = False
            else:
                self.higher_features_label_2_cursor[label] += 1
            return images, labels
        else:
            return None, None

    def has_next_regression_box_batch(self):
        """
        Whether there is a next batch of raw feature data for the box regressor.
        :return:
        """
        return self._has_next_regression_box_batch

    def reset_regression_box_batch(self):
        """
        Reset the batch reading state.
        :return:
        """
        self._has_next_regression_box_batch = True
        self.regression_box_higher_features_cursor = 0

    def get_regression_box_batch(self):
        """
        Fetch the current batch of raw feature data for regression training (the original image
        data used to train the regression model).
        :return:
        """
        if self.has_next_regression_box_batch():
            images, labels, flag = self.__fetch_batch(
                batch_size=self.regression_box_batch_size,
                cursor=self.regression_box_higher_features_cursor,
                total_samples=self.total_regression_box_higher_features_samples,
                x=self.fine_tune_x,
                y=self.regression_box_higher_features_Y,
                index=self.regression_box_higher_features_samples_index)
            if flag:
                self._has_next_regression_box_batch = False
            else:
                self.regression_box_higher_features_cursor += 1
            return images, labels
        else:
            return None, None

    def reload_regression_box_train_data(self):
        """
        Load the regression training data set from disk.
        :return:
        """
        # File path
        regression_higher_features_save_path = cfg.TRAIN_REGRESSION_HIGHER_FEATURES_DATA_FILE_PATH
        if check_directory(regression_higher_features_save_path, created=False, error=False):
            # 1. Load the data
            data = np.load(regression_higher_features_save_path)

            # 2. Split into the feature columns and the 4 offset target columns
            regression_x, regression_y = np.split(data, indices_or_sections=(np.shape(data)[1] - 4,), axis=1)

            # 3. Assign the attributes
            self.regression_x = regression_x
            self.regression_y = regression_y
            self.regression_cursor = 0
            self.total_regression_samples = np.shape(data)[0]
            self.regression_index = np.random.permutation(self.total_regression_samples)
            return True
        else:
            return False

    def get_regression_box_train_batch(self):
        """
        Fetch a batch of Conv5 higher-level features for training the regression model.
        :return:
        """
        if self.regression_x is None:
            if not self.reload_regression_box_train_data():
                raise Exception("No regression box data available.")

        images, labels, flag = self.__fetch_batch(
            batch_size=self.regression_batch_size,
            cursor=self.regression_cursor,
            total_samples=self.total_regression_samples,
            x=self.regression_x,
            y=self.regression_y,
            index=self.regression_index)
        if flag:
            self.regression_cursor = 0
            np.random.shuffle(self.regression_index)
        else:
            self.regression_cursor += 1
        return images, labels


if __name__ == '__main__':
    # 1. Demo: inspect the pre-generated training data:
    # print(pickle.load(open('..\\datas\\rcnn\\label_dict.pkl', 'rb')))
    # train_datas = np.load('../datas/rcnn/traning_data.npy', allow_pickle=True)
    # print(train_datas[0])
    # cv.imshow('tupian', train_datas[0][0])
    # cv.waitKey(0)
    # cv.destroyAllWindows()


    # 2. Demo: fetch training batches
    dataloader = FlowerDataLoader()
    # for i in range(2):
    #     images, labels = dataloader.get_fine_tune_batch()
    #     print(np.shape(images))
    #     print(np.argmax(labels, axis=1))

    # while dataloader.has_next_structure_higher_features_batch(2):
    #     images, labels = dataloader.get_structure_higher_features(2)
    #     print(np.shape(images))

    while dataloader.has_next_regression_box_batch():
        images, labels = dataloader.get_regression_box_batch()
        print(np.shape(images))
