Object Detection 1: Chinese Traffic Sign Recognition with a YOLOv4 Model

1. Dataset Download

http://cg.cs.tsinghua.edu.cn/traffic-sign/


2. YOLOv4 Model Download

  • YOLOv4-PyTorch repository (see its README.md):
https://gitee.com/xtlower/yolov4-pytorch?_from=gitee_search


3. Dataset Analysis (1)

# 3.1 Format the annotation file: format_json.py
import json
with open('./instances_val2017.json', 'rb') as fp:
    data = json.load(fp)
print(type(data))
print(data.keys())
# Re-dump with indentation so the JSON becomes human-readable
with open('./instances_val2017_formatted.json', 'w') as fp:
    json.dump(data, fp, indent=4, sort_keys=True, ensure_ascii=False)
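Section 3.2 below reads annotations_all_formatted.json, so the same pretty-printing step is presumably run on TT100K's annotations_all.json first. A minimal sketch, assuming the raw file sits next to the script:

# Hedged sketch: format TT100K's annotations_all.json the same way
import json
with open('./annotations_all.json', 'rb') as fp:  # assumed path of the raw TT100K annotations
    data = json.load(fp)
with open('./annotations_all_formatted.json', 'w') as fp:
    json.dump(data, fp, indent=4, sort_keys=True, ensure_ascii=False)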



# 3.2 Dataset analysis in a Jupyter notebook
import json
with open('./annotations_all_formatted.json', 'rb') as fp:  # read the annotation file
    obj = json.load(fp)
import jsonpath  # used to count how often each category appears
cats = jsonpath.jsonpath(obj, '$..category')  # collect every 'category' value anywhere in obj
len(cats)
27346
len(obj['types'])
232
from collections import Counter
c = Counter(cats)  # Counter tallies how many times each category occurs in cats
c
Counter({'ph2': 12,
         'pl40': 1413,
...
         'w62': 1,
         'w44': 1})
len(c)
201
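So the dataset defines 232 sign types, but only 201 of them actually occur in the annotations. Before plotting, Counter.most_common gives a quick feel for the class imbalance; outputs omitted here:

c.most_common(10)  # the ten most frequent classes
sum(1 for v in c.values() if v < 5)  # how many classes appear fewer than 5 times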

# 3.3 Plot the class distribution
import matplotlib.pyplot as plt
import numpy as np
plt.figure(figsize=(7, 40))  
plt.barh(np.arange(len(c.keys())), c.values())
_ = plt.yticks(np.arange(len(c.keys())), c.keys())
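With 201 classes the unsorted bar chart is hard to scan; sorting by frequency first makes the long tail obvious. A small variation on the plot above:

# Variation: sort classes by frequency before plotting
pairs = sorted(c.items(), key=lambda kv: kv[1])
plt.figure(figsize=(7, 40))
plt.barh(np.arange(len(pairs)), [v for _, v in pairs])
_ = plt.yticks(np.arange(len(pairs)), [k for k, _ in pairs])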


4. Dataset Preprocessing

new_c = dict((k, v) for k, v in c.items() if v >= 5)  # keep only classes that appear at least 5 times for training
new_c.keys()
dict_keys(['ph2', 'pl40', 'pl30', 'p26', 'p27', 'i5', 'pne', 'pr60', 'ph4.5', 'pr20', 'pbp', 'ip', 'p5', 'pn', 'p11', 'w30', 'p9', 'pm20', 'pl5', 'i4', 'pl70', 'pl80', 'il60', 'il70', 'il80', 'ph4', 'pl100', 'pl50', 'i2', 'im', 'pdd', 'pg', 'w59', 'p3', 'pm30', 'p1', 'pl60', 'p12', 'p17', 'p18', 'pr50', 'pn-2', 'il110', 'il90', 'pl120', 'w57', 'p10', ...
'pl10', 'phclr', 'i12', 'pm2', 'pl90', 'p14', 'pm8', 'pm40', 'p2', 'i3', 'p25', 'pm25', 'p15', 'w8', 'w24', 'w41', 'pmr', 'i1', 'w20', 'pr80', 'w10', 'i14', 'ph4.8', 'phcs', 'pctl', 'pt', 'ph4.3', 'w18', 'i15'])
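These keys are exactly the list hardcoded as cat in tt100k2coco.py below. Instead of copy-pasting, the list can be dumped once and reloaded; a minimal sketch (the file name is made up for illustration):

# Hedged sketch: persist the >=5-occurrence class list for reuse
import json
with open('./tt100k_kept_classes.json', 'w') as f:  # hypothetical file name
    json.dump(list(new_c.keys()), f, ensure_ascii=False)
# in tt100k2coco.py one could then do: cat = json.load(open('./tt100k_kept_classes.json'))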

5. Dataset Conversion

  • tt100k2coco.py
import json
import os
import cv2
_type = 'test'  # which split to convert: 'train' or 'test'
src = '../../tt100k_2021'
anno = os.path.join(src, 'annotations_all.json')
dataset = {'categories': [], 'images': [], 'annotations': []}
categories = ['__background__']  # background at index 0, so categories.index(label) gives the 1-based category id
with open(anno, 'r') as f:
    annotations = json.load(f)
cat = ['ph2', 'pl40', 'pl30', 'p26', 'p27', 'i5', 'pne', 'pr60', 'ph4.5', 'pr20', 'pbp', 'ip', 'p5', 'pn', 'p11', 'w30', 'p9', 'pm20', 'pl5', 'i4', 'pl70', 'pl80', 'il60', 'il70', 'il80', 'ph4', 'pl100', 'pl50', 'i2', 'im', 'pdd', 'pg', 'w59', 'p3', 'pm30', 'p1', 'pl60', 'p12', 'p17', 'p18', 'pr50', 'pn-2', 'il110', 'il90', 'pl120', 'w57', 'p10', 'pm15', 'w55', 'i2r', 'p13', 'pl20', 'w13', 'i4l', 'pm55', 'w63', 'pss', 'pcr', 'p16', 'w32', 'pb', 'w21', 'il100', 'ph5', 'pl110', 'w58', 'pc', 'p23', 'p19', 'pbm', 'ps', 'pl25', 'pl35', 'pr40', 'w45', 'ph2.5', 'p6', 'pcl', 'w42', 'ph3.5', 'ph3.3', 'w22', 'w43', 'pm50', 'pm5', 'ph4.2', 'p29', 'pcd', 'w3', 'pr70', 'p8', 'pl15', 'pa13', 'ph2.8', 'pa14', 'wc', 'ph3', 'w47', 'pm10', 'il50', 'pr30', 'iz', 'w46', 'ph2.2', 'pm35', 'p1n', 'i10', 'w26', 'w34', 'pw3.2', 'pw4', 'pa10', 'pm46', 'pmb', 'i13', 'pl10', 'phclr', 'i12', 'pm2', 'pl90', 'p14', 'pm8', 'pm40', 'p2', 'i3', 'p25', 'pm25', 'p15', 'w8', 'w24', 'w41', 'pmr', 'i1', 'w20', 'pr80', 'w10', 'i14', 'ph4.8', 'phcs', 'pctl', 'pt', 'ph4.3', 'w18', 'i15']
for i, c in enumerate(cat, 1):
    dataset['categories'].append(
            {'id': i, 'name': c, 'supercategory': c[0]})
    categories.append(c)
imgs = annotations['imgs']  # imgs is itself a dict keyed by image id
types = annotations['types']
g_index = 1  # running annotation id
for idx, (img, info) in enumerate(imgs.items(), 1):
    if _type in info['path']:
        bgr = cv2.imread(os.path.join(src, info['path']))
        h, w, _ = bgr.shape
        objects = info['objects']
        dataset['images'].append({
                'file_name': info['path'].split('/')[-1],
                'id': idx,
                'width': w,
                'height': h})
        for obj in objects:
            bbox = obj['bbox']
            x1 = bbox['xmin']
            y1 = bbox['ymin']
            x2 = bbox['xmax']
            y2 = bbox['ymax']
            label = obj['category']
            segmentation = [[x1, y1, x2, y1, x2, y2, x1, y2]]
            width = max(0, x2 - x1)
            height = max(0, y2 - y1)
            if label not in categories:  # skip classes filtered out in Section 4
                continue
            dataset['annotations'].append({
                    'area': width * height,
                    'bbox': [x1, y1, width, height],
                    'category_id': categories.index(label),
                    'id': g_index,
                    'image_id': idx,
                    'iscrowd': 0,
                    'segmentation': segmentation})
            g_index += 1
json_name = os.path.join(src, f'TT100K_CoCo_format_{_type}.json')
with open(json_name, 'w') as f:
    json.dump(dataset, f)
print('done')
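The script converts one split at a time (_type = 'test' above), while Section 6 loads TT100K_CoCo_format_train.json, so it has to be run once per split. Wrapping the body in a function makes that a two-line loop; a sketch, assuming the code above is factored into convert(_type):

# Hedged sketch: convert both splits in one run
def convert(_type):
    ...  # the conversion body above, parameterized by _type

for split in ('train', 'test'):
    convert(split)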


6. Dataset Analysis (2)

  • Using the pycocotools COCO toolkit
from pycocotools.coco import COCO
json_path = '../tt100k_2021/annotations/TT100K_CoCo_format_train.json'
coco = COCO(annotation_file=json_path)
loading annotations into memory...
Done (t=0.12s)
creating index...
index created!

coco.anns  # annotation records
{1: {'area': 3519.984399989997,
  'bbox': [1580.0, 758.667, 58.66669999999999, 59.99969999999996],
  'category_id': 64,
  'id': 1,
  'image_id': 1,
  'iscrowd': 0,
  'segmentation': [[1580.0,
    758.667,
    1638.6667,
    758.667,
    1638.6667,
    818.6667,
    1580.0,
    818.6667]]},
 ...}
 
coco.imgs  # image records
{1: {'file_name': '62627.jpg', 'id': 1, 'width': 2048, 'height': 2048},
 ...}
 
coco.cats  # category records
{1: {'id': 1, 'name': 'ph2', 'supercategory': 'p'},
...
 144: {'id': 144, 'name': 'i15', 'supercategory': 'i'}}
 
ids = list(sorted(coco.imgs.keys()))  # image ids
len(ids)  # number of images
6034

coco.loadImgs([1, 3])
[{'file_name': '62627.jpg', 'id': 1, 'width': 2048, 'height': 2048},
 {'file_name': '84351.jpg', 'id': 3, 'width': 2048, 'height': 2048}]
 
img_path = '../tt100k_2021/train'
from PIL import Image, ImageDraw
import os
coco_classes = dict((v['id'], v['name']) for k,v in coco.cats.items())
import matplotlib.pyplot as plt
imgs = []
for img_id in ids[:3]:  # show the first 3 images
    ann_ids = coco.getAnnIds(imgIds=img_id)  # ids of all annotations on this image
    targets = coco.loadAnns(ann_ids)  # load those annotation records
    path = coco.loadImgs(img_id)[0]['file_name']  # file name
    img = Image.open(os.path.join(img_path, path)).convert('RGB')  # open the image
    draw = ImageDraw.Draw(img)
    for target in targets:  # draw each ground-truth box
        x, y, w, h = target['bbox']
        x1, y1, x2, y2 = x, y, int(x + w), int(y + h)
        draw.rectangle((x1, y1, x2, y2))
        draw.text((x1, y1), coco_classes[target['category_id']])
    imgs.append(img)  # append once per image, not once per box
#     plt.figure(figsize=(20, 20))    
#     plt.imshow(img)
#     plt.show()
imgs[0]


imgs[1]



coco_classes  # the class dictionary
{1: 'ph2',
 2: 'pl40',
...
 143: 'w18',
 144: 'i15'}
s = '\n'.join(list(coco_classes.values()))  # build the class-name file
with open('./model_data/tt100k_144_classes.txt', 'w') as f:
    f.write(s)
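train.py in the next section also expects model_data/yolo_anchors.txt: a single line of nine comma-separated width,height pairs (see get_anchors there). The stock YOLOv4 anchors work, but anchors clustered on TT100K's mostly small signs tend to fit better. A minimal IoU-based k-means sketch, not the repo's own script; it assumes the TT100K images are 2048x2048 (as the coco.imgs output above suggests) and scales boxes to the 416 network input:

# Hedged sketch: cluster 9 anchors from the converted training boxes
import json
import numpy as np

def iou_wh(boxes, clusters):
    # IoU between (w, h) pairs, treating all boxes as sharing one corner
    inter = np.minimum(boxes[:, None, 0], clusters[None, :, 0]) * \
            np.minimum(boxes[:, None, 1], clusters[None, :, 1])
    union = boxes[:, 0] * boxes[:, 1]
    union = union[:, None] + clusters[None, :, 0] * clusters[None, :, 1] - inter
    return inter / union

def kmeans_anchors(boxes, k=9, seed=0):
    rng = np.random.default_rng(seed)
    clusters = boxes[rng.choice(len(boxes), k, replace=False)].copy()
    assign = np.full(len(boxes), -1)
    while True:
        new_assign = iou_wh(boxes, clusters).argmax(axis=1)  # nearest cluster by IoU
        if (new_assign == assign).all():  # converged
            return clusters[np.argsort(clusters[:, 0] * clusters[:, 1])]
        assign = new_assign
        for i in range(k):  # median update is robust to outliers
            if (assign == i).any():
                clusters[i] = np.median(boxes[assign == i], axis=0)

with open('../tt100k_2021/TT100K_CoCo_format_train.json') as f:
    anns = json.load(f)['annotations']
wh = np.array([a['bbox'][2:4] for a in anns], dtype=float)
wh = wh[(wh > 0).all(axis=1)] * (416.0 / 2048.0)  # assumes 2048x2048 source images
anchors = kmeans_anchors(wh)
with open('model_data/yolo_anchors.txt', 'w') as f:  # one line, as get_anchors expects
    f.write(', '.join(f'{w:.2f},{h:.2f}' for w, h in anchors))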

7. Training

  • train.py
import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "2"
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.optim as optim
from torch.utils.data import DataLoader
from tqdm import tqdm
from nets.yolo4 import YoloBody
from nets.yolo_training import LossHistory, YOLOLoss, weights_init
# from utils.dataloader import YoloDataset, yolo_dataset_collate
from utils.coco import COCO, yolo_dataset_collate, COCOEval
from utils.utils import DecodeBox, non_max_suppression, add_weight_decay, setup_seed
from utils.summary import Summary
import shutil
yolo_decodes = []
confidence = 0.01
iou = 0.5
g_steps = 0
def get_classes(classes_path):  # load the class names
    with open(classes_path) as f:
        class_names = f.readlines()
    class_names = [c.strip() for c in class_names]
    return class_names
def get_anchors(anchors_path):  # load the anchor boxes from file
    with open(anchors_path) as f:
        anchors = f.readline()
    anchors = [float(x) for x in anchors.split(',')]
    return np.array(anchors).reshape([-1, 3, 2])[::-1, :, :]
def get_lr(optimizer):
    for param_group in optimizer.param_groups:
        return param_group['lr']
def fit_one_epoch(net, yolo_loss, epoch, epoch_size, epoch_size_val, gen, genval, Epoch, cuda):
    total_loss = 0
    val_loss = 0
    net.train()
    print('Start Train')
    with tqdm(total=epoch_size, desc=f'Epoch {epoch + 1}/{Epoch}', postfix=dict, mininterval=0.3) as pbar:
        for iteration, batch in enumerate(gen):
            if iteration >= epoch_size:
                break
            # images, targets = batch[0], batch[1]
            img_ids, images, targets = batch[0], batch[1], batch[2]
            with torch.no_grad():
                if cuda:
                    images = torch.from_numpy(images).type(torch.FloatTensor).cuda()
                    targets = [torch.from_numpy(ann).type(torch.FloatTensor) for ann in targets]
                else:
                    images = torch.from_numpy(images).type(torch.FloatTensor)
                    targets = [torch.from_numpy(ann).type(torch.FloatTensor) for ann in targets]
            optimizer.zero_grad()
            outputs = net(images)
            losses = []
            num_pos_all = 0
            for i in range(3):  # compute the loss over the three YOLO heads
                loss_item, num_pos = yolo_loss(outputs[i], targets)
                losses.append(loss_item)
                num_pos_all += num_pos
            loss = sum(losses) / num_pos_all
            total_loss += loss.item()
            loss.backward()
            optimizer.step()
            summary.add_scalar('train/total_loss', total_loss / (iteration + 1), epoch * epoch_size + iteration)
            pbar.set_postfix(**{'total_loss': total_loss / (iteration + 1),
                                'lr': get_lr(optimizer)})
            pbar.update(1)
    net.eval()
    print('Start Validation')
    with torch.no_grad():
        img_ids = []
        detections = []
        with tqdm(total=epoch_size_val, desc=f'Epoch {epoch + 1}/{Epoch}', postfix=dict, mininterval=0.3) as pbar:
            for iteration, batch in enumerate(genval):
                if iteration >= epoch_size_val:
                    break
                # images_val, targets_val = batch[0], batch[1]
                img_ids_val, images_val, targets_val = batch[0], batch[1], batch[2]
                with torch.no_grad():
                    if cuda:
                        images_val = torch.from_numpy(images_val).type(torch.FloatTensor).cuda()
                        targets_val = [torch.from_numpy(ann).type(torch.FloatTensor) for ann in targets_val]
                    else:
                        images_val = torch.from_numpy(images_val).type(torch.FloatTensor)
                        targets_val = [torch.from_numpy(ann).type(torch.FloatTensor) for ann in targets_val]
                    optimizer.zero_grad()
                    outputs = net(images_val)
                    output_list = []  # decode the three heads to compute mAP
                    for ii in range(3):
                        output_list.append(yolo_decodes[ii](outputs[ii]))
                    output = torch.cat(output_list, 1)
                    batch_detections = non_max_suppression(output, num_classes, conf_thres=confidence,
                                                           nms_thres=iou)
                    img_ids += img_ids_val
                    detections += batch_detections
                    losses = []
                    num_pos_all = 0
                    for i in range(3):
                        loss_item, num_pos = yolo_loss(outputs[i], targets_val)
                        losses.append(loss_item)
                        num_pos_all += num_pos
                    loss = sum(losses) / num_pos_all
                    val_loss += loss.item()
                # Write the loss to TensorBoard; the commented lines below would log every step
                # if Tensorboard:
                #     writer.add_scalar('Val_loss', loss, val_tensorboard_step)
                #     val_tensorboard_step += 1
                summary.add_scalar('val/total_loss', val_loss / (iteration + 1), epoch * epoch_size + iteration)
                pbar.set_postfix(**{'total_loss': val_loss / (iteration + 1)})
                pbar.update(1)
        loss_history.append_loss(total_loss / (epoch_size + 1), val_loss / (epoch_size_val + 1))
        eval_results = val_dataset.run_eval(img_ids, detections)
        summary.add_scalar('val/mAP', eval_results[0], epoch + 1)
        print('Finish Validation')
        print('Epoch:' + str(epoch + 1) + '/' + str(Epoch))
        print(eval_results)
        print('Total Loss: %.4f || Val Loss: %.4f ' % (total_loss / (epoch_size + 1), val_loss / (epoch_size_val + 1)))
        net_save_path = os.path.join(os.path.join(exp_dir, "ckpt"), "model_latest.pth")
        net_save_path_best = os.path.join(os.path.join(exp_dir, "ckpt"), "best_weights.pth")
        save_best = False
        if eval_results[0] >= metrics['mAP']:
            save_best = True
            metrics['train_loss'] = total_loss / (epoch_size + 1)
            metrics['val_loss'] = val_loss / (epoch_size_val + 1)
            metrics['mAP'] = eval_results[0]
            metrics['best_model_epoch'] = epoch + 1
            print('Saving state, iter:', str(epoch + 1))
        torch.save({"state_dict": model.state_dict(),
                    "metric": metrics}
                   , net_save_path)
        if save_best:
            shutil.copy(net_save_path, net_save_path_best)
            print("Saving current best: {}, metric:{}".format(net_save_path_best, metrics))
        print("@@@ best metric:{}".format(net_save_path_best, metrics)
if __name__ == "__main__":
    setup_seed(1215)
    Cuda = True  # whether to use CUDA; set it to False if you have no GPU
    # ------------------------------------------------------#
    #   Whether to normalize the loss (changes its scale):
    #   decides if the final loss is divided by batch_size
    #   or by the number of positive samples
    # ------------------------------------------------------#
    normalize = False
    # -------------------------------#
    #   Input shape:
    #   use 416x416 with limited GPU memory,
    #   608x608 if memory allows
    # -------------------------------#
    input_shape = (416, 416)
    # input_shape = (608, 608)
    # ----------------------------------------------------#
    #   Paths to the classes and anchors files; very important:
    #   change classes_path to match your own dataset before training
    # ----------------------------------------------------#
    anchors_path = 'model_data/yolo_anchors.txt'
    classes_path = 'model_data/tt100k_144_classes.txt'
    # ------------------------------------------------------#
    #   YOLOv4 tricks:
    #   mosaic        mosaic data augmentation, True or False;
    #                 it proved unstable in practice, so it defaults to False
    #   Cosine_lr     cosine-annealing learning rate, True or False
    #   smooth_label  label smoothing, usually below 0.01, e.g. 0.01 or 0.005
    # ------------------------------------------------------#
    mosaic = False
    Cosine_lr = True
    smooth_label = 0.005
    weight_decay = 1e-5
    metrics = {'mAP': 0, 'train_loss': float('inf'), 'val_loss': float('inf'), 'best_model_epoch': 0}
    class_names = get_classes(classes_path)  # load the class names
    anchors = get_anchors(anchors_path)  # and the anchors
    num_classes = len(class_names)
    for i in range(3):
        yolo_decodes.append(
            DecodeBox(anchors[i], num_classes, (input_shape[1], input_shape[0])))
    # ------------------------------------------------------#
    #   Build the YOLO model; make sure classes_path and the
    #   corresponding txt file are correct before training
    # ------------------------------------------------------#
    model = YoloBody(len(anchors[0]), num_classes)
    weights_init(model)
    # To train from scratch without pretrained weights, comment out the block below
    model_path = "model_data/yolo4_weights.pth"
    # model_path = "exp/exp_baseline/ckpt/best_weights.pth"
    print('Loading weights into state dict...')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(device)
    model_dict = model.state_dict()
    pretrained_dict = torch.load(model_path, map_location=device)["state_dict"]
    pretrained_dict = {k: v for k, v in pretrained_dict.items() if np.shape(model_dict[k]) == np.shape(v)}
    model_dict.update(pretrained_dict)
    model.load_state_dict(model_dict)
    print('Finished!')
    net = model.train()
    if Cuda:
        net = torch.nn.DataParallel(model)
        cudnn.benchmark = True
        net = net.cuda()
    yolo_loss = YOLOLoss(np.reshape(anchors, [-1, 2]), num_classes, (input_shape[1], input_shape[0]), smooth_label, Cuda, normalize)
    exp_dir = "./exp/exp_baseline_mosaic"
    loss_history = LossHistory(exp_dir)
    summary = Summary(os.path.join(exp_dir, "summary"))
    ckpt = os.path.join(exp_dir, "ckpt")
    if not os.path.exists(ckpt):
        os.makedirs(ckpt)
    if Cuda:
        graph_inputs = torch.randn(1, 3, input_shape[0], input_shape[1]).type(torch.FloatTensor).cuda()
    else:
        graph_inputs = torch.randn(1, 3, input_shape[0], input_shape[1]).type(torch.FloatTensor)
    summary.add_graph(model, graph_inputs)
    data_dir = "../tt100k_2021/"  # dataset root with images and labels
    # ------------------------------------------------------#
    #   Backbone features are generic; freezing the backbone speeds up
    #   training and keeps the pretrained weights from being destroyed
    #   early on.
    #   Init_Epoch      starting epoch
    #   Freeze_Epoch    last epoch of frozen-backbone training
    #   Epoch           total training epochs
    #   If you hit OOM or run out of GPU memory, reduce Batch_size
    # ------------------------------------------------------#
    if True:
        lr = 1e-3
        Batch_size = 16
        Init_Epoch = 0
        Freeze_Epoch = 30
        # ----------------------------------------------------------------------------#
        #   As in the YOLOv5 code, bias parameters of conv/FC layers and BN parameters
        #   get no weight decay; add_weight_decay applies that policy here
        # ----------------------------------------------------------------------------#
        parameters = add_weight_decay(net, weight_decay)
        optimizer = optim.Adam(parameters, lr)
        # optimizer = optim.Adam(net.parameters(), lr)
        if Cosine_lr:
            lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=5, eta_min=1e-5)
        else:
            lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.92)
        train_dataset = COCO(data_dir, (input_shape[0], input_shape[1]), mosaic=mosaic)
        val_dataset = COCOEval(data_dir, (input_shape[0], input_shape[1]))
        # train_dataset = YoloDataset(lines[:num_train], (input_shape[0], input_shape[1]), mosaic=mosaic, is_train=True)
        # val_dataset = YoloDataset(lines[num_train:], (input_shape[0], input_shape[1]), mosaic=False, is_train=False)
        gen = DataLoader(train_dataset, shuffle=True, batch_size=Batch_size, num_workers=4, pin_memory=True,
                         drop_last=True, collate_fn=yolo_dataset_collate)
        gen_val = DataLoader(val_dataset, shuffle=True, batch_size=Batch_size, num_workers=4, pin_memory=True,
                             drop_last=True, collate_fn=yolo_dataset_collate)
        epoch_size = len(train_dataset) // Batch_size
        epoch_size_val = len(val_dataset) // Batch_size
        if epoch_size == 0 or epoch_size_val == 0:
            raise ValueError("Dataset is too small to train on; please add more data.")
        for param in model.backbone.parameters():  # freeze the backbone for the first stage
            param.requires_grad = False
        for epoch in range(Init_Epoch, Freeze_Epoch):
            fit_one_epoch(net, yolo_loss, epoch, epoch_size, epoch_size_val, gen, gen_val, Freeze_Epoch, Cuda)
            lr_scheduler.step()
    torch.cuda.empty_cache()  # release unused cached GPU memory
    if True:
        lr = 1e-4
        Batch_size = 8
        Freeze_Epoch = 30
        Unfreeze_Epoch = 100
        # ----------------------------------------------------------------------------#
        #   In practice the optimizer's weight_decay proved counterproductive and was
        #   removed; feel free to turn it back on, typically with weight_decay=5e-4
        # ----------------------------------------------------------------------------#
        parameters = add_weight_decay(net, weight_decay)
        optimizer = optim.Adam(parameters, lr)
        # optimizer = optim.Adam(net.parameters(), lr)
        if Cosine_lr:
            lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=5, eta_min=1e-5)
        else:
            lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.92)
        train_dataset = COCO(data_dir, (input_shape[0], input_shape[1]), mosaic=mosaic)
        val_dataset = COCOEval(data_dir, (input_shape[0], input_shape[1]))
        # train_dataset = YoloDataset(lines[:num_train], (input_shape[0], input_shape[1]), mosaic=mosaic, is_train=True)
        # val_dataset = YoloDataset(lines[num_train:], (input_shape[0], input_shape[1]), mosaic=False, is_train=False)
        gen = DataLoader(train_dataset, shuffle=True, batch_size=Batch_size, num_workers=4, pin_memory=True,
                         drop_last=True, collate_fn=yolo_dataset_collate)
        gen_val = DataLoader(val_dataset, shuffle=True, batch_size=Batch_size, num_workers=4, pin_memory=True,
                             drop_last=True, collate_fn=yolo_dataset_collate)
        epoch_size = len(train_dataset) // Batch_size
        epoch_size_val = len(val_dataset) // Batch_size
        if epoch_size == 0 or epoch_size_val == 0:
            raise ValueError("Dataset is too small to train on; please add more data.")
        for param in model.backbone.parameters():  # unfreeze the backbone for the second stage
            param.requires_grad = True
        for epoch in range(Freeze_Epoch, Unfreeze_Epoch):
            fit_one_epoch(net, yolo_loss, epoch, epoch_size, epoch_size_val, gen, gen_val, Unfreeze_Epoch, Cuda)
            lr_scheduler.step()
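A usage note: each checkpoint saved in fit_one_epoch stores both "state_dict" and the metrics dict, so resuming or fine-tuning is just a matter of pointing model_path at exp/.../ckpt/best_weights.pth (the commented-out line near the top of __main__); the torch.load(model_path, map_location=device)["state_dict"] loading path handles these checkpoints and the pretrained weights file alike, as long as both use that layout.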


8. Prediction

  • predict.py
# ----------------------------------------------------#
#   predict.py merges single-image prediction, camera/video
#   detection, and FPS testing into one file; switch between
#   them by setting mode
# ----------------------------------------------------#
import time
import cv2
import numpy as np
from PIL import Image
from yolo import YOLO
if __name__ == "__main__":
    yolo = YOLO()
    # -------------------------------------------------------------------------#
    #   mode selects what to run:
    #   'predict'  single-image prediction
    #   'video'    video / camera detection
    #   'fps'      FPS benchmark
    # -------------------------------------------------------------------------#
    mode = "video"
    # -------------------------------------------------------------------------#
    #   video_path       path of the video; video_path=0 uses the webcam
    #   video_save_path  where to save the output video; "" means do not save
    #   video_fps        fps of the saved video
    #   these three options only take effect when mode='video'
    #   When saving, quit properly (Esc key) so the writer is released and the
    #   file is finalized; do not just kill the process
    # -------------------------------------------------------------------------#
    video_path = './video.mp4'
    video_save_path = ""
    video_fps = 25.0
    if mode == "predict":
        '''
        1. This code cannot do batch prediction directly; to predict a whole folder,
           walk it with os.listdir() and open each image with Image.open (see the
           sketch after this code block). get_dr_txt.py implements the same traversal
           and also saves the detection results.
        2. To save a detected image, call r_image.save("img.jpg") right here in predict.py.
        3. To get the predicted box coordinates, open yolo.detect_image and read
           top, left, bottom, right in the drawing section.
        4. To crop the detected objects out, slice the original image array with those
           top, left, bottom, right values inside yolo.detect_image.
        5. To draw extra text, e.g. the count of a specific class, test predicted_class
           in the drawing section (e.g. if predicted_class == 'car':) and write with draw.text.
        '''
        while True:
            img = input('Input image filename:')
            try:
                image = Image.open(img)
            except:
                print('Open Error! Try again!')
                continue
            else:
                r_image = yolo.detect_image(image)
                r_image.save(img.split("/")[-1])
                r_image.show()
    elif mode == "video":
        capture = cv2.VideoCapture(video_path)
        if video_save_path != "":
            fourcc = cv2.VideoWriter_fourcc(*'XVID')
            size = (int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)), int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
            out = cv2.VideoWriter(video_save_path, fourcc, video_fps, size)
        fps = 0.0
        while True:
            t1 = time.time()
            ref, frame = capture.read()  # read one frame
            if not ref:  # stop when the video ends or the camera fails
                break
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # convert BGR to RGB
            frame = Image.fromarray(np.uint8(frame))  # wrap as a PIL Image
            frame = np.array(yolo.detect_image(frame))  # run detection
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)  # back to BGR for OpenCV display
            fps = (fps + (1. / (time.time() - t1))) / 2
            print("fps= %.2f" % (fps))
            frame = cv2.putText(frame, "fps= %.2f" % (fps), (0, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
            cv2.imshow("video", frame)
            c = cv2.waitKey(1) & 0xff
            if video_save_path != "":
                out.write(frame)
            if c == 27:  # Esc quits
                break
        capture.release()
        if video_save_path != "":
            out.release()
        cv2.destroyAllWindows()
    elif mode == "fps":
        test_interval = 100
        img = Image.open('img/street.jpg')
        tact_time = yolo.get_FPS(img, test_interval)
        print(str(tact_time) + ' seconds, ' + str(1 / tact_time) + 'FPS, @batch_size 1')
    else:
        raise AssertionError("Please specify the correct mode: 'predict', 'video' or 'fps'.")
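As note 1 in the docstring above says, batch prediction is just a folder walk around the same yolo.detect_image call; a minimal sketch, with placeholder directory names:

# Hedged sketch: batch prediction over a folder (directory names are placeholders)
import os
from PIL import Image
from yolo import YOLO

yolo = YOLO()
src_dir, dst_dir = './imgs', './imgs_out'
os.makedirs(dst_dir, exist_ok=True)
for name in os.listdir(src_dir):
    if not name.lower().endswith(('.jpg', '.jpeg', '.png')):
        continue
    image = Image.open(os.path.join(src_dir, name))
    r_image = yolo.detect_image(image)  # same call as single-image mode
    r_image.save(os.path.join(dst_dir, name))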
