第一:在项目目录下创建一个DetectAPI.py 文件然后把下面代码复制到py文件里:
import argparse
import os
import platform
import random
import sys
import time
from pathlib import Path
import torch
from torch.backends import cudnn
# Resolve the project root and make it importable.
# NOTE: in the pasted original, the `sys.path.append` line had lost its
# indentation under the `if`, which is a SyntaxError — restored here.
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]  # YOLOv5 root directory (directory containing this file)
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to sys.path so local packages import
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # re-express ROOT relative to the CWD
from models.common import DetectMultiBackend
from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh)
from utils.plots import Annotator, colors, save_one_box
from utils.torch_utils import select_device, smart_inference_mode, time_sync
"""
使用面向对象编程中的类来封装,需要去除掉原始 detect.py 中的结果保存方法,重写
保存方法将结果保存到一个 csv 文件中并打上视频的对应帧率
"""
class YoloOpt:
def __init__(self, weights='weights/last.pt',
imgsz=(640, 640), conf_thres=0.25,
iou_thres=0.45, device='0', view_img=False,
classes=None, agnostic_nms=False,
augment=False, update=False, exist_ok=False,
project='/detect/result', name='result_exp',
save_csv=True,
half_yn=False):
self.weights = weights # 权重文件地址
self.source = None # 待识别的图像
if imgsz is None:
self.imgsz = (640, 640)
self.imgsz = imgsz # 输入图片的大小,默认 (640,640)
self.conf_thres = conf_thres # object置信度阈值 默认0.25 用在nms中
self.iou_thres = iou_thres # 做nms的iou阈值 默认0.45 用在nms中
self.device = device # 执行代码的设备,由于项目只能用 CPU,这里只封装了 CPU 的方法
self.view_img = view_img # 是否展示预测之后的图片或视频 默认False
self.classes = classes # 只保留一部分的类别,默认是全部保留
s