detect.py Explained
Code structure summary
Global imports
- Import installed third-party libraries
- Resolve the absolute path of the current file
- Load the project's own modules
"""
import argparse
import csv
import os
import platform
import sys
from pathlib import Path
import torch
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT))
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))
from ultralytics.utils.plotting import Annotator, colors, save_one_box
from models.common import DetectMultiBackend
from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
from utils.general import (
LOGGER,
Profile,
check_file,
check_img_size,
check_imshow,
check_requirements,
colorstr,
cv2,
increment_path,
non_max_suppression,
print_args,
scale_boxes,
strip_optimizer,
xyxy2xywh,
)
from utils.torch_utils import select_device, smart_inference_mode
Main execution
- Parse the command-line arguments
- Execute the main function
- Check that the required packages are installed
- Execute the run function, passing in the parsed command-line arguments
def main(opt):
"""Executes YOLOv5 model inference with given options, checking requirements before running the model."""
check_requirements(ROOT / "requirements.txt", exclude=("tensorboard", "thop"))
run(**vars(opt))
if __name__ == "__main__":
opt = parse_opt()
main(opt)
Setting the opt parameters
def parse_opt():
"""Parses command-line arguments for YOLOv5 detection, setting inference options and model configurations."""
parser = argparse.ArgumentParser()
parser.add_argument("--weights", nargs="+", type=str, default=ROOT / "yolov5s.pt", help="model path or triton URL")
parser.add_argument("--source", type=str, default=ROOT / "data/images", help="file/dir/URL/glob/screen/0(webcam)")
parser.add_argument("--data", type=str, default=ROOT / "data/coco128.yaml", help="(optional) dataset.yaml path")
parser.add_argument("--imgsz", "--img", "--img-size", nargs="+", type=int, default=[640], help="inference size h,w")
parser.add_argument("--conf-thres", type=float, default=0.25, help="confidence threshold")
parser.add_argument("--iou-thres", type=float, default=0.45, help="NMS IoU threshold")
parser.add_argument("--max-det", type=int, default=1000, help="maximum detections per image")
parser.add_argument("--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu")
parser.add_argument("--view-img", action="store_true", help="show results")
parser.add_argument("--save-txt", action="store_true", help="save results to *.txt")
parser.add_argument("--save-csv", action="store_true", help="save results in CSV format")
parser.add_argument("--save-conf", action="store_true", help="save confidences in --save-txt labels")
parser.add_argument("--save-crop", action="store_true", help="save cropped prediction boxes")
parser.add_argument("--nosave", action="store_true", help="do not save images/videos")
parser.add_argument("--classes", nargs="+", type=int, help="filter by class: --classes 0, or --classes 0 2 3")
parser.add_argument("--agnostic-nms", action="store_true", help="class-agnostic NMS")
parser.add_argument("--augment", action="store_true", help="augmented inference")
parser.add_argument("--visualize", action="store_true", help="visualize features")
parser.add_argument("--update", action="store_true", help="update all models")
parser.add_argument("--project", default=ROOT / "runs/detect", help="save results to project/name")
parser.add_argument("--name", default="exp", help="save results to project/name")
parser.add_argument("--exist-ok", action="store_true", help="existing project/name ok, do not increment")
parser.add_argument("--line-thickness", default=3, type=int, help="bounding box thickness (pixels)")
parser.add_argument("--hide-labels", default=False, action="store_true", help="hide labels")
parser.add_argument("--hide-conf", default=False, action="store_true", help="hide confidences")
parser.add_argument("--half", action="store_true", help="use FP16 half-precision inference")
parser.add_argument("--dnn", action="store_true", help="use OpenCV DNN for ONNX inference")
parser.add_argument("--vid-stride", type=int, default=1, help="video frame-rate stride")
opt = parser.parse_args()
opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1
print_args(vars(opt))
return opt
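Note how opt.imgsz is post-processed on the line opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1: a single value is duplicated so downstream code always receives an (h, w) pair. A quick illustration of that one line:

imgsz = [640]
imgsz *= 2 if len(imgsz) == 1 else 1  # [640] -> [640, 640]
imgsz = [640, 480]
imgsz *= 2 if len(imgsz) == 1 else 1  # already two values, unchanged: [640, 480]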
The run function
- Load the command-line parameters
- Initialize configuration: source, save_img, etc.
- Build the path for saving results
- Load the model
- Load the data
- Inference
- Warm up the model first (a dummy forward pass)
- Loop over every frame or image with a for loop
- Preprocess the image: dimensions, normalization, etc.
- Run the forward pass to get the predicted boxes
- Apply non-maximum suppression to the predicted boxes
- Draw all detections onto the original image: loop over every detection box
- Draw the detection box on the original image
- Decide whether to display the result in a window
- Decide whether to save the image
- Log the time taken for each image
- Print the run summary in the terminal
'''========================= 1. Load parameters ==========================='''
@smart_inference_mode()
def run(
weights=ROOT / "yolov5s.pt",
source=ROOT / "data/images",
data=ROOT / "data/coco128.yaml",
imgsz=(640, 640),
conf_thres=0.25,
iou_thres=0.45,
max_det=1000,
device="",
view_img=False,
save_txt=False,
save_csv=False,
save_conf=False,
save_crop=False,
nosave=False,
classes=None,
agnostic_nms=False,
augment=False,
visualize=False,
update=False,
project=ROOT / "runs/detect",
name="exp",
exist_ok=False,
line_thickness=3,
hide_labels=False,
hide_conf=False,
half=False,
dnn=False,
vid_stride=1,
):
    '''========================= 2. Initialize configuration ==========================='''
source = str(source)
save_img = not nosave and not source.endswith(".txt")
is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
is_url = source.lower().startswith(("rtsp://", "rtmp://", "http://", "https://"))
webcam = source.isnumeric() or source.endswith(".streams") or (is_url and not is_file)
screenshot = source.lower().startswith("screen")
if is_url and is_file:
source = check_file(source)
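The four flags above decide which loader handles the input. A minimal sketch of how a few typical source strings are classified by exactly these checks (the sample strings are illustrative):

from pathlib import Path
from utils.dataloaders import IMG_FORMATS, VID_FORMATS

for src in ["0", "data/images/bus.jpg", "rtsp://example.com/media.mp4", "screen", "list.streams"]:
    is_file = Path(src).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
    is_url = src.lower().startswith(("rtsp://", "rtmp://", "http://", "https://"))
    webcam = src.isnumeric() or src.endswith(".streams") or (is_url and not is_file)
    screenshot = src.lower().startswith("screen")
    # "0" -> webcam; "list.streams" -> stream list; "screen" -> screenshot;
    # a URL that points at a media file is downloaded by check_file and then treated as a file
    print(f"{src}: file={is_file}, url={is_url}, webcam={webcam}, screenshot={screenshot}")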
    '''========================= 3. Save results ==========================='''
save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)
(save_dir / "labels" if save_txt else save_dir).mkdir(parents=True, exist_ok=True)
    '''========================= 4. Load the model ==========================='''
device = select_device(device)
model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
    '''
    stride: the stride used at inference, 32 by default; large strides suit large objects, small strides suit small objects
    names: the list of class names for the predictions, e.g. ['person', 'bicycle', 'car', ...] for the default model
    pt: whether the loaded weights are a PyTorch model (i.e. a .pt file)
    '''
stride, names, pt = model.stride, model.names, model.pt
imgsz = check_img_size(imgsz, s=stride)
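check_img_size guarantees the inference size is a multiple of the model stride; indivisible values are rounded up (the real helper in utils.general also logs a warning when it changes the value). A rough sketch of the underlying rule, under that assumption:

import math

def make_divisible_sketch(x, stride=32):
    # round x up to the nearest multiple of stride: 638 -> 640, 641 -> 672
    return math.ceil(x / stride) * stride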
    '''========================= 5. Load the data ==========================='''
bs = 1
if webcam:
view_img = check_imshow(warn=True)
        '''
        source: the input source; img_size: the size images are rescaled to before detection; stride: the model stride;
        the role of auto is described in utils.augmentations.letterbox: it decides whether the image must be padded to a full square; with auto=True that is not required
        '''
dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
bs = len(dataset)
elif screenshot:
dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt)
else:
dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
vid_path, vid_writer = [None] * bs, [None] * bs
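The auto flag passed to the loaders above ends up in utils.augmentations.letterbox, which resizes an image while keeping its aspect ratio and pads the rest. A condensed sketch of that behavior (simplified from the real function; the gray value 114 matches YOLOv5's default padding color):

import cv2

def letterbox_sketch(img, new_shape=(640, 640), stride=32, auto=True, color=(114, 114, 114)):
    # resize keeping aspect ratio, then pad; auto=True pads only to the nearest stride multiple
    h, w = img.shape[:2]
    r = min(new_shape[0] / h, new_shape[1] / w)          # scale ratio
    new_unpad = (int(round(w * r)), int(round(h * r)))   # resized width, height
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]
    if auto:                                             # minimum rectangle instead of full square
        dw, dh = dw % stride, dh % stride
    dw, dh = dw / 2, dh / 2                              # split padding between both sides
    img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    return cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)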
    '''========================= 6. Inference ==========================='''
    '''Inference is the core of the whole script: a for loop walks the loaded data frame by frame, runs the model, applies non-maximum suppression, and draws the bounding boxes and predicted classes.'''
model.warmup(imgsz=(1 if pt or model.triton else bs, 3, *imgsz))
seen, windows, dt = 0, [], (Profile(device=device), Profile(device=device), Profile(device=device))
for path, im, im0s, vid_cap, s in dataset:
with dt[0]:
im = torch.from_numpy(im).to(model.device)
im = im.half() if model.fp16 else im.float()
im /= 255
if len(im.shape) == 3:
im = im[None]
if model.xml and im.shape[0] > 1:
ims = torch.chunk(im, im.shape[0], 0)
with dt[1]:
visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False
if model.xml and im.shape[0] > 1:
pred = None
for image in ims:
if pred is None:
pred = model(image, augment=augment, visualize=visualize).unsqueeze(0)
else:
pred = torch.cat((pred, model(image, augment=augment, visualize=visualize).unsqueeze(0)), dim=0)
pred = [pred, None]
else:
pred = model(im, augment=augment, visualize=visualize)
with dt[2]:
pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)
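non_max_suppression takes the raw prediction tensor and returns a list with one (n, 6) tensor per image, each row holding (x1, y1, x2, y2, confidence, class). Suppression is driven by the IoU between boxes; a toy sketch of that criterion (not the library implementation, which is vectorized via torchvision):

def iou_xyxy(a, b):
    # intersection-over-union of two boxes in (x1, y1, x2, y2) format
    x1, y1 = max(a[0], b[0]), max(a[1], b[1])
    x2, y2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0.0, x2 - x1) * max(0.0, y2 - y1)
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    return inter / (area_a + area_b - inter)

# two heavily overlapping boxes: IoU ~0.68 > iou_thres 0.45, so the lower-confidence one is dropped
print(iou_xyxy([0, 0, 100, 100], [10, 10, 110, 110]))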
csv_path = save_dir / "predictions.csv"
def write_to_csv(image_name, prediction, confidence):
"""Writes prediction data for an image to a CSV file, appending if the file exists."""
data = {"Image Name": image_name, "Prediction": prediction, "Confidence": confidence}
            file_exists = csv_path.is_file()  # check before open(): "a" mode creates the file, so checking afterwards would never write the header
            with open(csv_path, mode="a", newline="") as f:
                writer = csv.DictWriter(f, fieldnames=data.keys())
                if not file_exists:
                    writer.writeheader()
                writer.writerow(data)
        '''
        This loop processes the detection results for each image in the batch.
        The index "i" identifies which image of the batch is currently being handled, and "det" holds all detection boxes for that image. The first line of the loop body, "seen += 1", increments a counter of images processed so far.
        Next, the handling differs depending on whether the input is a webcam/stream batch.
        With a webcam or stream, the code indexes into the batch, copies the current frame into "im0", records the image path and frame counter in "p" and "frame", and appends the batch index to the log string "s".
        Otherwise, the image in "im0s" is used directly, the path and frame counter go into "p" and "frame", and if the dataset has a "frame" attribute its value is used (defaulting to 0).
        '''
for i, det in enumerate(pred):
seen += 1
if webcam:
p, im0, frame = path[i], im0s[i].copy(), dataset.count
s += f"{i}: "
else:
                '''
                Most of the time we read local image or video files through LoadImages, so batch_size = 1
                p: absolute path of the current image/video, e.g. F:\yolo_v5\yolov5-U\data\images\bus.jpg
                im0: the original image, before letterbox and padding
                frame: for video streams, the index of the current frame
                '''
p, im0, frame = path, im0s.copy(), getattr(dataset, "frame", 0)
p = Path(p)
save_path = str(save_dir / p.name)
txt_path = str(save_dir / "labels" / p.stem) + ("" if dataset.mode == "image" else f"_{frame}")
s += "%gx%g " % im.shape[2:]
gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]
imc = im0.copy() if save_crop else im0
annotator = Annotator(im0, line_width=line_thickness, example=str(names))
if len(det):
det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()
for c in det[:, 5].unique():
n = (det[:, 5] == c).sum()
s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "
for *xyxy, conf, cls in reversed(det):
c = int(cls)
label = names[c] if hide_conf else f"{names[c]}"
confidence = float(conf)
confidence_str = f"{confidence:.2f}"
if save_csv:
write_to_csv(p.name, label, confidence_str)
if save_txt:
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()
line = (cls, *xywh, conf) if save_conf else (cls, *xywh)
with open(f"{txt_path}.txt", "a") as f:
f.write(("%g " * len(line)).rstrip() % line + "\n")
if save_img or save_crop or view_img:
c = int(cls)
label = None if hide_labels else (names[c] if hide_conf else f"{names[c]} {conf:.2f}")
annotator.box_label(xyxy, label, color=colors(c, True))
if save_crop:
save_one_box(xyxy, imc, file=save_dir / "crops" / names[c] / f"{p.stem}.jpg", BGR=True)
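The --save-txt branch above divides by gn = [w, h, w, h] so each label is stored in YOLO's normalized xywh format (class, x_center, y_center, width, height, all relative to the original image size). A worked example with illustrative numbers:

import torch
from utils.general import xyxy2xywh

w0, h0 = 640, 480                                  # original image size (illustrative)
gn = torch.tensor([w0, h0, w0, h0])                # normalization gain
xyxy = torch.tensor([50.0, 100.0, 150.0, 300.0])   # x1, y1, x2, y2
xywh = (xyxy2xywh(xyxy.view(1, 4)) / gn).view(-1).tolist()
print(xywh)  # ≈ [0.15625, 0.4167, 0.15625, 0.4167]: center (100, 200) and size (100, 200), normalized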
            '''
            This block streams the detection results live and saves them when requested.
            Specifically:
            First, the annotated result image im0 is obtained via annotator.result().
            If view_img is true, the image stream is displayed: OpenCV's cv2.imshow() shows the image in a window, and cv2.waitKey(1) waits 1 ms so the window stays responsive.
            Then, if save_img is true, the result image is saved:
            If the dataset mode is "image", the image is written directly to save_path with cv2.imwrite().
            If the dataset mode is video or stream:
            Check whether the current video save path differs from the previous one; if so, a new video writer must be initialized.
            Release the previous video writer.
            For a video file, read the frame rate (fps), width, and height from the capture.
            For a live stream, assume 30 fps and take the width and height of the current frame.
            Force the save path's suffix to ".mp4".
            Initialize a writer with cv2.VideoWriter(), using the mp4v codec and the frame rate and frame size obtained above.
            Write the current frame im0 to the video.
            '''
im0 = annotator.result()
if view_img:
if platform.system() == "Linux" and p not in windows:
windows.append(p)
cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)
cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])
cv2.imshow(str(p), im0)
cv2.waitKey(1)
if save_img:
if dataset.mode == "image":
cv2.imwrite(save_path, im0)
else:
if vid_path[i] != save_path:
vid_path[i] = save_path
if isinstance(vid_writer[i], cv2.VideoWriter):
vid_writer[i].release()
if vid_cap:
fps = vid_cap.get(cv2.CAP_PROP_FPS)
w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
else:
fps, w, h = 30, im0.shape[1], im0.shape[0]
save_path = str(Path(save_path).with_suffix(".mp4"))
vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))
vid_writer[i].write(im0)
LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms")
    '''================ 7. Print the run results in the terminal ============================'''
t = tuple(x.t / seen * 1e3 for x in dt)
LOGGER.info(f"Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}" % t)
if save_txt or save_img:
s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ""
LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
if update:
strip_optimizer(weights[0])
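Because run exposes every option as a keyword argument with a sensible default, it can also be called from Python rather than the command line. A minimal sketch, assuming this file is importable as detect:

from detect import run

run(weights="yolov5s.pt", source="data/images", conf_thres=0.4, save_txt=True)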
The complete detect.py
"""
Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc.
Usage - sources:
$ python detect.py --weights yolov5s.pt --source 0 # webcam
img.jpg # image
vid.mp4 # video
screen # screenshot
path/ # directory
list.txt # list of images
list.streams # list of streams
'path/*.jpg' # glob
'https://youtu.be/LNwODJXcvt4' # YouTube
'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream
Usage - formats:
$ python detect.py --weights yolov5s.pt # PyTorch
yolov5s.torchscript # TorchScript
yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn
yolov5s_openvino_model # OpenVINO
yolov5s.engine # TensorRT
yolov5s.mlmodel # CoreML (macOS-only)
yolov5s_saved_model # TensorFlow SavedModel
yolov5s.pb # TensorFlow GraphDef
yolov5s.tflite # TensorFlow Lite
yolov5s_edgetpu.tflite # TensorFlow Edge TPU
yolov5s_paddle_model # PaddlePaddle
"""
import argparse
import csv
import os
import platform
import sys
from pathlib import Path
import torch
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0]
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT))
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))
from ultralytics.utils.plotting import Annotator, colors, save_one_box
from models.common import DetectMultiBackend
from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
from utils.general import (
LOGGER,
Profile,
check_file,
check_img_size,
check_imshow,
check_requirements,
colorstr,
cv2,
increment_path,
non_max_suppression,
print_args,
scale_boxes,
strip_optimizer,
xyxy2xywh,
)
from utils.torch_utils import select_device, smart_inference_mode
'''========================= 1. Load parameters ==========================='''
@smart_inference_mode()
def run(
weights=ROOT / "yolov5s.pt",
source=ROOT / "data/images",
data=ROOT / "data/coco128.yaml",
imgsz=(640, 640),
conf_thres=0.25,
iou_thres=0.45,
max_det=1000,
device="",
view_img=False,
save_txt=False,
save_csv=False,
save_conf=False,
save_crop=False,
nosave=False,
classes=None,
agnostic_nms=False,
augment=False,
visualize=False,
update=False,
project=ROOT / "runs/detect",
name="exp",
exist_ok=False,
line_thickness=3,
hide_labels=False,
hide_conf=False,
half=False,
dnn=False,
vid_stride=1,
):
    '''========================= 2. Initialize configuration ==========================='''
source = str(source)
save_img = not nosave and not source.endswith(".txt")
is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
is_url = source.lower().startswith(("rtsp://", "rtmp://", "http://", "https://"))
webcam = source.isnumeric() or source.endswith(".streams") or (is_url and not is_file)
screenshot = source.lower().startswith("screen")
if is_url and is_file:
source = check_file(source)
    '''========================= 3. Save results ==========================='''
save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)
(save_dir / "labels" if save_txt else save_dir).mkdir(parents=True, exist_ok=True)
    '''========================= 4. Load the model ==========================='''
device = select_device(device)
model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
    '''
    stride: the stride used at inference, 32 by default; large strides suit large objects, small strides suit small objects
    names: the list of class names for the predictions, e.g. ['person', 'bicycle', 'car', ...] for the default model
    pt: whether the loaded weights are a PyTorch model (i.e. a .pt file)
    '''
stride, names, pt = model.stride, model.names, model.pt
imgsz = check_img_size(imgsz, s=stride)
    '''========================= 5. Load the data ==========================='''
bs = 1
if webcam:
view_img = check_imshow(warn=True)
        '''
        source: the input source; img_size: the size images are rescaled to before detection; stride: the model stride;
        the role of auto is described in utils.augmentations.letterbox: it decides whether the image must be padded to a full square; with auto=True that is not required
        '''
dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
bs = len(dataset)
elif screenshot:
dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt)
else:
dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
vid_path, vid_writer = [None] * bs, [None] * bs
    '''========================= 6. Inference ==========================='''
    '''Inference is the core of the whole script: a for loop walks the loaded data frame by frame, runs the model, applies non-maximum suppression, and draws the bounding boxes and predicted classes.'''
model.warmup(imgsz=(1 if pt or model.triton else bs, 3, *imgsz))
seen, windows, dt = 0, [], (Profile(device=device), Profile(device=device), Profile(device=device))
for path, im, im0s, vid_cap, s in dataset:
with dt[0]:
im = torch.from_numpy(im).to(model.device)
im = im.half() if model.fp16 else im.float()
im /= 255
if len(im.shape) == 3:
im = im[None]
if model.xml and im.shape[0] > 1:
ims = torch.chunk(im, im.shape[0], 0)
with dt[1]:
visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False
if model.xml and im.shape[0] > 1:
pred = None
for image in ims:
if pred is None:
pred = model(image, augment=augment, visualize=visualize).unsqueeze(0)
else:
pred = torch.cat((pred, model(image, augment=augment, visualize=visualize).unsqueeze(0)), dim=0)
pred = [pred, None]
else:
pred = model(im, augment=augment, visualize=visualize)
with dt[2]:
pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)
csv_path = save_dir / "predictions.csv"
def write_to_csv(image_name, prediction, confidence):
"""Writes prediction data for an image to a CSV file, appending if the file exists."""
data = {"Image Name": image_name, "Prediction": prediction, "Confidence": confidence}
            file_exists = csv_path.is_file()  # check before open(): "a" mode creates the file, so checking afterwards would never write the header
            with open(csv_path, mode="a", newline="") as f:
                writer = csv.DictWriter(f, fieldnames=data.keys())
                if not file_exists:
                    writer.writeheader()
                writer.writerow(data)
        '''
        This loop processes the detection results for each image in the batch.
        The index "i" identifies which image of the batch is currently being handled, and "det" holds all detection boxes for that image. The first line of the loop body, "seen += 1", increments a counter of images processed so far.
        Next, the handling differs depending on whether the input is a webcam/stream batch.
        With a webcam or stream, the code indexes into the batch, copies the current frame into "im0", records the image path and frame counter in "p" and "frame", and appends the batch index to the log string "s".
        Otherwise, the image in "im0s" is used directly, the path and frame counter go into "p" and "frame", and if the dataset has a "frame" attribute its value is used (defaulting to 0).
        '''
for i, det in enumerate(pred):
seen += 1
if webcam:
p, im0, frame = path[i], im0s[i].copy(), dataset.count
s += f"{i}: "
else:
                '''
                Most of the time we read local image or video files through LoadImages, so batch_size = 1
                p: absolute path of the current image/video, e.g. F:\yolo_v5\yolov5-U\data\images\bus.jpg
                im0: the original image, before letterbox and padding
                frame: for video streams, the index of the current frame
                '''
p, im0, frame = path, im0s.copy(), getattr(dataset, "frame", 0)
p = Path(p)
save_path = str(save_dir / p.name)
txt_path = str(save_dir / "labels" / p.stem) + ("" if dataset.mode == "image" else f"_{frame}")
s += "%gx%g " % im.shape[2:]
gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]
imc = im0.copy() if save_crop else im0
annotator = Annotator(im0, line_width=line_thickness, example=str(names))
if len(det):
det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()
for c in det[:, 5].unique():
n = (det[:, 5] == c).sum()
s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "
for *xyxy, conf, cls in reversed(det):
c = int(cls)
label = names[c] if hide_conf else f"{names[c]}"
confidence = float(conf)
confidence_str = f"{confidence:.2f}"
if save_csv:
write_to_csv(p.name, label, confidence_str)
if save_txt:
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()
line = (cls, *xywh, conf) if save_conf else (cls, *xywh)
with open(f"{txt_path}.txt", "a") as f:
f.write(("%g " * len(line)).rstrip() % line + "\n")
if save_img or save_crop or view_img:
c = int(cls)
label = None if hide_labels else (names[c] if hide_conf else f"{names[c]} {conf:.2f}")
annotator.box_label(xyxy, label, color=colors(c, True))
if save_crop:
save_one_box(xyxy, imc, file=save_dir / "crops" / names[c] / f"{p.stem}.jpg", BGR=True)
            '''
            This block streams the detection results live and saves them when requested.
            Specifically:
            First, the annotated result image im0 is obtained via annotator.result().
            If view_img is true, the image stream is displayed: OpenCV's cv2.imshow() shows the image in a window, and cv2.waitKey(1) waits 1 ms so the window stays responsive.
            Then, if save_img is true, the result image is saved:
            If the dataset mode is "image", the image is written directly to save_path with cv2.imwrite().
            If the dataset mode is video or stream:
            Check whether the current video save path differs from the previous one; if so, a new video writer must be initialized.
            Release the previous video writer.
            For a video file, read the frame rate (fps), width, and height from the capture.
            For a live stream, assume 30 fps and take the width and height of the current frame.
            Force the save path's suffix to ".mp4".
            Initialize a writer with cv2.VideoWriter(), using the mp4v codec and the frame rate and frame size obtained above.
            Write the current frame im0 to the video.
            '''
im0 = annotator.result()
if view_img:
if platform.system() == "Linux" and p not in windows:
windows.append(p)
cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)
cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])
cv2.imshow(str(p), im0)
cv2.waitKey(1)
if save_img:
if dataset.mode == "image":
cv2.imwrite(save_path, im0)
else:
if vid_path[i] != save_path:
vid_path[i] = save_path
if isinstance(vid_writer[i], cv2.VideoWriter):
vid_writer[i].release()
if vid_cap:
fps = vid_cap.get(cv2.CAP_PROP_FPS)
w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
else:
fps, w, h = 30, im0.shape[1], im0.shape[0]
save_path = str(Path(save_path).with_suffix(".mp4"))
vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))
vid_writer[i].write(im0)
LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms")
    '''================ 7. Print the run results in the terminal ============================'''
t = tuple(x.t / seen * 1e3 for x in dt)
LOGGER.info(f"Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}" % t)
if save_txt or save_img:
s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ""
LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
if update:
strip_optimizer(weights[0])
def parse_opt():
"""Parses command-line arguments for YOLOv5 detection, setting inference options and model configurations."""
parser = argparse.ArgumentParser()
parser.add_argument("--weights", nargs="+", type=str, default=ROOT / "yolov5s.pt", help="model path or triton URL")
parser.add_argument("--source", type=str, default=ROOT / "data/images", help="file/dir/URL/glob/screen/0(webcam)")
parser.add_argument("--data", type=str, default=ROOT / "data/coco128.yaml", help="(optional) dataset.yaml path")
parser.add_argument("--imgsz", "--img", "--img-size", nargs="+", type=int, default=[640], help="inference size h,w")
parser.add_argument("--conf-thres", type=float, default=0.25, help="confidence threshold")
parser.add_argument("--iou-thres", type=float, default=0.45, help="NMS IoU threshold")
parser.add_argument("--max-det", type=int, default=1000, help="maximum detections per image")
parser.add_argument("--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu")
parser.add_argument("--view-img", action="store_true", help="show results")
parser.add_argument("--save-txt", action="store_true", help="save results to *.txt")
parser.add_argument("--save-csv", action="store_true", help="save results in CSV format")
parser.add_argument("--save-conf", action="store_true", help="save confidences in --save-txt labels")
parser.add_argument("--save-crop", action="store_true", help="save cropped prediction boxes")
parser.add_argument("--nosave", action="store_true", help="do not save images/videos")
parser.add_argument("--classes", nargs="+", type=int, help="filter by class: --classes 0, or --classes 0 2 3")
parser.add_argument("--agnostic-nms", action="store_true", help="class-agnostic NMS")
parser.add_argument("--augment", action="store_true", help="augmented inference")
parser.add_argument("--visualize", action="store_true", help="visualize features")
parser.add_argument("--update", action="store_true", help="update all models")
parser.add_argument("--project", default=ROOT / "runs/detect", help="save results to project/name")
parser.add_argument("--name", default="exp", help="save results to project/name")
parser.add_argument("--exist-ok", action="store_true", help="existing project/name ok, do not increment")
parser.add_argument("--line-thickness", default=3, type=int, help="bounding box thickness (pixels)")
parser.add_argument("--hide-labels", default=False, action="store_true", help="hide labels")
parser.add_argument("--hide-conf", default=False, action="store_true", help="hide confidences")
parser.add_argument("--half", action="store_true", help="use FP16 half-precision inference")
parser.add_argument("--dnn", action="store_true", help="use OpenCV DNN for ONNX inference")
parser.add_argument("--vid-stride", type=int, default=1, help="video frame-rate stride")
opt = parser.parse_args()
opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1
print_args(vars(opt))
return opt
def main(opt):
"""Executes YOLOv5 model inference with given options, checking requirements before running the model."""
check_requirements(ROOT / "requirements.txt", exclude=("tensorboard", "thop"))
run(**vars(opt))
if __name__ == "__main__":
opt = parse_opt()
main(opt)