Simplified YOLOv7 .pt inference example

The script below wraps checkpoint loading, letterbox preprocessing, inference, non-max suppression, and box drawing into a single reusable YOLOv7Detector class.

import time

import cv2
import numpy as np
import torch
from numpy import random

from models.experimental import attempt_load
from utils.datasets import letterbox
from utils.general import non_max_suppression, scale_coords, xyxy2xywh
from utils.plots import plot_one_box
from utils.torch_utils import select_device


class YOLOv7Detector:

    def __init__(self, weights='best.pt', conf_thres=0.3, iou_thres=0.45):
        """Load the model once; the instance can then be reused for every frame."""
        self.half = False  # FP16 inference; only worthwhile on CUDA devices
        self.conf_thres = conf_thres
        self.iou_thres = iou_thres
        self.device = select_device('0')  # '0' = first CUDA GPU; pass 'cpu' to force CPU
        self.model = attempt_load(weights, map_location=self.device)  # load FP32 model

        if self.half:
            self.model.half()  # to FP16

        # stride = int(model.stride.max())  # model stride
        # imgsz = check_img_size(imgsz, s=stride)  # check img_size

        # Get names and colors
        self.names = self.model.module.names if hasattr(self.model, 'module') else self.model.names
        self.colors = [[random.randint(0, 255) for _ in range(3)] for _ in self.names]
     
    def image_preprocess(self, image):
        im0 = image.copy()

        # Resize and pad to the network input size while keeping the aspect ratio
        img = letterbox(im0, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True,
                        stride=32)[0]
        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, HWC to CHW
        img = np.ascontiguousarray(img)
        img = torch.from_numpy(img).to(self.device)
        img = img.half() if self.half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)
        return img, im0

    def __call__(self, image, *args, **kwargs):
        img, img0 = self.image_preprocess(image)
        with torch.no_grad():  # inference only, no gradients needed
            pred = self.model(img, augment=False)[0]
        pred = non_max_suppression(pred, conf_thres=self.conf_thres, iou_thres=self.iou_thres, classes=None,
                                   agnostic=False)

        for i, det in enumerate(pred):  # detections per image
            # gn = torch.tensor(img0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
            if len(det):
                # # Rescale boxes from img_size to im0 size
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img0.shape).round()

                # Write results
                for *xyxy, conf, cls in reversed(det):
                    # xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
                    label = f'{self.names[int(cls)]} {conf:.2f}'
                    plot_one_box(xyxy, img0, label=label, color=self.colors[int(cls)], line_thickness=1)

        return img0

if __name__ == '__main__':
    yolov7_detector = YOLOv7Detector(weights='best.pt')
    img = 'E:/yolov7_ui_qt/VOCdevkit/VOC2007/JPEGImages/000000016761.jpg'
    while True:  # run the same test image repeatedly to gauge inference speed
        im0 = cv2.imread(img)
        t0 = time.time()
        im = yolov7_detector(im0)
        print(f'Done. ({time.time() - t0:.3f}s)')
        cv2.imshow("123456", im)
        if cv2.waitKey(1) & 0xFF == ord('q'):  # press 'q' to quit
            break
    cv2.destroyAllWindows()
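The same detector instance also works on a live video stream. The following is a minimal sketch, not part of the original post, that swaps the static-image loop above for webcam frames; the camera index 0, the window name, and the 'best.pt' weights path are assumptions to adjust for your setup. It reuses the imports and the YOLOv7Detector class from the listing above, so it can simply replace the __main__ block.

# Hedged sketch: drop-in replacement for the __main__ block above, reading frames
# from a webcam (index 0 is an assumption; a video file path also works).
if __name__ == '__main__':
    detector = YOLOv7Detector(weights='best.pt')
    cap = cv2.VideoCapture(0)
    try:
        while cap.isOpened():
            ok, frame = cap.read()
            if not ok:
                break  # end of stream or camera error
            t0 = time.time()
            annotated = detector(frame)  # frame with boxes and labels drawn on it
            fps = 1.0 / max(time.time() - t0, 1e-6)
            cv2.putText(annotated, f'{fps:.1f} FPS', (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2)
            cv2.imshow('yolov7', annotated)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        cap.release()
        cv2.destroyAllWindows()

Because the model and class colors are loaded once in __init__, the per-frame cost is dominated by preprocessing and the forward pass, so the FPS overlay reflects actual inference speed.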
