yolov3的GUI界面(简易,图片检测)

我使用的是AB大神的darknet版yolov3,这里默认环境已经配置好了

当yoloGui文件写好之后发现yolov3自带的darknet_images.py文件中的检测函数一直报错,后来寻找到一位大佬的修改过的可以进行批量检测的代码,略加修改发现可用,在使用时需要将yolov3的相关文件改为自己的路径

在darknet文件夹下新建detect.py文件,复制以下内容

import argparse
import os
import glob
import random
import darknet
import time
import cv2
import numpy as np




def parser():
    """Build and parse command-line arguments for YOLO image detection.

    Returns:
        argparse.Namespace with the input source, weights/config/data paths,
        batch size, display flags and the confidence threshold.
    """
    parser = argparse.ArgumentParser(description="YOLO Object Detection")
    # BUGFIX: the original help string ended mid-sentence ("If no input is
    # given, "); completed it. Also fixed the "windown" typo below.
    parser.add_argument("--input", type=str, default="",
                        help="image source. It can be a single image, a"
                        "txt with paths to them, or a folder. Image valid"
                        " formats are jpg, jpeg or png."
                        "If no input is given, the hard-coded defaults are used.")
    parser.add_argument("--batch_size", default=1, type=int,
                        help="number of images to be processed at the same time")
    parser.add_argument("--weights", default="myData/backup/my_yolov3_last.weights",  # change to your own path
                        help="yolo weights path")
    parser.add_argument("--dont_show", action='store_true',
                        help="window inference display. For headless systems")
    parser.add_argument("--ext_output", action='store_true',
                        help="display bbox coordinates of detected objects")
    parser.add_argument("--save_labels", action='store_true',
                        help="save detections bbox for each image in yolo format")
    parser.add_argument("--config_file", default="./cfg/my_yolov3.cfg",
                        help="path to config file")
    parser.add_argument("--data_file", default="./cfg/my_data.data",
                        help="path to data file")
    parser.add_argument("--thresh", type=float, default=.25,
                        help="remove detections with lower confidence")
    return parser.parse_args()

def check_arguments_errors(args):
    """Sanity-check the parsed CLI arguments.

    Raises AssertionError for an out-of-range threshold and ValueError for
    any configured file path that does not exist. ``args.input`` is only
    checked when it is non-empty.
    """
    assert 0 < args.thresh < 1, "Threshold should be a float between zero and one (non-inclusive)"
    required_paths = [
        (args.config_file, "Invalid config path {}"),
        (args.weights, "Invalid weight path {}"),
        (args.data_file, "Invalid data file path {}"),
    ]
    if args.input:
        required_paths.append((args.input, "Invalid image path {}"))
    for path, message in required_paths:
        if not os.path.exists(path):
            raise ValueError(message.format(os.path.abspath(path)))


def check_batch_shape(images, batch_size):
    """Validate a batch of images for darknet batch inference.

    All images must share one shape and the batch must be large enough to
    hold them. Returns the common (height, width, channels) shape tuple.
    """
    first_shape = images[0].shape
    if any(img.shape != first_shape for img in images[1:]):
        raise ValueError("Images don't have same shape")
    if len(images) > batch_size:
        raise ValueError("Batch size higher than number of images")
    return first_shape


def load_images(images_path):
    """Resolve *images_path* into a list of image file paths.

    A direct jpg/jpeg/png path is returned as a one-element list; a .txt
    file is read and each line returned as a path; anything else is
    treated as a folder and globbed for jpg, png and jpeg files.
    """
    extension = images_path.split('.')[-1]
    if extension in ('jpg', 'jpeg', 'png'):
        return [images_path]
    if extension == "txt":
        with open(images_path, "r") as handle:
            return handle.read().splitlines()
    collected = []
    for pattern in ("*.jpg", "*.png", "*.jpeg"):
        collected += glob.glob(os.path.join(images_path, pattern))
    return collected


def prepare_batch(images, network, channels=3):
    """Pack a list of BGR images into one darknet IMAGE batch.

    Each image is converted to RGB, resized to the network input size,
    transposed to CHW layout, then the whole batch is flattened and
    normalised to [0, 1] before being handed over as a C float pointer.
    """
    net_w = darknet.network_width(network)
    net_h = darknet.network_height(network)

    chw_images = [
        cv2.resize(cv2.cvtColor(img, cv2.COLOR_BGR2RGB), (net_w, net_h),
                   interpolation=cv2.INTER_LINEAR).transpose(2, 0, 1)
        for img in images
    ]

    flat = np.ascontiguousarray(
        np.concatenate(chw_images, axis=0).flat, dtype=np.float32) / 255.0
    data_ptr = flat.ctypes.data_as(darknet.POINTER(darknet.c_float))
    return darknet.IMAGE(net_w, net_h, channels, data_ptr)


def image_detection(image_path, network, class_names, class_colors, thresh):
    """Run YOLO on one image file and return the annotated image in RGB.

    Darknet does not accept numpy arrays, so the file is read with OpenCV,
    resized to the network input resolution and copied into a darknet
    IMAGE buffer before inference.
    """
    net_w = darknet.network_width(network)
    net_h = darknet.network_height(network)
    dn_image = darknet.make_image(net_w, net_h, 3)

    bgr = cv2.imread(image_path)
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
    resized = cv2.resize(rgb, (net_w, net_h), interpolation=cv2.INTER_LINEAR)

    darknet.copy_image_from_bytes(dn_image, resized.tobytes())
    detections = darknet.detect_image(network, class_names, dn_image, thresh=thresh)
    darknet.free_image(dn_image)
    annotated = darknet.draw_boxes(detections, resized, class_colors)
    # return cv2.cvtColor(annotated, cv2.COLOR_BGR2RGB), detections  # use this line for batch testing (detect.py main)
    return cv2.cvtColor(annotated, cv2.COLOR_BGR2RGB)  # use this line when driving from yoloGui


def batch_detection(network, images, class_names, class_colors,
                    thresh=0.25, hier_thresh=.5, nms=.45, batch_size=4):
    """Run one darknet forward pass over a batch of equally-sized images.

    Returns (images, batch_predictions): the input images with boxes drawn
    in place, and a per-image list of (label, confidence, bbox) tuples.
    NOTE(review): the C-side detection buffers returned by
    network_predict_batch must only be freed after NMS and draw_boxes have
    consumed them — keep the statement order below intact.
    """
    # All images must share one shape; batch inference needs uniform input.
    image_height, image_width, _ = check_batch_shape(images, batch_size)
    darknet_images = prepare_batch(images, network)
    batch_detections = darknet.network_predict_batch(network, darknet_images, batch_size, image_width,
                                                     image_height, thresh, hier_thresh, None, 0, 0)
    batch_predictions = []
    for idx in range(batch_size):
        num = batch_detections[idx].num
        detections = batch_detections[idx].dets
        # Apply non-maximum suppression in place when an NMS threshold is set.
        if nms:
            darknet.do_nms_obj(detections, num, len(class_names), nms)
        predictions = darknet.remove_negatives(detections, class_names, num)
        # Boxes are drawn directly onto the caller's image array.
        images[idx] = darknet.draw_boxes(predictions, images[idx], class_colors)
        batch_predictions.append(predictions)
    # Release the C-side detection memory only after all results are copied out.
    darknet.free_batch_detections(batch_detections, batch_size)
    return images, batch_predictions


def image_classification(image, network, class_names):
    """Classify *image* and return (class_name, score) pairs, best first."""
    net_w = darknet.network_width(network)
    net_h = darknet.network_height(network)
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    resized = cv2.resize(rgb, (net_w, net_h), interpolation=cv2.INTER_LINEAR)
    dn_image = darknet.make_image(net_w, net_h, 3)
    darknet.copy_image_from_bytes(dn_image, resized.tobytes())
    scores = darknet.predict_image(network, dn_image)
    predictions = [(name, scores[idx]) for idx, name in enumerate(class_names)]
    darknet.free_image(dn_image)
    # Descending by score; stable sort keeps input order for ties.
    predictions.sort(key=lambda pair: pair[1], reverse=True)
    return predictions


def convert2relative(image, bbox):
    """Convert an absolute (x, y, w, h) box to YOLO-style relative coords.

    YOLO annotation files store coordinates as fractions of the image size.
    """
    box_x, box_y, box_w, box_h = bbox
    img_h, img_w, _ = image.shape
    return box_x / img_w, box_y / img_h, box_w / img_w, box_h / img_h


def save_annotations(name, image, detections, class_names):
    """Write YOLO-format labels next to the image as <image_name>.txt.

    Each line is "<class_index> <cx> <cy> <w> <h>" with coordinates given
    relative to the image size (see convert2relative).

    BUGFIX: the original built the name with ``name.split(".")[:-1][0]``,
    which keeps only the text before the FIRST dot — any file name or path
    containing more than one dot (e.g. "img.v2.jpg") was truncated.
    os.path.splitext drops exactly the final extension.
    """
    file_name = os.path.splitext(name)[0] + ".txt"
    with open(file_name, "w") as f:
        for label, confidence, bbox in detections:
            x, y, w, h = convert2relative(image, bbox)
            class_idx = class_names.index(label)
            f.write("{} {:.4f} {:.4f} {:.4f} {:.4f}\n".format(class_idx, x, y, w, h))


def batch_detection_example():
    """Demo: batch-detect three sample images and save annotated copies."""
    args = parser()
    check_arguments_errors(args)
    batch_size = 3
    random.seed(3)  # deterministic bbox colors
    network, class_names, class_colors = darknet.load_network(
        args.config_file,
        args.data_file,
        args.weights,
        batch_size=batch_size
    )
    image_names = ['data/horses.jpg', 'data/horses.jpg', 'data/eagle.jpg']
    loaded = [cv2.imread(path) for path in image_names]
    annotated, detections = batch_detection(network, loaded, class_names,
                                            class_colors, batch_size=batch_size)
    # Save each annotated image into the working directory.
    for path, result in zip(image_names, annotated):
        cv2.imwrite(path.replace("data/", ""), result)
    print(detections)

def get_files(dir, suffix):
    """Recursively collect files under *dir* whose extension equals *suffix*.

    *suffix* must include the leading dot, e.g. ".jpg". Returns full paths.
    """
    matches = []
    for root, _subdirs, filenames in os.walk(dir):
        matches.extend(
            os.path.join(root, fname)
            for fname in filenames
            if os.path.splitext(fname)[1] == suffix
        )
    return matches
def bbox2points_zs(bbox):
    """Convert a YOLO-style (cx, cy, w, h) box to integer corner points.

    Returns (xmin, ymin, xmax, ymax) suitable for cv2.rectangle.
    """
    cx, cy, w, h = bbox
    half_w = w / 2
    half_h = h / 2
    return (int(round(cx - half_w)), int(round(cy - half_h)),
            int(round(cx + half_w)), int(round(cy + half_h)))

def main():
    """Walk input_dir for .jpg images, run YOLO on each one, and crop every
    detected object into <input_dir>/object_result/<image_name>/.

    NOTE(review): this unpacks two values from image_detection; with the
    single-return variant used by yoloGui (see image_detection's final
    return) this unpacking raises ValueError — switch the return line in
    image_detection when running this script.
    """
    args = parser()
    check_arguments_errors(args)
   
    # Hard-coded paths override the CLI values — change to your own.
    input_dir = '/home/your/darknet'
    config_file = '/home/your/darknet/cfg/my_yolov3.cfg'
    data_file = '/home/your/darknet/cfg/my_data.data'
    weights = '/home/your/darknet/myData/backup/my_yolov3_last.weights'  # change to your own path
    random.seed(3)  # deterministic bbox colors
    network, class_names, class_colors = darknet.load_network(
        config_file,
        data_file,
        weights,
        batch_size=args.batch_size
    )
    # Network input resolution; detections come back in this coordinate space.
    src_width = darknet.network_width(network)
    src_height = darknet.network_height(network)

    # Build the folder where the cropped results are saved.
    save_dir = os.path.join(input_dir, 'object_result')
    # Strip leading/trailing whitespace.
    save_dir=save_dir.strip()
    # Strip a trailing backslash.
    save_dir=save_dir.rstrip("\\")
    # Check whether the path already exists.
    isExists=os.path.exists(save_dir)
    if not isExists:
        # Create the directory when it is missing.
        os.makedirs(save_dir) 

        print(save_dir+' 创建成功')
    else:
        # Directory exists; just report it.
        print(save_dir + ' 目录已存在')

    image_list = get_files(input_dir, '.jpg')
    total_len = len(image_list)
    index = 0
    #while True:
    for i in range(0, total_len):
        image_name = image_list[i]
        src_image = cv2.imread(image_name)
        cv2.imshow('src_image', src_image)
        cv2.waitKey(1)

        prev_time = time.time()
        image, detections = image_detection(
            image_name, network, class_names, class_colors, args.thresh)
        #'''
        file_name, type_name = os.path.splitext(image_name)
        #print(file_name)
        #print(file_name.split(r'/'))
        print(''.join(file_name.split(r'/')[-1]) + 'bbbbbbbbb')
        # One sub-folder per source image holds that image's object crops.
        cut_image_name_list = file_name.split(r'/')[-1:] #cut_image_name_list is list
        save_dir_image = os.path.join(save_dir ,cut_image_name_list[0])
        if not os.path.exists(save_dir_image):
            os.makedirs(save_dir_image)
        cut_image_name = ''.join(cut_image_name_list) #list to str
        object_count = 0
        
        
        for label, confidence, bbox in detections:
            cut_image_name_temp = cut_image_name + "_{}.jpg".format(object_count)
            object_count += 1
            # bbox is (cx, cy, w, h) in network-input coordinates.
            xmin, ymin, xmax, ymax = bbox2points_zs(bbox)
            print("aaaaaaaaa x,{} y,{} w,{} h{}".format(xmin, ymin, xmax, ymax))
            # Rescale from network-input size to the source image size;
            # -0.5 / +0.5 bias rounds the min down and the max up.
            xmin_coordinary = (int)(xmin * src_image.shape[1] / src_width-0.5)
            ymin_coordinary = (int)(ymin * src_image.shape[0] / src_height-0.5)
            xmax_coordinary = (int)(xmax * src_image.shape[1] / src_width+0.5)
            ymax_coordinary = (int)(ymax * src_image.shape[0] / src_height+0.5)
            # Clamp every coordinate into the source image bounds.
            if xmin_coordinary>src_image.shape[1]:
                xmin_coordinary = src_image.shape[1]
            if ymin_coordinary>src_image.shape[0]:
                ymin_coordinary = src_image.shape[0]
            if xmax_coordinary>src_image.shape[1]:
                xmax_coordinary = src_image.shape[1]
            if ymax_coordinary>src_image.shape[0]:
                ymax_coordinary = src_image.shape[0]

            if xmin_coordinary < 0:
                xmin_coordinary = 0
            if ymin_coordinary < 0:
                ymin_coordinary = 0
            if xmax_coordinary < 0:
                xmax_coordinary = 0
            if ymax_coordinary < 0:
                ymax_coordinary = 0 

            print("qqqqqqqq   x,{} y,{} w,{} h{}".format(xmin_coordinary, ymin_coordinary, xmax_coordinary, ymax_coordinary))
            # Copy the clamped region into a fresh buffer and write the crop.
            out_iou_img = np.full((ymax_coordinary - ymin_coordinary, xmax_coordinary - xmin_coordinary, src_image.shape[2]), 114, dtype=np.uint8)
            out_iou_img[:,:] = src_image[ymin_coordinary:ymax_coordinary,xmin_coordinary:xmax_coordinary]
            cv2.imwrite(os.path.join(save_dir_image,cut_image_name_temp),out_iou_img)
        #'''
        #if args.save_labels:
        #if True:
            #save_annotations(image_name, image, detections, class_names)
        darknet.print_detections(detections, args.ext_output)
        fps = int(1/(time.time() - prev_time))
        print("FPS: {}".format(fps))
        if not args.dont_show:
            #cv2.imshow('Inference', image)
            cv2.waitKey(1)
            #if cv2.waitKey() & 0xFF == ord('q'):
                #break
        index += 1

if __name__ == "__main__":
    # Uncomment the next line for an example of batch processing.
    # batch_detection_example()
    main()

接下来是yoloGui.py文件,此文件也是参考一位大佬的,然后略加修改,具体博客找不到了,在此感谢这位大佬,在运行前须自行在当前目录下创建img文件夹,并将需要检测的图片放入img文件夹,同时在img文件夹中创建result文件夹,用于保存检测结果。此文件只支持图片的检测,如果需要视频流检测,可以自行修改。 在运行过程中我发现一次只能运行4-5次就会爆显存,应该可以静态全局加载权重文件解决,欢迎各位指正。

应注意各个路径都正确,包括cfg文件等

在darknet文件夹下新建yoloGui.py文件,复制以下内容

import cv2
import numpy as np
from PyQt5.QtWidgets import *
from PyQt5.QtGui import QIcon, QPixmap, QImage
import os
import sys
import detect
from PIL import Image
import darknet
import random
import string

crop = False
count = False


class MyClassImage(QWidget):
    """Minimal image-detection window: pick an image, run YOLO, show result."""

    def __init__(self):
        super().__init__()
        self.label2 = None  # left label: original image preview
        self.label3 = None  # right label: detection result
        self.initUI()   # build the widgets and show the window
        self.openfile_name_image = ''   # path of the currently selected file
        self.image = None  # cv2 image of the selected file (None until chosen)

    def initUI(self):
        """Create buttons, labels and layouts, then show the window."""
        self.resize(850, 400)
        self.setWindowTitle("检测图片")
        btn5 = QPushButton("退出检测图片", self)
        btn5.clicked.connect(self.close)
        btn1 = QPushButton("选择检测图片", self)
        # Clicking opens the file picker (self.select_image).
        btn1.clicked.connect(self.select_image)
        btn2 = QPushButton("开始检测", self)
        # Clicking runs detection on the chosen image (self.detect).
        btn2.clicked.connect(self.detect)

        # Image display labels and their captions.
        self.label2 = QLabel("", self)
        self.label2.resize(400, 300)
        self.label3 = QLabel("", self)
        self.label3.resize(400, 300)
        label4 = QLabel("              原始图片", self)
        label5 = QLabel("              检测图片", self)
        grid1 = QGridLayout()
        grid1.addWidget(label4, 0, 0)
        grid1.addWidget(label5, 0, 1)

        hlo = QHBoxLayout()
        hlo.addStretch()
        grid = QGridLayout()
        grid.addWidget(btn1, 0, 0)
        grid.addWidget(btn2, 0, 1)
        grid.addWidget(btn5, 0, 2)

        hlo1 = QHBoxLayout()
        hlo1.addWidget(self.label2)
        hlo1.addWidget(self.label3)

        vlo = QVBoxLayout(self)
        vlo.addLayout(grid)
        vlo.addLayout(hlo1)
        vlo.addStretch(1)
        vlo.addLayout(grid1)
        vlo.addStretch(0)
        self.show()

    def closeEvent(self, event):
        """Ask for confirmation before the window is closed."""
        result = QMessageBox.question(self, "提示:", "您真的要退出程序吗",
                                      QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)
        if result == QMessageBox.Yes:
            event.accept()
        else:
            event.ignore()

    def detect(self):
        """Run detection on the selected image and display the result."""
        if self.image is None:
            print('没有选择图片')
            return
        # Run detection; the annotated image is written to img/result/.
        run_detect(self.openfile_name_image)
        result_path = 'img/result/' + self.openfile_name_image.split('/')[-1]
        img = cv2.imread(result_path)
        if img is None:
            # BUGFIX: if detection failed and no result file was written,
            # the original crashed inside cv2.resize on a None image.
            print('detection result not found: ' + result_path)
            return
        img = cv2.resize(img, (400, 300), interpolation=cv2.INTER_AREA)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # Wrap the numpy buffer in a QImage and show it in label3.
        qimg = QImage(img.data, img.shape[1], img.shape[0], img.shape[1] * 3, QImage.Format_RGB888)
        self.label3.setPixmap(QPixmap.fromImage(qimg))

    def select_image(self):
        """Open a file dialog (starting in ./img/) and preview the choice."""
        temp, _ = QFileDialog.getOpenFileName(self, "选择照片文件", r"./img/")
        # BUGFIX: QFileDialog.getOpenFileName returns an empty string (not
        # None) when the dialog is cancelled, so the original
        # `if temp is not None` always passed and then read an empty path.
        if not temp:
            return
        self.openfile_name_image = temp
        self.image = cv2.imread(self.openfile_name_image)
        # Show the chosen image in label2, scaled to at most 400x300.
        self.label2.setPixmap(QPixmap(str(self.openfile_name_image)))
        self.label2.setScaledContents(True)
        self.label2.setMaximumSize(400, 300)
        self.label2.setScaledContents(True)


# Detection entry point used by the GUI.
# The darknet network is cached at module level: the original reloaded the
# weights on every call, which exhausted GPU memory after a handful of
# detections (the out-of-memory problem noted in the post).
_cached_network = None


def run_detect(path):
    """Detect objects in the image at *path* and save the annotated result
    to img/result/<file name>. Returns None; prints and bails out if the
    file cannot be opened as an image.
    """
    global _cached_network
    try:
        image = Image.open(path)
    except Exception as exc:  # narrowed from a bare except; report and stop
        print('Open Error! Try again!', exc)
        return
    # Only opened to validate the file — release the handle immediately.
    image.close()

    args = detect.parser()
    detect.check_arguments_errors(args)

    random.seed(3)  # deterministic bbox colors
    if _cached_network is None:
        _cached_network = darknet.load_network(
            args.config_file,
            args.data_file,
            args.weights,
            batch_size=args.batch_size,
        )
    network, class_names, class_colors = _cached_network
    r_image = detect.image_detection(path, network, class_names, class_colors, args.thresh)
    # image_detection returns a numpy array (no PIL .save()), so write it
    # out with OpenCV instead.
    cv2.imwrite('img/result/' + path.split('/')[-1], r_image)


if __name__ == '__main__':
    # Start the Qt application and hand control to the event loop.
    app = QApplication(sys.argv)
    mc = MyClassImage()
    sys.exit(app.exec_())

以下是运行截图:

 这只是个简易的界面编程,我也处于初学阶段,后续如果有更好的idea也会和大家分享,有不对的或不完善的地方也欢迎大家批评指正。后续会对界面进行一些美化,然后新增一些功能,会想办法解决out of memory的问题

对界面进行美化,新增输出物品label和number功能yolov3的Gui界面(2)--美化页面+输出识别物体名称及数量_perfectdisaster的博客-CSDN博客

解决out of memory问题,新增摄像头检测功能

yolov3的GUI界面(3)--解决out of memory问题,新增摄像头检测功能

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值