关于yolo-v3目标检测中报错找不到索引问题

from __future__ import division
 
from models import *
from utils.utils import *
from utils.datasets import *
 
import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
import sys
import time
import datetime
import argparse
 
from PIL import Image
 
import torch
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
 
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.ticker import NullLocator
 
if __name__ == "__main__":
    # Parse command-line options for the detector.
    parser = argparse.ArgumentParser()
    parser.add_argument("--image_folder", type=str, default="data/samples", help="path to dataset")
    parser.add_argument("--model_def", type=str, default="config/yolov3.cfg", help="path to model definition file")
    parser.add_argument("--weights_path", type=str, default="weights/yolov3.weights", help="path to weights file")
    parser.add_argument("--class_path", type=str, default="data/coco.names", help="path to class label file")
    parser.add_argument("--conf_thres", type=float, default=0.8, help="object confidence threshold")
    parser.add_argument("--nms_thres", type=float, default=0.4, help="iou threshold for non-maximum suppression")
    parser.add_argument("--batch_size", type=int, default=1, help="size of the batches")
    parser.add_argument("--n_cpu", type=int, default=0, help="number of cpu threads to use during batch generation")
    parser.add_argument("--img_size", type=int, default=416, help="size of each image dimension")
    parser.add_argument("--checkpoint_model", type=str, help="path to checkpoint model")
    opt = parser.parse_args()
    print(opt)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    os.makedirs("output", exist_ok=True)

    # Set up model
    model = Darknet(opt.model_def, img_size=opt.img_size).to(device)

    if opt.weights_path.endswith(".weights"):
        # Load original darknet-format weights
        model.load_darknet_weights(opt.weights_path)
    else:
        # Load a PyTorch checkpoint; map_location lets a CUDA-trained
        # checkpoint load on a CPU-only machine.
        model.load_state_dict(torch.load(opt.weights_path, map_location=device))

    model.eval()  # Set in evaluation mode (freezes dropout / batch-norm stats)

    dataloader = DataLoader(
        ImageFolder(opt.image_folder, img_size=opt.img_size),
        batch_size=opt.batch_size,
        shuffle=False,
        num_workers=opt.n_cpu,
    )

    classes = load_classes(opt.class_path)  # Extracts class labels from file

    Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor

    imgs = []  # Stores image paths
    img_detections = []  # Stores detections for each image index

    print("\nPerforming object detection:")
    prev_time = time.time()
    for batch_i, (img_paths, input_imgs) in enumerate(dataloader):
        # Configure input (torch.autograd.Variable is a deprecated no-op
        # since PyTorch 0.4, so plain tensors are used directly).
        input_imgs = input_imgs.type(Tensor)

        # Get detections
        with torch.no_grad():
            detections = model(input_imgs)
            detections = non_max_suppression(detections, opt.conf_thres, opt.nms_thres)

        # Log progress
        current_time = time.time()
        inference_time = datetime.timedelta(seconds=current_time - prev_time)
        prev_time = current_time
        print("\t+ Batch %d, Inference Time: %s" % (batch_i, inference_time))

        # Save image and detections
        imgs.extend(img_paths)
        img_detections.extend(detections)

    # Bounding-box colors
    cmap = plt.get_cmap("tab20b")
    colors = [cmap(i) for i in np.linspace(0, 1, 20)]

    print("\nSaving images:")
    # Iterate through images and save plot of detections
    for img_i, (path, detections) in enumerate(zip(imgs, img_detections)):

        print("(%d) Image: '%s'" % (img_i, path))

        # Create plot. NOTE: the original called plt.figure() AND
        # plt.subplots(), leaking one empty figure per image.
        img = np.array(Image.open(path))
        fig, ax = plt.subplots(1)
        ax.imshow(img)

        # Draw bounding boxes and labels of detections
        if detections is not None:
            # Rescale boxes from the network input resolution to the
            # original image size.
            detections = rescale_boxes(detections, opt.img_size, img.shape[:2])
            unique_labels = detections[:, -1].cpu().unique()
            n_cls_preds = len(unique_labels)
            bbox_colors = random.sample(colors, n_cls_preds)
            for x1, y1, x2, y2, conf, cls_conf, cls_pred in detections:
                cls_idx = int(cls_pred)
                # Fail with a diagnostic message when the model predicts a
                # class id outside the label file (weights / class-file
                # mismatch), instead of a bare "list index out of range".
                if cls_idx >= len(classes):
                    raise IndexError(
                        "Predicted class id %d but '%s' defines only %d classes; "
                        "the weights were likely trained with a different class file"
                        % (cls_idx, opt.class_path, len(classes))
                    )

                print("\t+ Label: %s, Conf: %.5f" % (classes[cls_idx], cls_conf.item()))

                box_w = x2 - x1
                box_h = y2 - y1

                color = bbox_colors[int(np.where(unique_labels == cls_idx)[0])]
                # Create a Rectangle patch
                bbox = patches.Rectangle((x1, y1), box_w, box_h, linewidth=2, edgecolor=color, facecolor="none")
                # Add the bbox to the plot
                ax.add_patch(bbox)
                # Add label
                plt.text(
                    x1,
                    y1,
                    s=classes[cls_idx],
                    color="white",
                    verticalalignment="top",
                    bbox={"color": color, "pad": 0},
                )

        # Save generated image with detections
        plt.axis("off")
        plt.gca().xaxis.set_major_locator(NullLocator())
        plt.gca().yaxis.set_major_locator(NullLocator())
        # os.path handles both "/" and "\" separators, so this also works
        # on Windows (the original split("/") did not); the computed name
        # is actually used in savefig, so images no longer overwrite each other.
        filename = os.path.splitext(os.path.basename(path))[0]
        plt.savefig(f"output/{filename}.png", bbox_inches="tight", pad_inches=0.0)
        plt.close(fig)
 
 

以上是我的检测代码,下面是我的报错信息

 
Saving images:
(0) Image: 'data/samples\1.jpg'
Traceback (most recent call last):
  File "D:\PyTorch-YOLOv3\PyTorch-YOLOv3\detect.py", line 116, in <module>
    print("\t+ Label: %s, Conf: %.5f" % (classes[int(cls_pred)], cls_conf.item()))
IndexError: list index out of range
 
 

遇到此类问题,如果新手用的是从其他博主那里找到的源码,源码本身一般是没有错的,应多考虑文件的配置以及训练模型是否正确。以我的问题为例:在我训练模型的时候,采用的分类配置是 coco 的,并且没有报错,导致我一直没有发现问题。直到我把下面的代码放在报错代码的上面,才恍然大悟。

print(classes)
print(int(cls_pred))

根据代码报错位置来看,该代码对图片的识别已经完成,但是在按索引取分类名时超出了范围。加入上一段代码后,打印出的索引为 2,但我只有一个分类 ren(人),于是我查看了 coco 的分类,第三个果然是 person,一下子就明白了。解决办法就是,使用正确的训练配置重新训练,然后再进行识别。

https://ask.csdn.net/questions/8058924?spm=1001.2014.3001.5505,这是我原来问题位置可以参考,总之遇到这类问题先找报错代码位置,确定问题究竟在哪里,检查每个细节,不能想当然。

  • 0
    点赞
  • 6
    收藏
    觉得还不错? 一键收藏
  • 1
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值