The `run_test` evaluation routine (from `tools/train_net.py`):

def run_test(cfg, infshell, model, test_loader, final_output_dir, logger, use_wandb = False):
    """Evaluate *model* on *test_loader* and return the primary AP metric.

    Runs single-image inference, gathers per-rank detections, writes them to
    ``eval_results.json`` on the main process, and scores them with the COCO
    keypoint evaluator (or the custom CrowdPose evaluator).

    Args:
        cfg: experiment config; ``cfg.DATASET.DATASET`` selects the evaluator.
        infshell: post-processing callable mapping raw network outputs to
            ``(final_poses, scores)`` for one image (may return ``None`` poses).
        model: network under evaluation; switched to eval mode here.
        test_loader: DataLoader with batch size 1 (asserted below).
        final_output_dir: directory that receives ``eval_results.json``.
        logger: logger used for the one-line validation summary.
        use_wandb: when True, also log headline metrics to Weights & Biases.

    Returns:
        float: AP (``stats[0]``) on the main process; ``0.0`` on other ranks.

    Raises:
        ValueError: if ``cfg.DATASET.DATASET`` names an unsupported dataset.
    """
    DETECTION_RESULTS = []
    model.eval()
    local_rank = logocap.utils.comm.get_rank()

    # Progress bar only on rank 0 so multi-process runs don't duplicate output.
    if local_rank == 0:
        pbar = tqdm(total=len(test_loader))

    for i, (images, rgb, meta) in enumerate(test_loader):
        assert 1 == images.size(0), 'Test batch size should be 1'
        with torch.no_grad():
            outputs, _ = model(images.cuda())

        final_poses, scores = infshell(images, rgb, outputs, meta[0])
        if final_poses is None:
            continue
        for pose_id, (pose, score) in enumerate(zip(final_poses, scores)):
            # Tight bounding box around the predicted keypoints, stored in
            # COCO [x, y, w, h] format.
            xmin = pose[:, 0].min().item()
            ymin = pose[:, 1].min().item()
            xmax = pose[:, 0].max().item()
            ymax = pose[:, 1].max().item()
            ans = {
                'image_id': meta[0]['id'],
                'category_id': 1,
                'keypoints': pose.reshape(-1).tolist(),
                'score': score.item(),
                'bbox': [xmin, ymin, xmax - xmin, ymax - ymin],
            }
            DETECTION_RESULTS.append(ans)
        if local_rank == 0:
            pbar.update(1)

    if local_rank == 0:
        pbar.close()

    # Make sure every rank has finished inference before gathering results.
    if logocap.utils.comm.get_world_size() > 1:
        dist.barrier()

    DETECTION_RESULTS_ALL = comm.all_gather(DETECTION_RESULTS)
    DETECTION_RESULTS_ALL = sum(DETECTION_RESULTS_ALL, [])

    # Only the main process scores and reports; other ranks return a dummy.
    if not logocap.utils.comm.is_main_process():
        return 0.0

    results_path_gathered = os.path.join(final_output_dir, 'eval_results.json')
    with open(results_path_gathered, 'w') as writer:
        json.dump(DETECTION_RESULTS_ALL, writer)

    coco = test_loader.dataset.coco
    coco_eval = coco.loadRes(results_path_gathered)
    if 'coco' in cfg.DATASET.DATASET:
        # BUG FIX: previously the evaluator was imported here but never run,
        # so `stats` stayed undefined and formatting the validation message
        # below raised a NameError on COCO datasets.
        from pycocotools.cocoeval import COCOeval as COCOEvaluator
        evaluator = COCOEvaluator(coco, coco_eval, 'keypoints')
        evaluator.evaluate()
        evaluator.accumulate()
        evaluator.summarize()
        stats = evaluator.stats
    elif 'crowd_pose' in cfg.DATASET.DATASET:
        # Custom evaluator replacing the crowdposetools COCOeval path.
        stats = evaluate_model(coco, coco_eval)
    else:
        raise ValueError(
            'Unsupported dataset for evaluation: {}'.format(cfg.DATASET.DATASET))

    if 'coco' in cfg.DATASET.DATASET:
        validation_msg = 'AP = {:.3f}| AP50 = {:.3f}| AP75 = {:.3f}| APM = {:.3f}| APL = {:.3f}'.format(stats[0],stats[1],stats[2],stats[3],stats[4])
    else:
        validation_msg = 'AP = {:.3f}| AP50 = {:.3f}| AP75 = {:.3f}| APE = {:.3f}| APM = {:.3f}| APH = {:.3f}'.format(stats[0],stats[1],stats[2],stats[8],stats[9], stats[10])

    logger.info('validation: {}'.format(validation_msg))
    if use_wandb:
        # NOTE(review): indices 3/4 are APM/APL for COCO; CrowdPose uses a
        # different stats layout (see validation_msg above) — confirm that
        # logging these keys is intended for non-COCO runs too.
        wandb.log({
            "AP": stats[0],
            "AP50": stats[1],
            "AP75": stats[2],
            "APM": stats[3],
            "APL": stats[4],
        })
    return float(stats[0])
Traceback (most recent call last):
  File "/media/wagnchogn/data/logocap-main/tools/train_net.py", line 363, in <module>
    main()
  File "/media/wagnchogn/data/logocap-main/tools/train_net.py", line 203, in main
    main_worker(
  File "/media/wagnchogn/data/logocap-main/tools/train_net.py", line 340, in main_worker
    perf_indicator = run_test(cfg, InferenceShell, model, test_loader, final_output_dir, logger, use_wandb = args.wandb)
  File "/media/wagnchogn/data/logocap-main/tools/train_net.py", line 138, in run_test
    stats = evaluate_model(coco, coco_eval)
  File "/media/wagnchogn/data/logocap-main/custom_evaluator.py", line 43, in evaluate_model
    coco_eval.evaluate()
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/site-packages/pycocotools/cocoeval.py", line 148, in evaluate
    self.ious = {(imgId, catId): computeIoU(imgId, catId) \
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/site-packages/pycocotools/cocoeval.py", line 148, in <dictcomp>
    self.ious = {(imgId, catId): computeIoU(imgId, catId) \
  File "/home/wagnchogn/anaconda3/envs/logocap-temp/lib/python3.8/site-packages/pycocotools/cocoeval.py", line 229, in computeOks
    e = (dx**2 + dy**2) / vars / (gt['area']+np.spacing(1)) / 2
ValueError: operands could not be broadcast together with shapes (5,) (17,) 
DONE (t=0.01s)
creating index...
index created!
Running per image evaluation...
Evaluate annotation type *keypoints*

    def computeOks(self, imgId, catId):
        """Compute the OKS (object keypoint similarity) matrix between all
        detections and ground truths for one (image, category) pair.

        Returns a ``(len(dts), len(gts))`` array of OKS values in [0, 1],
        or ``[]`` when either side is empty.

        NOTE(review): the upstream condition was ``and`` (return [] only when
        BOTH sides are empty); it is changed to ``or`` here — see below.

        NOTE(review): the traceback above ("shapes (5,) (17,)") arises when
        the annotations carry a different number of keypoints (here 5) than
        ``len(p.kpt_oks_sigmas)`` (COCO default 17): ``dx**2 + dy**2`` then
        cannot broadcast against ``vars``. Fix by setting ``kpt_oks_sigmas``
        to match the dataset's keypoint count — TODO confirm.
        """
        p = self.params
        # dimention here should be Nxm
        gts = self._gts[imgId, catId]
        dts = self._dts[imgId, catId]
        # Sort detections by descending score (stable), then cap at maxDets.
        inds = np.argsort([-d['score'] for d in dts], kind='mergesort')
        dts = [dts[i] for i in inds]
        if len(dts) > p.maxDets[-1]:
            dts = dts[0:p.maxDets[-1]]
        # if len(gts) == 0 and len(dts) == 0:  # upstream pycocotools original
        if len(gts) == 0 or len(dts) == 0:
            return []
        ious = np.zeros((len(dts), len(gts)))
        sigmas = p.kpt_oks_sigmas
        # Per-keypoint normalization constants; length == len(sigmas).
        vars = (sigmas * 2)**2
        k = len(sigmas)
        # compute oks between each detection and ground truth object
        for j, gt in enumerate(gts):
            # create bounds for ignore regions(double the gt bbox)
            g = np.array(gt['keypoints'])
            # Flat [x0, y0, v0, x1, y1, v1, ...] triplets -> x, y, visibility.
            xg = g[0::3]; yg = g[1::3]; vg = g[2::3]
            k1 = np.count_nonzero(vg > 0)
            bb = gt['bbox']
            x0 = bb[0] - bb[2]; x1 = bb[0] + bb[2] * 2
            y0 = bb[1] - bb[3]; y1 = bb[1] + bb[3] * 2
            for i, dt in enumerate(dts):
                d = np.array(dt['keypoints'])
                xd = d[0::3]; yd = d[1::3]
                if k1>0:
                    # measure the per-keypoint distance if keypoints visible
                    dx = xd - xg
                    dy = yd - yg
                else:
                    # measure minimum distance to keypoints in (x0,y0) & (x1,y1)
                    # (distance from each detected point to the doubled box)
                    z = np.zeros((k))
                    dx = np.max((z, x0-xd),axis=0)+np.max((z, xd-x1),axis=0)
                    dy = np.max((z, y0-yd),axis=0)+np.max((z, yd-y1),axis=0)
                # Normalized squared distance per keypoint; this line is where
                # the keypoint-count / sigma-count mismatch raises ValueError.
                e = (dx**2 + dy**2) / vars / (gt['area']+np.spacing(1)) / 2
                if k1 > 0:
                    # Score only on labeled (visible) keypoints.
                    e=e[vg > 0]
                ious[i, j] = np.sum(np.exp(-e)) / e.shape[0]
        return ious

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值