Testing the singleshotpose pose-estimation network on Windows 10 --- addendum

Apologies for not replying sooner to the questions on the earlier article. This post is an addendum to "Testing the singleshotpose pose-estimation network on Windows 10":
https://blog.csdn.net/CDSN_of_Shuairan/article/details/124413141

1. My environment
OS: Windows 10
IDE: PyCharm
Framework: PyTorch, with torch==1.10.2 and torchvision==0.11.3 (a quick version check follows below)
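Before running anything, it is worth confirming that the interpreter configured in PyCharm actually provides these versions and a working GPU, since both scripts below call model.cuda(). A minimal check (my own addition, not from the original article):

import torch
import torchvision

print(torch.__version__)          # expect 1.10.2
print(torchvision.__version__)    # expect 0.11.3
print(torch.cuda.is_available())  # must print True, otherwise model.cuda() will fail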

2. Paper: Real-Time Seamless Single Shot 6D Object Pose Prediction
Searching this title on Bing will turn up the paper.
Source code: https://github.com/Microsoft/singleshotpose/

For the step-by-step instructions on running the code, see the original article: https://blog.csdn.net/CDSN_of_Shuairan/article/details/124413141

Commenters on the previous article reported that the code could not be run by following those steps, so the full code for the single_obj_testing.py and sssstestsss.py scripts mentioned there is included below.

The code for single_obj_testing.py is as follows:

#matplotlib inline
import os
import time
import torch
from torch.autograd import Variable
from skimage.transform import resize
from torchvision import datasets, transforms
import scipy.io
import warnings

import numpy as np  # np is used directly below; the star import from utils also provides it, but be explicit
import matplotlib.pyplot as plt
import scipy.misc

from darknet import Darknet
import dataset
from utils import *
from MeshPly import MeshPly
warnings.filterwarnings("ignore")
# Create new directory
def makedirs(path):
    if not os.path.exists( path ):
        os.makedirs( path )


def valid(datacfg, modelcfg, weightfile):
    def truths_length(truths, max_num_gt=50):
        for i in range(max_num_gt):
            if truths[i][1] == 0:
                return i
        return max_num_gt  # every slot filled; avoid returning None for a full label tensor

    # Parse configuration files
    data_options = read_data_cfg(datacfg)
    valid_images = data_options['valid']
    meshname = data_options['mesh']
    backupdir = data_options['backup']
    name = data_options['name']
    gpus = data_options['gpus']
    fx = float(data_options['fx'])
    fy = float(data_options['fy'])
    u0 = float(data_options['u0'])
    v0 = float(data_options['v0'])
    im_width = int(data_options['width'])
    im_height = int(data_options['height'])
    if not os.path.exists(backupdir):
        makedirs(backupdir)

    # Parameters
    seed = int(time.time())
    os.environ['CUDA_VISIBLE_DEVICES'] = gpus
    torch.cuda.manual_seed(seed)
    save = False
    visualize = True
    testtime = True
    num_classes = 1
    testing_samples = 0.0
    edges_corners = [[0, 1], [0, 2], [0, 4], [1, 3], [1, 5], [2, 3], [2, 6], [3, 7], [4, 5], [4, 6], [5, 7], [6, 7]]
    if save:
        makedirs(backupdir + '/test')
        makedirs(backupdir + '/test/gt')
        makedirs(backupdir + '/test/pr')
    # To save
    testing_error_trans = 0.0
    testing_error_angle = 0.0
    testing_error_pixel = 0.0
    errs_2d = []
    errs_3d = []
    errs_trans = []
    errs_angle = []
    errs_corner2D = []
    preds_trans = []
    preds_rot = []
    preds_corners2D = []
    gts_trans = []
    gts_rot = []
    gts_corners2D = []

    # Read object model information, get 3D bounding box corners
    mesh = MeshPly(meshname)
    vertices = np.c_[np.array(mesh.vertices), np.ones((len(mesh.vertices), 1))].transpose()
    corners3D = get_3D_corners(vertices)
    try:
        diam = float(data_options['diam'])  # was 'options', an undefined name; read from the parsed data config
    except (KeyError, ValueError):
        diam = calc_pts_diameter(np.array(mesh.vertices))

    # Read intrinsic camera parameters
    intrinsic_calibration = get_camera_intrinsic(u0, v0, fx, fy)

    # Get validation file names
    with open(valid_images) as fp:
        tmp_files = fp.readlines()
        valid_files = [item.rstrip() for item in tmp_files]

    # Specify model, load pretrained weights, pass to GPU and set the module in evaluation mode
    model = Darknet(modelcfg)
    model.print_network()
    model.load_weights(weightfile)
    model.cuda()
    model.eval()
    test_width = model.test_width
    test_height = model.test_height
    num_keypoints = model.num_keypoints
    num_labels = num_keypoints * 2 + 3

    # Get the parser for the test dataset
    valid_dataset = dataset.listDataset(valid_images,
                                        shape=(test_width, test_height),
                                        shuffle=False,
                                        transform=transforms.Compose([transforms.ToTensor(), ]))

    # Specify the number of workers for multiple processing, get the dataloader for the test dataset
    kwargs = {'num_workers': 1, 'pin_memory': True}
    test_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=1, shuffle=False, **kwargs)

    logging("   Testing {}...".format(name))
    logging("   Number of test samples: %d" % len(test_loader.dataset))
    # Iterate through test batches (Batch size for test data is 1)
    count = 0
    for batch_idx, (data, target) in enumerate(test_loader):

        print('#############')
        print(batch_idx)
        # Images
        img = data[0, :, :, :]
        img = img.numpy().squeeze()
        img = np.transpose(img, (1, 2, 0))

        t1 = time.time()
        # Pass data to GPU
        data = data.cuda()
        target = target.cuda()
        # Variable(volatile=True) was removed in PyTorch 0.4+, so on torch 1.10 it is a no-op;
        # disable gradient tracking explicitly to keep memory use minimal during inference
        t2 = time.time()
        # Forward pass
        with torch.no_grad():
            output = model(data).data
        t3 = time.time()
        # Using confidence threshold, eliminate low-confidence predictions
        all_boxes = get_region_boxes(output, num_classes, num_keypoints)
        t4 = time.time()
        # Evaluation
        # Iterate through all batch elements
        for box_pr, target in zip([all_boxes], [target[0]]):
            print('@@@@@@@@@@@@')
            print(target)
            # For each image, get all the targets (for multiple object pose estimation, there might be more than 1 target per image)
            truths = target.view(-1, num_keypoints * 2 + 3)
            # Get how many objects are present in the scene
            num_gts = truths_length(truths)
            print('num_gts:', num_gts)
            print('num_keypoints:', num_keypoints)
            # Iterate through each ground-truth object
            for k in range(num_gts):  # num_gts is 1 for single-object evaluation
                box_gt = list()
                for j in range(1, 2 * num_keypoints + 1):  # the 18 keypoint coordinates (indices 1..18)
                    box_gt.append(truths[k][j])
                box_gt.extend([1.0, 1.0])
                box_gt.append(truths[k][0])

                # Denormalize the corner predictions
                corners2D_gt = np.array(np.reshape(box_gt[:18], [9, 2]), dtype='float32')
                corners2D_pr = np.array(np.reshape(box_pr[:18], [9, 2]), dtype='float32')
                corners2D_gt[:, 0] = corners2D_gt[:, 0] * im_width
                corners2D_gt[:, 1] = corners2D_gt[:, 1] * im_height
                corners2D_pr[:, 0] = corners2D_pr[:, 0] * im_width
                corners2D_pr[:, 1] = corners2D_pr[:, 1] * im_height
                preds_corners2D.append(corners2D_pr)
                gts_corners2D.append(corners2D_gt)

                # Compute corner prediction error
                corner_norm = np.linalg.norm(corners2D_gt - corners2D_pr, axis=1)
                corner_dist = np.mean(corner_norm)
                errs_corner2D.append(corner_dist)

                # Compute [R|t] by pnp
                R_gt, t_gt = pnp(np.array(np.transpose(np.concatenate((np.zeros((3, 1)), corners3D[:3, :]), axis=1)),
                                          dtype='float32'), corners2D_gt,
                                 np.array(intrinsic_calibration, dtype='float32'))
                R_pr, t_pr = pnp(np.array(np.transpose(np.concatenate((np.zeros((3, 1)), corners3D[:3, :]), axis=1)),
                                          dtype='float32'), corners2D_pr,
                                 np.array(intrinsic_calibration, dtype='float32'))

                # Compute translation error
                trans_dist = np.sqrt(np.sum(np.square(t_gt - t_pr)))
                errs_trans.append(trans_dist)

                # Compute angle error
                angle_dist = calcAngularDistance(R_gt, R_pr)
                errs_angle.append(angle_dist)

                # Compute pixel error
                Rt_gt = np.concatenate((R_gt, t_gt), axis=1)
                Rt_pr = np.concatenate((R_pr, t_pr), axis=1)
                proj_2d_gt = compute_projection(vertices, Rt_gt, intrinsic_calibration)
                proj_2d_pred = compute_projection(vertices, Rt_pr, intrinsic_calibration)
                proj_corners_gt = np.transpose(compute_projection(corners3D, Rt_gt, intrinsic_calibration))
                proj_corners_pr = np.transpose(compute_projection(corners3D, Rt_pr, intrinsic_calibration))
                norm = np.linalg.norm(proj_2d_gt - proj_2d_pred, axis=0)
                pixel_dist = np.mean(norm)
                errs_2d.append(pixel_dist)

                if visualize:
                    # Visualize
                    plt.xlim((0, im_width))
                    plt.ylim((0, im_height))
                    # plt.imshow(scipy.misc.imresize(img, (im_height, im_width)))
                    plt.imshow(resize(img, output_shape=(im_height, im_width)))
                    # plt.imshow(img)
                    # Projections
                    for edge in edges_corners:
                        plt.plot(proj_corners_gt[edge, 0], proj_corners_gt[edge, 1], color='g', linewidth=1.0)
                        plt.plot(proj_corners_pr[edge, 0], proj_corners_pr[edge, 1], color='b', linewidth=1.0)
                    plt.gca().invert_yaxis()
                    if batch_idx < 20:
                        makedirs('./resultimages/')  # create the output directory if it does not exist yet
                        plt.savefig('./resultimages/' + str(batch_idx) + 'duck' + '001.jpg')
                        plt.close()
                    # plt.show()

                # Compute 3D distances
                transform_3d_gt = compute_transformation(vertices, Rt_gt)
                transform_3d_pred = compute_transformation(vertices, Rt_pr)
                norm3d = np.linalg.norm(transform_3d_gt - transform_3d_pred, axis=0)
                vertex_dist = np.mean(norm3d)
                errs_3d.append(vertex_dist)

                # Sum errors
                testing_error_trans += trans_dist
                testing_error_angle += angle_dist
                testing_error_pixel += pixel_dist
                testing_samples += 1
                count = count + 1

                if save:
                    preds_trans.append(t_pr)
                    gts_trans.append(t_gt)
                    preds_rot.append(R_pr)
                    gts_rot.append(R_gt)

                    np.savetxt(backupdir + '/test/gt/R_' + valid_files[count][-8:-3] + 'txt',
                               np.array(R_gt, dtype='float32'))
                    np.savetxt(backupdir + '/test/gt/t_' + valid_files[count][-8:-3] + 'txt',
                               np.array(t_gt, dtype='float32'))
                    np.savetxt(backupdir + '/test/pr/R_' + valid_files[count][-8:-3] + 'txt',
                               np.array(R_pr, dtype='float32'))
                    np.savetxt(backupdir + '/test/pr/t_' + valid_files[count][-8:-3] + 'txt',
                               np.array(t_pr, dtype='float32'))
                    np.savetxt(backupdir + '/test/gt/corners_' + valid_files[count][-8:-3] + 'txt',
                               np.array(corners2D_gt, dtype='float32'))
                    np.savetxt(backupdir + '/test/pr/corners_' + valid_files[count][-8:-3] + 'txt',
                               np.array(corners2D_pr, dtype='float32'))

        t5 = time.time()

    # Compute 2D projection error, 6D pose error, 5cm5degree error
    px_threshold = 5  # 5 pixel threshold for 2D reprojection error is standard in recent sota 6D object pose estimation works
    eps = 1e-5
    acc = len(np.where(np.array(errs_2d) <= px_threshold)[0]) * 100. / (len(errs_2d) + eps)
    acc5cm5deg = len(np.where((np.array(errs_trans) <= 0.05) & (np.array(errs_angle) <= 5))[0]) * 100. / (
                len(errs_trans) + eps)
    acc3d10 = len(np.where(np.array(errs_3d) <= diam * 0.1)[0]) * 100. / (len(errs_3d) + eps)
    corner_acc = len(np.where(np.array(errs_corner2D) <= px_threshold)[0]) * 100. / (len(errs_corner2D) + eps)
    mean_err_2d = np.mean(errs_2d)
    mean_corner_err_2d = np.mean(errs_corner2D)
    nts = float(testing_samples)

    if testtime:
        print('-----------------------------------')
        print('  tensor to cuda : %f' % (t2 - t1))
        print('    forward pass : %f' % (t3 - t2))
        print('get_region_boxes : %f' % (t4 - t3))
        print(' prediction time : %f' % (t4 - t1))
        print('            eval : %f' % (t5 - t4))
        print('-----------------------------------')

    # Print test statistics
    logging('Results of {}'.format(name))
    logging('   Acc using {} px 2D Projection = {:.2f}%'.format(px_threshold, acc))
    logging('   Acc using 10% threshold - {} vx 3D Transformation = {:.2f}%'.format(diam * 0.1, acc3d10))
    logging('   Acc using 5 cm 5 degree metric = {:.2f}%'.format(acc5cm5deg))
    logging("   Mean 2D pixel error is %f, Mean vertex error is %f, mean corner error is %f" % (
    mean_err_2d, np.mean(errs_3d), mean_corner_err_2d))
    logging('   Translation error: %f m, angle error: %f degree, pixel error: % f pix' % (
    testing_error_trans / nts, testing_error_angle / nts, testing_error_pixel / nts))

    if save:
        predfile = backupdir + '/predictions_linemod_' + name + '.mat'
        scipy.io.savemat(predfile,
                         {'R_gts': gts_rot, 't_gts': gts_trans, 'corner_gts': gts_corners2D, 'R_prs': preds_rot,
                          't_prs': preds_trans, 'corner_prs': preds_corners2D})


if __name__ == '__main__':

    # Swap the data config to evaluate a different object:
    # datacfg = 'cfg/ape.data'
    # datacfg = 'cfg/can.data'
    datacfg = 'cfg/cat.data'

    modelcfg = 'cfg/yolo-pose.cfg'
    weightfile = 'backup/cat/model_backup.weights'
    valid(datacfg, modelcfg, weightfile)
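To evaluate a different LINEMOD object, only the data config and the weight file change; modelcfg stays the same. A sketch for the ape model (the backup/ape path mirrors the cat layout above, so treat it as an assumption about your local checkpoint directory):

datacfg = 'cfg/ape.data'
modelcfg = 'cfg/yolo-pose.cfg'
weightfile = 'backup/ape/model_backup.weights'  # assumed path, following the cat example above
valid(datacfg, modelcfg, weightfile)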
    

The code for sssstestsss.py is as follows:

#matplotlib inline
import os
import sys
sys.path.append('..')
import time
import torch
import numpy as np  # np is used directly below; the star import from utils_multi also provides it, but be explicit
import matplotlib.pyplot as plt
import scipy.misc
import warnings
import argparse
warnings.filterwarnings("ignore")
from torch.autograd import Variable
from torchvision import datasets, transforms

import dataset_multi
from darknet_multi import Darknet
from utils_multi import *
from cfg import parse_cfg
from MeshPly import MeshPly

# from scipy.misc import imsave
import scipy.io


def valid(datacfg, cfgfile, weightfile):
    def truths_length(truths, max_num_gt=50):
        for i in range(max_num_gt):
            if truths[i][1] == 0:
                return i
        return max_num_gt  # every slot filled; avoid returning None for a full label tensor

    # Parse data configuration files
    data_options = read_data_cfg(datacfg)
    print('$$$$$$$$$$$$$$$')
    print(data_options)
    valid_images = data_options['valid'][1:]   # the path parsed as '../filename/' and could not be opened, so strip the first character with [1:]
    print(valid_images)

    meshname = data_options['mesh'][1:]   # same '../filename/' issue as above, handled with [1:]
    name = data_options['name']
    print('#####################')
    print(name)
    im_width = int(data_options['im_width'])
    im_height = int(data_options['im_height'])
    fx = float(data_options['fx'])
    fy = float(data_options['fy'])
    u0 = float(data_options['u0'])
    v0 = float(data_options['v0'])

    # Parse net configuration file
    net_options = parse_cfg(cfgfile)[0]
    loss_options = parse_cfg(cfgfile)[-1]
    conf_thresh = float(net_options['conf_thresh'])
    num_keypoints = int(net_options['num_keypoints'])
    num_classes = int(loss_options['classes'])
    num_anchors = int(loss_options['num'])
    anchors = [float(anchor) for anchor in loss_options['anchors'].split(',')]

    # Read object model information, get 3D bounding box corners, get intrinsics
    mesh = MeshPly(meshname)
    vertices = np.c_[np.array(mesh.vertices), np.ones((len(mesh.vertices), 1))].transpose()
    corners3D = get_3D_corners(vertices)
    diam = float(data_options['diam'])
    intrinsic_calibration = get_camera_intrinsic(u0, v0, fx, fy)  # camera params

    # Network I/O params
    num_labels = 2 * num_keypoints + 3  # +2 for width, height, +1 for object class
    errs_2d = []  # to save
    with open(valid_images) as fp:  # validation file names
        tmp_files = fp.readlines()
        valid_files = [item.rstrip() for item in tmp_files]

    # Compute-related Parameters
    use_cuda = True  # whether to use CUDA or not
    kwargs = {'num_workers': 4, 'pin_memory': True}  # number of workers etc.

    # Specify model, load pretrained weights, pass to GPU and set the module in evaluation mode
    model = Darknet(cfgfile)
    model.load_weights(weightfile)
    model.cuda()
    model.eval()

    # Get the dataloader for the test dataset
    valid_dataset = dataset_multi.listDataset(valid_images[2:], shape=(model.width, model.height), shuffle=False,
                                              objclass=name, transform=transforms.Compose([transforms.ToTensor(), ]))
    test_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=1, shuffle=False, **kwargs)

    # Iterate through test batches (Batch size for test data is 1)
    logging('Testing {}...'.format(name))
    for batch_idx, (data, target) in enumerate(test_loader):

        t1 = time.time()
        # Pass data to GPU
        if use_cuda:
            data = data.cuda()
            # target = target.cuda()

        # Variable(volatile=True) was removed in PyTorch 0.4+, so on torch 1.10 it is a no-op;
        # disable gradient tracking explicitly for inference instead
        t2 = time.time()

        # Forward pass
        with torch.no_grad():
            output = model(data).data
        t3 = time.time()

        # Using confidence threshold, eliminate low-confidence predictions
        trgt = target[0].view(-1, num_labels)
        all_boxes = get_multi_region_boxes(output, conf_thresh, num_classes, num_keypoints, anchors, num_anchors,
                                           int(trgt[0][0]), only_objectness=0)
        t4 = time.time()

        # Iterate through all images in the batch
        for i in range(output.size(0)):

            # For each image, get all the predictions
            boxes = all_boxes[i]

            # For each image, get all the targets (for multiple object pose estimation, there might be more than 1 target per image)
            truths = target[i].view(-1, num_labels)

            # Get how many objects are present in the scene
            num_gts = truths_length(truths)

            # Iterate through each ground-truth object
            for k in range(num_gts):
                box_gt = list()
                for j in range(1, num_labels):
                    box_gt.append(truths[k][j])
                box_gt.extend([1.0, 1.0])
                box_gt.append(truths[k][0])

                # If the prediction has the highest confidence, choose it as our prediction
                best_conf_est = -sys.maxsize
                for j in range(len(boxes)):
                    if (boxes[j][2 * num_keypoints] > best_conf_est) and (
                            boxes[j][2 * num_keypoints + 2] == int(truths[k][0])):
                        best_conf_est = boxes[j][2 * num_keypoints]
                        box_pr = boxes[j]
                        match = corner_confidence(box_gt[:2 * num_keypoints],
                                                  torch.FloatTensor(boxes[j][:2 * num_keypoints]))

                # Denormalize the corner predictions
                corners2D_gt = np.array(np.reshape(box_gt[:2 * num_keypoints], [-1, 2]), dtype='float32')
                corners2D_pr = np.array(np.reshape(box_pr[:2 * num_keypoints], [-1, 2]), dtype='float32')
                corners2D_gt[:, 0] = corners2D_gt[:, 0] * im_width
                corners2D_gt[:, 1] = corners2D_gt[:, 1] * im_height
                corners2D_pr[:, 0] = corners2D_pr[:, 0] * im_width
                corners2D_pr[:, 1] = corners2D_pr[:, 1] * im_height
                corners2D_gt_corrected = fix_corner_order(corners2D_gt)  # Fix the order of corners

                # Compute [R|t] by pnp
                objpoints3D = np.array(np.transpose(np.concatenate((np.zeros((3, 1)), corners3D[:3, :]), axis=1)),
                                       dtype='float32')
                K = np.array(intrinsic_calibration, dtype='float32')
                R_gt, t_gt = pnp(objpoints3D, corners2D_gt_corrected, K)
                R_pr, t_pr = pnp(objpoints3D, corners2D_pr, K)

                # Compute pixel error
                Rt_gt = np.concatenate((R_gt, t_gt), axis=1)
                Rt_pr = np.concatenate((R_pr, t_pr), axis=1)
                proj_2d_gt = compute_projection(vertices, Rt_gt, intrinsic_calibration)
                proj_2d_pred = compute_projection(vertices, Rt_pr, intrinsic_calibration)
                proj_corners_gt = np.transpose(compute_projection(corners3D, Rt_gt, intrinsic_calibration))
                proj_corners_pr = np.transpose(compute_projection(corners3D, Rt_pr, intrinsic_calibration))
                norm = np.linalg.norm(proj_2d_gt - proj_2d_pred, axis=0)
                pixel_dist = np.mean(norm)
                errs_2d.append(pixel_dist)

        t5 = time.time()

    # Compute 2D projection score
    eps = 1e-5
    for px_threshold in [5, 10, 15, 20, 25, 30, 35, 40, 45, 50]:
        acc = len(np.where(np.array(errs_2d) <= px_threshold)[0]) * 100. / (len(errs_2d) + eps)
        # Print test statistics
        logging('   Acc using {} px 2D Projection = {:.2f}%'.format(px_threshold, acc))


if __name__ == '__main__':

    modelcfg = 'multi_obj_pose_estimation/cfg/yolo-pose-multi.cfg'
    datacfg = 'multi_obj_pose_estimation/cfg/ape_occlusion.data'
    weightfile = 'backup_multi/model_backup.weights'

    valid(datacfg, modelcfg, weightfile)
    datacfg = 'multi_obj_pose_estimation/cfg/can_occlusion.data'
    valid(datacfg, modelcfg, weightfile)
    datacfg = 'multi_obj_pose_estimation/cfg/cat_occlusion.data'
    valid(datacfg, modelcfg, weightfile)
    datacfg = 'multi_obj_pose_estimation/cfg/duck_occlusion.data'
    valid(datacfg, modelcfg, weightfile)
    datacfg = 'multi_obj_pose_estimation/cfg/glue_occlusion.data'
    valid(datacfg, modelcfg, weightfile)
    datacfg = 'multi_obj_pose_estimation/cfg/holepuncher_occlusion.data'
    valid(datacfg, modelcfg, weightfile)
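The [1:] workarounds above show that path resolution is the fragile part of this setup on Windows: every config path in the __main__ block is resolved against the current working directory. A small pre-flight check you can run first (my own sketch, not part of the original script) makes missing files obvious before valid() is called:

import os

for p in ['multi_obj_pose_estimation/cfg/yolo-pose-multi.cfg',
          'backup_multi/model_backup.weights']:
    status = 'found' if os.path.exists(p) else 'MISSING - check your working directory'
    print(p, '->', status)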