Automatic Annotation Based on YOLOv8-pose

Parsing the JSON File

A labelme annotation file has the following structure (the # comments are explanatory only; real JSON does not allow comments):

{
    "version": "5.1.1",    # labelme version string
    "flags": {},    # dict of image-level flags
    "shapes": [    # list of dicts, one per shape
        {
            "label": "person",    # class label string
            "points": [    # list of [x, y] points; for a rectangle: top-left and bottom-right corners
                [
                    1184.0,
                    145.0
                ],
                [
                    1563.0,
                    743.0
                ]
            ],
            "group_id": 1,    # groups this box with its keypoints
            "shape_type": "rectangle",    # shape type
            "flags": {}    # dict of shape-level flags
        },
        {
            "label": "0",
            "points": [
                [
                    1519.164794921875,
                    211.12527465820312
                ]
            ],
            "group_id": 1,
            "shape_type": "point",    # "rectangle" for boxes, "point" for keypoints
            "flags": {}
        },
        ......
        {
            "label": "16",
            "points": [
                [
                    1305.552734375,
                    662.0692138671875
                ]
            ],
            "group_id": 1,
            "shape_type": "point",
            "flags": {}
        }
    ],
    "imagePath": "22112622_003270.jpg",    # image file name
    "imageData": ".......",    # base64-encoded image data, the most important field
    "imageHeight": 1080,    # image height in pixels
    "imageWidth": 1920    # image width in pixels
}
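
As a quick orientation, here is a minimal sketch of loading such a file and walking its shapes with Python's json module (the filename below is just the example image from above):

import json

# Load one labelme annotation file (example path)
with open("22112622_003270.json") as f:
    ann = json.load(f)

print(ann["imagePath"], ann["imageWidth"], ann["imageHeight"])
for shape in ann["shapes"]:
    # rectangles carry the person box, points carry individual keypoints
    print(shape["shape_type"], shape["label"], shape["group_id"], shape["points"])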

Now that the JSON structure is clear, the next step is to work out how to pull the values we need out of YOLOv8-pose.

Extracting the Corresponding Information from YOLOv8-pose

def postprocess(self, preds, img, orig_imgs):
    """Return detection results for a given input image or list of images."""

    preds = ops.non_max_suppression(preds,
                                    self.args.conf,
                                    self.args.iou,
                                    agnostic=self.args.agnostic_nms,
                                    max_det=self.args.max_det,
                                    classes=self.args.classes,
                                    nc=len(self.model.names))

    if not isinstance(orig_imgs, list):  # input images are a torch.Tensor, not a list
        orig_imgs = ops.convert_torch2numpy_batch(orig_imgs)
    torch.set_printoptions(sci_mode=False)

    results = []
    for i, pred in enumerate(preds):
        orig_img = orig_imgs[i]
        # pred holds everything for each detection; pred[:, :4] are the boxes, rescaled to the original image size
        pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape).round()
        # pred[:, 6:] are the keypoint coordinates
        pred_kpts = pred[:, 6:].view(len(pred), *self.model.kpt_shape) if len(pred) else pred[:, 6:]
        pred_kpts = ops.scale_coords(img.shape[2:], pred_kpts, orig_img.shape)
        img_path = self.batch[0][i]
        results.append(
            Results(orig_img, path=img_path, names=self.model.names, boxes=pred[:, :6], keypoints=pred_kpts))

    return results

The code above is in ultralytics/ultralytics/models/yolo/pose/predict.py.
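
This postprocess hook runs automatically whenever a pose model predicts; a minimal sketch of triggering it (the weights file and source directory below are placeholders):

from ultralytics import YOLO

model = YOLO("yolov8n-pose.pt")              # any YOLOv8 pose checkpoint
results = model.predict(source="images/")    # PosePredictor.postprocess assembles these Results
for r in results:
    print(r.boxes.xyxy)      # person boxes in original-image coordinates
    print(r.keypoints.data)  # per-person keypoints as (x, y, confidence)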

YOLOv8-pose skeleton keypoint diagram

You can adjust these skeleton keypoints to match whatever skeleton definition your own task needs.
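
For reference, YOLOv8-pose models pretrained on COCO use the standard 17-keypoint ordering below, which is what the labels "0" through "16" in the JSON refer to; if you customize the skeleton, adjust accordingly:

# Standard COCO 17-keypoint order (index -> name) used by YOLOv8-pose
COCO_KEYPOINTS = [
    "nose", "left_eye", "right_eye", "left_ear", "right_ear",
    "left_shoulder", "right_shoulder", "left_elbow", "right_elbow",
    "left_wrist", "right_wrist", "left_hip", "right_hip",
    "left_knee", "right_knee", "left_ankle", "right_ankle",
]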

Automating the Annotation

Each image gets one JSON file. The later fields change from image to image, but the leading fields are always the same, so we can set up the rough skeleton of the file first.

import base64
import json

# Read the image and encode its raw bytes as base64
with open(img_path, 'rb') as jpg_file:
    byte_content = jpg_file.read()
base64_bytes = base64.b64encode(byte_content)

# Decode the base64 bytes into a UTF-8 string (used later for "imageData")
base64_string = base64_bytes.decode('utf-8')

# Create the base dictionary shared by every annotation file
data = {
    "version": "5.1.1",
    "flags": {},
    "shapes": []
}
json_data = json.dumps(data, indent=4)

# img_path exists in the original code and is the absolute path of the image;
# it is used for imagePath and to keep the saved JSON file named after the image
imgName = img_path.split("/")[-1]
savePath = "/home/test/fall/" + imgName.split(".")[0] + ".json"

# Write the skeleton dictionary into the newly created JSON file
with open(savePath, 'w') as file:
    file.write(json_data)

The base64_string generation is adapted from this blog post: https://blog.csdn.net/nodototao/article/details/123800645?spm=1001.2101.3001.6650.2&utm_medium=distribute.pc_relevant.none-task-blog-2~default~CTRLIST~default-2.pc_relevant_default&depth_1-utm_source=distribute.pc_relevant.none-task-blog-2~default~CTRLIST~default-2.pc_relevant_default&utm_relevant_index=5

The next step is to write the box and keypoint information into the JSON file.

for j in range(pred.shape[0]):  # one iteration per detected person
    # Re-read the JSON file so shapes accumulate across detections
    with open(savePath, 'r') as f:
        data = json.load(f)
    # Append the bounding box; group_id ties the box to its keypoints
    bbox = {"label": "person", 'points': [[float(pred[j][0].item()), float(pred[j][1].item())],
                                          [float(pred[j][2].item()), float(pred[j][3].item())]], "group_id": (j + 1),
            "shape_type": "rectangle", "flags": {}}
    data['shapes'].append(bbox)
    data.update({"imagePath": imgName})
    data.update({"imageData": base64_string})
    # data.update({"imageData": str(base64.b64encode(open(img_path, "rb").read()))})
    data.update({"imageHeight": orig_img.shape[0]})
    data.update({"imageWidth": orig_img.shape[1]})
    for k in range(17):  # iterate over the 17 keypoints
        if float(pred_kpts[j][k][2].item()) > 0.5:  # keep only keypoints with confidence > 0.5
            keypoints = {'label': str(k),
                         'points': [[float(pred_kpts[j][k][0].item()), float(pred_kpts[j][k][1].item())]],
                         'group_id': (j + 1), 'shape_type': 'point', 'flags': {}}
            data['shapes'].append(keypoints)
    # Write the updated data back to the JSON file
    with open(savePath, 'w') as f:
        json.dump(data, f, indent=4)

Complete Code

def postprocess(self, preds, img, orig_imgs):
    """Return detection results for a given input image or list of images."""

    preds = ops.non_max_suppression(preds,
                                    self.args.conf,
                                    self.args.iou,
                                    agnostic=self.args.agnostic_nms,
                                    max_det=self.args.max_det,
                                    classes=self.args.classes,
                                    nc=len(self.model.names))

    if not isinstance(orig_imgs, list):  # input images are a torch.Tensor, not a list
        orig_imgs = ops.convert_torch2numpy_batch(orig_imgs)
    torch.set_printoptions(sci_mode=False)

    results = []
    for i, pred in enumerate(preds):
        orig_img = orig_imgs[i]
        pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape).round()

        pred_kpts = pred[:, 6:].view(len(pred), *self.model.kpt_shape) if len(pred) else pred[:, 6:]
        pred_kpts = ops.scale_coords(img.shape[2:], pred_kpts, orig_img.shape)
        img_path = self.batch[0][i]
        with open(img_path, 'rb') as jpg_file:
            byte_content = jpg_file.read()
        # Encode the raw image bytes as base64
        base64_bytes = base64.b64encode(byte_content)

        # Decode the base64 bytes into a UTF-8 string (stored in "imageData")
        base64_string = base64_bytes.decode('utf-8')
        import json  # better placed at module level, alongside base64

        # Create the base labelme dictionary
        data = {

            "version": "5.1.1",
            "flags": {},
            "shapes": [

            ]

        }
        json_data = json.dumps(data, indent=4)
        # Keep the JSON file named after the image
        imgName = img_path.split("/")[-1]
        print(img_path)
        savePath = "/home/ebo/test/high/_merge/" + imgName.split(".")[0] + ".json"
        with open(savePath, 'w') as file:
            file.write(json_data)
        for j in range(pred.shape[0]):  # one iteration per detected person
            # Re-read the JSON file so shapes accumulate across detections
            with open(savePath, 'r') as f:
                data = json.load(f)
            # Append the bounding box; group_id ties the box to its keypoints
            bbox = {"label": "person", 'points': [[float(pred[j][0].item()), float(pred[j][1].item())],
                                                  [float(pred[j][2].item()), float(pred[j][3].item())]],
                    "group_id": (j + 1),
                    "shape_type": "rectangle", "flags": {}}
            data['shapes'].append(bbox)
            data.update({"imagePath": imgName})
            data.update({"imageData": base64_string})
            # data.update({"imageData": str(base64.b64encode(open(img_path, "rb").read()))})
            data.update({"imageHeight": orig_img.shape[0]})
            data.update({"imageWidth": orig_img.shape[1]})
            # Use k for the keypoint index so it does not shadow the batch index i from the outer enumerate loop
            for k in range(17):
                if float(pred_kpts[j][k][2].item()) > 0.5:  # keep only keypoints with confidence > 0.5
                    keypoints = {'label': str(k),
                                 'points': [[float(pred_kpts[j][k][0].item()), float(pred_kpts[j][k][1].item())]],
                                 'group_id': (j + 1), 'shape_type': 'point', 'flags': {}}
                    data['shapes'].append(keypoints)
            # Write the updated data back to the JSON file
            with open(savePath, 'w') as f:
                json.dump(data, f, indent=4)

        # print(pred)
        results.append(
            Results(orig_img, path=img_path, names=self.model.names, boxes=pred[:, :6], keypoints=pred_kpts))

    return results
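
Once prediction has been run over a folder, each image ends up with a same-named .json that can be opened and corrected in labelme. A quick sketch for sanity-checking the generated files (the directory is the one used above):

import glob
import json

for p in sorted(glob.glob("/home/ebo/test/high/_merge/*.json")):
    with open(p) as f:
        ann = json.load(f)
    boxes = [s for s in ann["shapes"] if s["shape_type"] == "rectangle"]
    points = [s for s in ann["shapes"] if s["shape_type"] == "point"]
    print(p, "-", len(boxes), "boxes,", len(points), "keypoints")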
