Converting labelme annotations to masks

# Sample code from the TorchVision 0.3 Object Detection Finetuning Tutorial
# http://pytorch.org/tutorials/intermediate/torchvision_tutorial.html
import os
import numpy as np
import torch
from PIL import Image



# from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
# from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor


class PennFudanDataset(object):
    def __init__(self, root, transforms):
        self.root = root
        self.transforms = transforms
        # load all image files, sorting them to
        # ensure that they are aligned
        self.imgs = list(sorted(os.listdir(os.path.join(root, "PNGImages"))))
        self.masks = list(sorted(os.listdir(os.path.join(root, "PedMasks"))))

    def __getitem__(self, idx):
        # load images and masks
        img_path = os.path.join(self.root, "PNGImages", self.imgs[idx])
        mask_path = os.path.join(self.root, "PedMasks", self.masks[idx])
        img = Image.open(img_path).convert("RGB")
        # note that we haven't converted the mask to RGB,
        # because each color corresponds to a different instance
        # with 0 being background
        mask = Image.open(mask_path)
        mask = np.array(mask)
        # instances are encoded as different colors
        obj_ids = np.unique(mask)
        # first id is the background, so remove it
        obj_ids = obj_ids[1:]

        # split the color-encoded mask into a set
        # of binary masks
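        # obj_ids[:, None, None] has shape (N, 1, 1); broadcasting it against the
        # (H, W) mask yields an (N, H, W) boolean array, one binary mask per instance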
        masks = mask == obj_ids[:, None, None]

        # get bounding box coordinates for each mask
        num_objs = len(obj_ids)
        boxes = []
        for i in range(num_objs):
            pos = np.where(masks[i])
            xmin = np.min(pos[1])
            xmax = np.max(pos[1])
            ymin = np.min(pos[0])
            ymax = np.max(pos[0])
            boxes.append([xmin, ymin, xmax, ymax])
        boxes = torch.as_tensor(boxes, dtype=torch.float32)
        # there is only one class
        labels = torch.ones((num_objs,), dtype=torch.int64)
        masks = np.array(masks, dtype=np.uint8)
        masks = torch.as_tensor(masks, dtype=torch.uint8)

        image_id = torch.tensor([idx])
        area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
        # suppose all instances are not crowd
        iscrowd = torch.zeros((num_objs,), dtype=torch.int64)
        #torch.Size([2, 4]) torch.Size([2]) torch.Size([2, 536, 559]) torch.Size([1]) torch.Size([2]) torch.Size([2])
        target = {}
        target["boxes"] = boxes
        target["labels"] = labels
        target["masks"] = masks
        target["image_id"] = image_id
        target["area"] = area
        target["iscrowd"] = iscrowd
        if self.transforms is not None:
            img, target = self.transforms(img, target)
        return img, target

    def __len__(self):
        return len(self.imgs)
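
# Minimal usage sketch (the root path below is hypothetical; PennFudanDataset
# expects a folder containing PNGImages/ and PedMasks/ subfolders):
if os.path.isdir(r'D:\PennFudanPed'):
    _dataset = PennFudanDataset(r'D:\PennFudanPed', transforms=None)
    _img, _target = _dataset[0]
    # boxes: (N, 4), masks: (N, H, W), labels / area / iscrowd: (N,)
    print(_target["boxes"].shape, _target["masks"].shape, _target["labels"])
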
import cv2
for js in os.listdir(r'C:\Users\Administrator\Desktop\paper\j'):  # 1, 2, 4, 5
    # labelme output folders are named like "<image>_json"; recover the image file name
    name = js.replace('json', 'jpg').replace('_', '.')
    # print(name)
    for data in os.listdir(os.path.join(r'C:\Users\Administrator\Desktop\paper\j', js)):
        # try:
        if data == 'label.png':
            # label.png is the index-color instance mask exported from the labelme json folder
            mask = Image.open(os.path.join(r'C:\Users\Administrator\Desktop\paper\j', js, data))
            # mask = Image.open('label.png')
            w, h = mask.size
            paper = np.zeros(shape=(h, w))
            mask = np.array(mask)
            obj_ids = np.unique(mask)
            obj_ids = obj_ids[1:]  # first id is the background, so drop it
            masks = mask == obj_ids[:, None, None]
            pig = np.zeros(shape=(h, w))
            # paper_mask = masks[1]
            pig_mask = masks[0]
            for i in range(h):
                for j in range(w):
                    if pig_mask[i][j] != 0:
                        pig[i][j] = 255
                    # if paper_mask[i][j] == 1:
                    #     paper[i][j] = 255
            cv2.imwrite(r'C:\Users\Administrator\Desktop\paper\label\{}'.format(name), pig)
            # cv2.imwrite(r'D:\MASKpicture\datas\7\json\papers\{}'.format(name), paper)
        elif data == 'img.png':
            image = cv2.imread(os.path.join(r'C:\Users\Administrator\Desktop\paper\j', js, data))
            cv2.imwrite(r'C:\Users\Administrator\Desktop\paper\img\{}'.format(name), image)
        # except:
        #     print(js)
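
# Vectorized alternative to the per-pixel loop above (a sketch; the helper name
# is ours): a boolean (H, W) instance mask becomes a 0/255 uint8 image without
# the nested loops.
def binary_mask_to_image(instance_mask):
    return instance_mask.astype(np.uint8) * 255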

# for js in os.listdir(r'C:\Users\Administrator\Videos\p'):
#     name = js.replace('_json','.jpg')
#     for data in os.listdir(os.path.join(r'C:\Users\Administrator\Videos\p',js)):
#         if data == 'label.png':
#             mask_pig = Image.open(os.path.join(r'C:\Users\Administrator\Videos\p',js,data))
#             mask_paper = Image.open(os.path.join(r'C:\Users\Administrator\Videos\z', js, data))
#             # mask = Image.open('label.png')
#             w,h = mask_pig.size
#             mask_pig = np.array(mask_pig)
#             mask_paper = np.array(mask_paper)
#             obj_ids = np.unique(mask_pig)
#             obj_ids = obj_ids[1:]
#             pig_mask = mask_pig == obj_ids[:, None, None]
#             paper_mask = mask_paper == obj_ids[:, None, None]
#             pig = 0
#             paper = 0
#             for i in range(h):
#                 for j in range(w):
#                     if pig_mask[0][i][j] == 1:
#                         pig += 1
#                     if paper_mask[0][i][j] == 1:
#                         paper += 1
#             print(name,paper,pig,paper / pig)

# s = PennFudanDataset(r'D:\360Downloads\yolotest_json',None)
# print(s.__getitem__(0))
# import cv2
# image = cv2.imread(r'D:\360Downloads\yolotest_json_\PNGImages\yolotest.jpg')
# image = cv2.rectangle(image,(167, 433), (177, 443),(255,0,0))
# image = cv2.rectangle(image,(843, 348), (853, 358),(255,0,0))
# image = cv2.rectangle(image,(700, 272), (710, 282),(255,0,0))
# image = cv2.rectangle(image,(828, 450), (838, 460),(255,0,0))
# cv2.imshow('',image)
# cv2.waitKey(0)
# def get_model_instance_segmentation(num_classes):
#     # load an instance segmentation model pre-trained pre-trained on COCO
#     model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
#
#     # get number of input features for the classifier
#     in_features = model.roi_heads.box_predictor.cls_score.in_features
#     # replace the pre-trained head with a new one
#     model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
#
#     # now get the number of input features for the mask classifier
#     in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
#     hidden_layer = 256
#     # and replace the mask predictor with a new one
#     model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask,
#                                                        hidden_layer,
#                                                        num_classes)
#
#     return model
#
# import csv
# import xlrd
# book = xlrd.open_workbook(r'D:\训练集\pig-zhangyong\20190910\训练数据.xlsx')
# sheet1 = book.sheets()[0]
# outputs = open(r'D:\训练集\已标注数据\10月28日\pig-zhangyong\20190910\paperonpig\paper_on_.csv','w',newline='')
# csv_write = csv.writer(outputs,dialect='excel')
# csv_write.writerow(['图片名','路径','纸的面积','猪的面积','面积比','实际体重'])
# for i in range(sheet1.nrows):
#     if i > 0:
#         row_values = sheet1.row_values(i)
#         name = row_values[0]
#         path = row_values[1]
#         weight = row_values[2]
#         dir = name.replace('.jpg','_json')
#         try:
#             for data in os.listdir(os.path.join(r'D:\训练集\已标注数据\10月28日\pig-zhangyong\20190910\paperonpig',dir)):
#                 if data == 'label.png':
# for dir in os.listdir(r'C:\Users\Administrator\Music\j'):
#     mask = Image.open(r'C:\Users\Administrator\Music\j\{}\label.png'.format(dir))  # os.path.join(r'D:\训练集\已标注数据\10月28日\pig-zhangyong\20190910\paperonpig',dir,data)
#     w, h = mask.size
#     mask = np.array(mask)
#     obj_ids = np.unique(mask)
#     obj_ids = obj_ids[1:]
#     masks = mask == obj_ids[:, None, None]
#     # masks = np.array(masks, dtype=np.uint8)
#     # masks = torch.as_tensor(masks, dtype=torch.uint8)
#     try:
#         paper_mask = masks[0]
#         pig_mask = masks[1]
#         pig = 0
#         paper = 0
#         for i in range(h):
#             for j in range(w):
#                 if pig_mask[i][j] == 1:
#                     pig += 1
#                 if paper_mask[i][j] == 1:
#                     paper += 1
#         print(dir.replace('_jpg_json','.jpg.jpg'),paper / pig)
#     # csv_write.writerow([name,path,paper,pig,paper / pig,weight])
#     # print([name,path,paper,pig,paper / pig,weight])
#     except IndexError:
#         #                         print(dir)
#         pass
# import cv2
# with open(r'C:\Users\Administrator\Desktop\newTest\we\面积比对应体重的训练文件.txt') as f:
#     datas = f.readlines()
#     for data in datas:
#         pig = cv2.imread(r'D:\test\pig\{}'.format(data.strip()))
#         pig = cv2.cvtColor(pig, cv2.COLOR_BGR2GRAY)
#         paper = cv2.imread(r'D:\test\paper\{}'.format(data.strip()))
#         paper = cv2.cvtColor(paper, cv2.COLOR_BGR2GRAY)
#         c_pig = 0
#         c_paprer = 0
#         for i in range(490):
#             for j in range(490):
#                 if pig[i][j] > 0:
#                     c_pig += 1
#                 if paper[i][j] > 0:
#                     c_paprer += 1
#         print(c_paprer / c_pig)
#
# #         except FileNotFoundError:
# #             pass


# import csv
# import xlrd
# book = xlrd.open_workbook(r'D:\训练集\已标注数据\10月28日\pig-wenchang\0911-wen\标注信息.xlsx')
# sheet1 = book.sheets()[0]
# outputs = open(r'D:\训练集\已标注数据\10月28日\pig-wenchang\0911-wen\0911_wen.csv','w',newline='')
# csv_write = csv.writer(outputs,dialect='excel')
# csv_write.writerow(['图片名','路径','纸的面积','猪的面积','面积比','实际体重'])
# for i in range(sheet1.nrows):
#     if i > 0:
#         row_values = sheet1.row_values(i)
#         name = row_values[0] + '.jpg'
#         path = row_values[1]
#         weight = row_values[2]
#         dir = name.replace('.jpg','_json')
#         try:
#             for data in os.listdir(os.path.join(r'D:\训练集\已标注数据\10月28日\pig-wenchang\0911-wen\json\p', dir)):
#                 if data == 'label.png':
#                     mask_pig = Image.open(os.path.join(r'D:\训练集\已标注数据\10月28日\pig-wenchang\0911-wen\json\p', dir, data))
#                     mask_paper = Image.open(os.path.join(r'D:\训练集\已标注数据\10月28日\pig-wenchang\0911-wen\json\z', dir, data))
#                     w, h = mask_pig.size
#                     mask_pig = np.array(mask_pig)
#                     mask_paper = np.array(mask_paper)
#                     obj_ids = np.unique(mask_pig)
#                     obj_ids = obj_ids[1:]
#                     pig_mask = mask_pig == obj_ids[:, None, None]
#                     paper_mask = mask_paper == obj_ids[:, None, None]
#                     pig = 0
#                     paper = 0
#                     for i in range(h):
#                         for j in range(w):
#                             if pig_mask[0][i][j] == 1:
#                                 pig += 1
#                             if paper_mask[0][i][j] == 1:
#                                 paper += 1
#                     csv_write.writerow([name, path, paper, pig, paper / pig, weight])
#                     print([name,path, paper, pig, paper / pig, weight])
#         except :
#             print(dir)

# def get_transform(train):
#     transforms = []
#     transforms.append(T.ToTensor())
#     if train:
#         transforms.append(T.RandomHorizontalFlip(0.5))
#     return T.Compose(transforms)
#
#
# def main():
#     # train on the GPU or on the CPU, if a GPU is not available
#     device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
#
#     # our dataset has two classes only - background and person
#     num_classes = 2
#     # use our dataset and defined transformations
#     dataset = PennFudanDataset('PennFudanPed', get_transform(train=True))
#     dataset_test = PennFudanDataset('PennFudanPed', get_transform(train=False))
#
#     # split the dataset in train and test set
#     indices = torch.randperm(len(dataset)).tolist()
#     dataset = torch.utils.data.Subset(dataset, indices[:-50])
#     dataset_test = torch.utils.data.Subset(dataset_test, indices[-50:])
#
#     # define training and validation data loaders
#     data_loader = torch.utils.data.DataLoader(
#         dataset, batch_size=2, shuffle=True, num_workers=4,
#         collate_fn=utils.collate_fn)
#
#     data_loader_test = torch.utils.data.DataLoader(
#         dataset_test, batch_size=1, shuffle=False, num_workers=4,
#         collate_fn=utils.collate_fn)
#
#     # get the model using our helper function
#     model = get_model_instance_segmentation(num_classes)
#
#     # move model to the right device
#     model.to(device)
#
#     # construct an optimizer
#     params = [p for p in model.parameters() if p.requires_grad]
#     optimizer = torch.optim.SGD(params, lr=0.005,
#                                 momentum=0.9, weight_decay=0.0005)
#     # and a learning rate scheduler
#     lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
#                                                    step_size=3,
#                                                    gamma=0.1)
#
#     # let's train it for 10 epochs
#     num_epochs = 10
#
#     for epoch in range(num_epochs):
#         # train for one epoch, printing every 10 iterations
#         train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq=10)
#         # update the learning rate
#         lr_scheduler.step()
#         # evaluate on the test dataset
#         evaluate(model, data_loader_test, device=device)
#
#     print("That's it!")
#

# if __name__ == "__main__":
#     main()
labelme JSON annotations:

import cv2 as cv
import numpy as np
import json
import os


def convertPolygonToMask(jsonfilePath):
    with open(jsonfilePath, "r", encoding='utf-8') as jsonf:
        jsonData = json.load(jsonf)
        img_h = jsonData["imageHeight"]
        img_w = jsonData["imageWidth"]
        mask = np.zeros((img_h, img_w), np.uint8)
        # number of objects in the image: num = len(jsonData["shapes"])
        num = 0
        for obj in jsonData["shapes"]:
            label = obj["label"]
            polygonPoints = obj["points"]
            polygonPoints = np.array(polygonPoints,np.int32)
            # print("+" * 50, "\n", polygonPoints)
            # print(label)
            num += 1
            # thickness=-1 fills the polygon interior with 255
            cv.drawContours(mask, [polygonPoints], -1, (255), -1)

    return mask

def main():
    jsonfileFolder = r"C:\Users\lhq\Desktop\label"
    maskSaveFolder = r"C:\Users\lhq\Desktop\mask"

    for jsonfile in os.listdir(jsonfileFolder):
        jsonfilePath = os.path.join(jsonfileFolder,jsonfile)
        mask = convertPolygonToMask(jsonfilePath)
        maskName = jsonfile.split(".")[0] + ".png"
        maskPath = os.path.join(maskSaveFolder,maskName)
        cv.imwrite(maskPath,mask)


if __name__ == "__main__":
    main()
    # jsonfilePath = r"K:\deepImage\del\1.json"
    # maskSaveFolder = r"K:\deepImage\del"
    # mask = convertPolygonToMask(jsonfilePath)
    # # threshold the mask so it is visible when displayed
    # _, th = cv.threshold(mask, 0, 255, cv.THRESH_BINARY)
    # cv.imshow("mask", th)
    # src = cv.imread(r"K:\deepImage\del\1.jpg")
    # cv.imwrite(maskSaveFolder + "\mask.png", mask)
    # cv.imshow("src", src)
    # cv.waitKey(0)
    # cv.destroyAllWindows()
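
For a PedMasks-style instance mask (the format the PennFudanDataset class above expects), a small variant of convertPolygonToMask can fill the i-th polygon with pixel value i + 1 instead of 255. A sketch, assuming the same labelme JSON fields as above (the helper name is ours):

def convertPolygonToInstanceMask(jsonfilePath):
    with open(jsonfilePath, "r", encoding='utf-8') as jsonf:
        jsonData = json.load(jsonf)
    mask = np.zeros((jsonData["imageHeight"], jsonData["imageWidth"]), np.uint8)
    # assumes fewer than 256 objects, since the mask is uint8
    for i, obj in enumerate(jsonData["shapes"]):
        polygonPoints = np.array(obj["points"], np.int32)
        # instance i is drawn with value i + 1; 0 stays background
        cv.drawContours(mask, [polygonPoints], -1, i + 1, -1)
    return mask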

SAM annotations:

import cv2 as cv
import numpy as np
import json
import os


def convertPolygonToMask(jsonfilePath):
    with open(jsonfilePath, "r", encoding='utf-8') as jsonf:
        jsonData = json.load(jsonf)
        info = jsonData["info"]
        img_h = info["height"]
        img_w = info["width"]
        mask = np.zeros((img_h, img_w), np.uint8)
        # number of objects in the image: num = len(jsonData["objects"])
        num = 0
        for obj in jsonData["objects"]:
            polygonPoints = obj["segmentation"]
            polygonPoints = np.array(polygonPoints, np.int32)
            # print("+" * 50, "\n", polygonPoints)
            # print(label)
            num += 1
            cv.drawContours(mask, [polygonPoints], -1, (255), -1)

    return mask


def main():
    jsonfileFolder = r"D:\picture\patch\json"
    maskSaveFolder = r"D:\dataset\train\mask"

    for jsonfile in os.listdir(jsonfileFolder):
        jsonfilePath = os.path.join(jsonfileFolder, jsonfile)
        mask = convertPolygonToMask(jsonfilePath)
        maskName = jsonfile.split(".")[0] + ".png"
        maskPath = os.path.join(maskSaveFolder, maskName)
        cv.imwrite(maskPath, mask)


if __name__ == "__main__":
    main()
    # jsonfilePath = r"K:\deepImage\del\1.json"
    # maskSaveFolder = r"K:\deepImage\del"
    # mask = convertPolygonToMask(jsonfilePath)
    # # threshold the mask so it is visible when displayed
    # _, th = cv.threshold(mask, 0, 255, cv.THRESH_BINARY)
    # cv.imshow("mask", th)
    # src = cv.imread(r"K:\deepImage\del\1.jpg")
    # cv.imwrite(maskSaveFolder + "\mask.png", mask)
    # cv.imshow("src", src)
    # cv.waitKey(0)
    # cv.destroyAllWindows()
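
As a quick sanity check that a generated mask lines up with its source image, one option is to blend the mask over the image (a sketch; the function name and example paths are ours, and it assumes the mask and image have the same size):

def overlayMask(imagePath, maskPath, alpha=0.5):
    image = cv.imread(imagePath)
    mask = cv.imread(maskPath, cv.IMREAD_GRAYSCALE)
    color = np.zeros_like(image)
    color[mask > 0] = (0, 0, 255)  # paint the mask region red (BGR)
    return cv.addWeighted(image, 1.0, color, alpha, 0)

# e.g. cv.imwrite(r"D:\dataset\train\overlay\1.png",
#                 overlayMask(r"D:\picture\patch\1.jpg", r"D:\dataset\train\mask\1.png"))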
