简介
本博文以二分类建筑数据集为例来介绍。一般用于语义分割的数据集,包含原始影像以及对应的标签。而用于实例分割的数据集,除去这些,还有边界框。借助skimage库中的某些函数可以将语义分割数据集用于实例分割。
Python代码
代码中的label和regionprops可以参考这篇博文。代码的功能主要是将二值图转化为coco格式的json标注。
import json
import os
import cv2
import cv2
from skimage.measure import label, regionprops
import matplotlib.pyplot as plt
import numpy as np
from osgeo import gdal
from skimage.morphology import erosion, square
import glob
import tqdm
def readGeoImg(imgPath):
    """Read a GDAL-supported raster as an H x W x C uint8 array.

    Band values are clipped to [0, 255] before the uint8 cast so that
    out-of-range pixels saturate instead of wrapping around.
    """
    dataset = gdal.Open(imgPath)
    bands = []
    # GDAL band indices are 1-based.
    for bandIdx in range(1, dataset.RasterCount + 1):
        bands.append(dataset.GetRasterBand(bandIdx).ReadAsArray())
    stacked = np.dstack(bands)
    clipped = np.clip(stacked, 0, 255)
    return clipped.astype(np.uint8)
class NpEncoder(json.JSONEncoder):
    """JSON encoder that serializes NumPy scalars and arrays.

    `json.dump` cannot handle NumPy types natively; this encoder maps
    them onto the equivalent built-in Python types first.
    """

    def default(self, obj):
        # Guard clauses for each supported NumPy family; anything else
        # falls through to the base class (which raises TypeError).
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return super().default(obj)
if __name__ == "__main__":
    # COCO-style container: one entry per image, one per connected region,
    # plus the single category definition.
    data = {'images': [], 'annotations': [], 'categories': [{'id': 0, 'name': 'farmland'}]}
    # COCO requires annotation ids to be unique across ALL images, so use one
    # global counter (the original reused the per-image loop index, producing
    # duplicate ids, and shadowed the builtin `id`).
    annId = 0
    imgList = glob.glob("./image/*.tif")
    for imgId, imgPath in tqdm.tqdm(enumerate(imgList), total=len(imgList)):
        baseName = os.path.basename(imgPath)
        img_0 = readGeoImg(imgPath).astype(np.uint8)
        # The label raster shares the file name; only the directory differs.
        labelPath = imgPath.replace('image', 'label')
        assert os.path.exists(labelPath)
        mask = np.squeeze(readGeoImg(labelPath))
        mask = np.where(mask > 0, 1, 0)
        # numpy shape is (rows, cols) == (height, width); the original
        # `w, h = mask.shape` had them swapped, corrupting images metadata.
        h, w = mask.shape
        # Slight erosion separates instances that only touch at a corner/edge.
        mask = erosion(mask, square(2)).astype(np.uint8)
        label_0 = label(mask)         # connected-component labeling
        props = regionprops(label_0)  # per-instance stats (bbox, area, label, ...)
        img_1 = img_0.copy()
        imagesInfo = {'id': imgId, 'file_name': baseName, 'height': h, 'width': w}
        data['images'].append(imagesInfo)
        for prop in props:
            # Binary mask for this single instance (prop.label is the
            # component id assigned by skimage.measure.label).
            mask_ = np.where(label_0 == prop.label, 1, 0).astype(np.uint8)
            contours, hierarchy = cv2.findContours(mask_, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            if not contours:
                # Degenerate region after erosion — nothing to annotate.
                continue
            cv2.drawContours(img_1, contours, -1, (0, 0, 255), 3)
            # skimage bbox order is (min_row, min_col, max_row, max_col).
            minr, minc, maxr, maxc = prop.bbox
            annotation = {
                'image_id': imgId,   # id of the owning image
                'id': annId,         # globally unique annotation id
                'category_id': 0,    # single-class dataset
                # COCO bbox is [x, y, width, height], NOT [x1, y1, x2, y2].
                'bbox': [minc, minr, maxc - minc, maxr - minr],
                # Plain int: the original uint16 cast overflowed for regions
                # larger than 65535 pixels.
                'area': int(prop.area),
                # COCO polygon format: list of flattened [x0, y0, x1, y1, ...]
                # rings. Keep every contour with at least 3 points (the
                # original silently dropped all but the first contour).
                'segmentation': [c.reshape(-1).tolist() for c in contours if len(c) >= 3],
                'iscrowd': 0,
            }
            data['annotations'].append(annotation)
            annId += 1
            cv2.rectangle(img_1, (minc, minr), (maxc, maxr), (255, 0, 0), 2)
        # Visual sanity check: original image, binary mask, annotated overlay.
        # NOTE(review): plt.show() blocks once per image — comment out for
        # unattended batch conversion.
        fig, (ax1, ax2, ax3) = plt.subplots(1, 3)
        ax1.imshow(img_0)
        ax2.imshow(mask)
        ax3.imshow(img_1)
        plt.show()
    # Context manager guarantees the file handle is flushed and closed
    # (the original left the handle from open() dangling).
    with open('label.json', 'w') as f:
        json.dump(data, f, indent=4, cls=NpEncoder)