Mask R-CNN training preprocessing: converting a custom annotation format to COCO, merging COCO-style JSON files, latest revision:

Converting the custom annotation format to COCO
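
For orientation, the COCO-style instances file that the script below assembles has roughly the following shape; the field values here are purely illustrative:

# Illustrative only: mirrors the dict built by Lableme2CoCo.to_coco() below.
instance = {
    "info": "spytensor created",
    "license": ["license"],
    "images": [
        {"height": 1024, "width": 1280, "id": 0, "file_name": "000001.png"}
    ],
    "annotations": [
        {"id": 0, "image_id": 0, "category_id": 1,
         "segmentation": [[100, 200, 150, 200, 150, 260, 100, 260]],
         "bbox": [100, 200, 50, 60], "iscrowd": 0, "area": 1.0}
    ],
    "categories": [
        {"id": 1, "name": "shachepian"}
    ]
}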

# -*- coding:utf-8 -*-
#https://blog.csdn.net/u010103202/article/details/81635436
#labelme标注的数据分析
#https://blog.csdn.net/wc781708249/article/details/79595174
#数据先转换成lableme,然后调用labelme的函数转换为VOC
#https://github.com/wkentaro/labelme/tree/master/examples/semantic_segmentation


import sys
reload(sys)
sys.setdefaultencoding('utf8')
import argparse
import json
import os
import os.path as osp
import warnings
import copy
import numpy as np
import PIL.Image
from skimage import io
import yaml
import cv2
import random
import glob
import shutil
from sklearn.model_selection import train_test_split
np.random.seed(41)

def mkdir_os(path):
    if not os.path.exists(path):
        os.makedirs(path)

# 数据地址
orijsonpath = "seg_shachepian1128_photoneo_20191204_204120.json"
dataname = "shachepian" #--------------------
#0为背景
classname_to_id = {dataname: 1}
ori_path = "./image"
save_train = "./seg/train"
save_test = "./seg/test"
json_train = "./json/train"
json_test = "./json/test"
mkdir_os(save_train)
mkdir_os(save_test)
mkdir_os(json_train)
mkdir_os(json_test)
train_txt = open('train.txt', "w")

#coco数据类型转换
class Lableme2CoCo:
    def __init__(self):
        self.images = []
        self.annotations = []
        self.categories = []
        self.img_id = 0
        self.ann_id = 0

    def save_coco_json(self, instance, save_path):
        import io
        #json.dump(instance, io.open(save_path, 'w', encoding='utf-8'), ensure_ascii=False, indent=1)  # indent=2 更加美观显示
        with io.open(save_path, 'w', encoding="utf-8") as outfile:
            my_json_str = json.dumps(instance, ensure_ascii=False, indent=1)
            if isinstance(my_json_str, str):
                my_json_str = my_json_str.decode("utf-8")
            outfile.write(my_json_str)

    # 由json文件构建COCO
    def to_coco(self, json_path_list):
        self._init_categories()
        for json_path in json_path_list:
            obj = self.read_jsonfile(json_path)
            self.images.append(self._image(obj, json_path))
            shapes = obj['shapes']
            for shape in shapes:
                annotation = self._annotation(shape)
                self.annotations.append(annotation)
                self.ann_id += 1
            self.img_id += 1
        instance = {}
        instance['info'] = 'spytensor created'
        instance['license'] = ['license']
        instance['images'] = self.images
        instance['annotations'] = self.annotations
        instance['categories'] = self.categories
        return instance

    # 构建类别
    def _init_categories(self):
        for k, v in classname_to_id.items():
            category = {}
            category['id'] = v
            category['name'] = k
            self.categories.append(category)

    # 构建COCO的image字段
    def _image(self, obj, path):
        image = {}
        from labelme import utils
        #img_x = utils.img_b64_to_arr(obj['imageData'])
        #---------------------------------------
        print(str(obj['imagePath']))
        name = str(obj['imagePath']).split('/')[-1]
        newname = os.path.join(ori_path,name)
        img_x = cv2.imread(newname)
        h, w = img_x.shape[:-1]
        image['height'] = h
        image['width'] = w
        image['id'] = self.img_id
        image['file_name'] = os.path.basename(path).replace("json", jpgpng)
        return image

    # 构建COCO的annotation字段
    def _annotation(self, shape):
        label = shape['label']
        points = shape['points']
        annotation = {}
        annotation['id'] = self.ann_id
        annotation['image_id'] = self.img_id
        annotation['category_id'] = int(classname_to_id[label])
        annotation['segmentation'] = [np.asarray(points).flatten().tolist()]
        annotation['bbox'] = self._get_box(points)
        if int(shape['flags']) == 0:
            annotation['iscrowd'] = 0
            #annotation['ignore'] = 0
        elif int(shape['flags']) == 1:
            annotation['iscrowd'] = 1
            #annotation['ignore'] = 1
            
        annotation['area'] = 1.0
        return annotation

    # 读取json文件,返回一个json对象
    def read_jsonfile(self, path):
        import io
        #with io.open(path, "r", encoding='utf-8') as f:
        with open(path, "r") as f:
            return json.load(f)

    # COCO的格式: [x1,y1,w,h] 对应COCO的bbox格式
    def _get_box(self, points):
        min_x = min_y = np.inf
        max_x = max_y = 0
        for x, y in points:
            min_x = min(min_x, x)
            min_y = min(min_y, y)
            max_x = max(max_x, x)
            max_y = max(max_y, y)
        return [min_x, min_y, max_x - min_x, max_y - min_y]

# 获取数据
data = []
savedata = []
# 判断是否已经处理过
if os.path.exists("./saved.json")==True:
    with open("./saved.json") as f:
        for line in f:
            data.append(json.loads(line))
else:
    with open(orijsonpath) as f:
        for line in f:
            data.append(json.loads(line))
num = 0
lendata_num = 0
count = len(data)
trainimg = os.listdir(ori_path)
# 遍历获取的数据
for lab in range(count):
    num += 1
    #print num,"/",count
    onedate = data[lab]
    name = onedate["url_image"]
    name = str(name).split("/")[-1]
    jpgpng = name.split(".")[-1]
    if name not in trainimg:
        continue
    img = cv2.imread(os.path.join(ori_path,name))
    if img is None:
        continue
    temp_hh,temp_ww,c = img.shape
    hh = temp_hh
    ww = temp_ww
    if temp_hh > temp_ww:
        srcCopy = cv2.transpose(img)
        img = cv2.flip(srcCopy, 0)
        tempimg = copy.deepcopy(img)
        cv2.imwrite(os.path.join(ori_path,name),tempimg)
        hh,ww,c = img.shape
    
    tempNum = random.randint(1,10)
    point_size = 3
    thickness = 4
    if(len(onedate["result"])==0):
        continue
    if 'data' in onedate["result"] or 'data' in onedate["result"][0]:
        
        json_jpg={}
        json_jpg["imagePath"] = str(os.path.join(ori_path,name))
        json_jpg["imageData"] = None
        shapes=[]

        for key in range(len(onedate["result"])):
            ndata = onedate["result"][key]["data"]
            if len(ndata)< 8:
                continue
            # 判断,如果图片是立着的,放平它,保证,宽大高小
            if temp_hh > temp_ww:
                for k in range(len(ndata)/2):
                    temp1 = ndata[2*k]
                    temp2 = ndata[2*k+1]
                    ndata[2*k]= temp2
                    ndata[2*k+1]= temp_ww - temp1 -1
            # 修改原始,方便保存
            onedate["result"][key]["data"] = ndata

            points=[]
            # ignore 黄色标出
            if onedate["result"][key]["tagtype"] in "purpose2":
                for k in range(len(ndata)/2):
                    cv2.circle(img, (ndata[2*k],ndata[2*k+1]), point_size, (0, 255, 255), thickness)
                    points.append([ndata[2*k],ndata[2*k+1]])
            # add 红色标出
            elif onedate["result"][key]["tagtype"] in "purpose1":
                for k in range(len(ndata)/2):
                    cv2.circle(img, (ndata[2*k],ndata[2*k+1]), point_size, (0, 0, 255), thickness)
                    points.append([ndata[2*k],ndata[2*k+1]])
            # 特殊情况 蓝色标出
            else:
                for k in range(len(ndata)/2):
                    cv2.circle(img, (ndata[2*k],ndata[2*k+1]), point_size, (255, 0, 0), thickness)
                    points.append([ndata[2*k],ndata[2*k+1]])

            one_shape = {}
            one_shape["line_color"] = None
            one_shape["shape_type"] = "polygon"
            one_shape["points"] = points
            # 判断是否是ignore或者特殊情况,给出flag
            # ignore
            if onedate["result"][key]["tagtype"] in "purpose2":
                one_shape["flags"] = 1
            # add
            elif onedate["result"][key]["tagtype"] in "purpose1":
                one_shape["flags"] = 0
            # 特殊情况
            else:
                one_shape["flags"] = 1

            one_shape["fill_color"] = None
            one_shape["label"] = dataname
            shapes.append(one_shape)

        json_jpg["shapes"] = shapes
        json_jpg["version"] = "3.16.7"
        json_jpg["flags"] = {}
        json_jpg["fillColor"] = [
                                    255, 
                                    0, 
                                    0, 
                                    128
                                ]
        json_jpg["lineColor"] = [
                                    0, 
                                    255, 
                                    0, 
                                    128
                                ]
        json_jpg["imageWidth"] = ww
        json_jpg["imageHeight"] = hh
        #jsonData = json.dumps(json_jpg, ensure_ascii=False, indent=1)
        jsonData = json.dumps(json_jpg, indent=1)
        jsonname = name.split(".")[0]
        jsonname = jsonname+".json"
        #分割的保存
        if tempNum == 1 or tempNum == 2:
            cv2.imwrite(os.path.join(save_test,name),img)
            fileObject = open(os.path.join(json_test,jsonname), 'w')
            fileObject.write(jsonData)
            fileObject.close()
        else:
            cv2.imwrite(os.path.join(save_train,name),img)
            fileObject = open(os.path.join(json_train,jsonname), 'w')
            fileObject.write(jsonData)
            fileObject.close()
    else:
        continue
    txtname = name.split(".")[0]
    train_txt.write(txtname)
    train_txt.write("\n")
    # 将满足整个流程的原始json保存
    savedata.append(onedate)
print("lendata_num:",lendata_num)
train_txt.close()
with open("./saved.json", 'w') as filejson:
    for tempjson in savedata:
        json_str = json.dumps(tempjson)
        filejson.write(json_str)
        filejson.write('\n')

saved_coco_path = "./"
# 创建文件
if not os.path.exists("%scoco/annotations/"%saved_coco_path):
    os.makedirs("%scoco/annotations/"%saved_coco_path)
if not os.path.exists("%scoco/images/train2017/"%saved_coco_path):
    os.makedirs("%scoco/images/train2017"%saved_coco_path)
if not os.path.exists("%scoco/images/val2017/"%saved_coco_path):
    os.makedirs("%scoco/images/val2017"%saved_coco_path)

# Collect the json files for the training split
json_list_train = glob.glob(json_train + "/*.json")
# Collect the json files for the test split
json_list_test = glob.glob(json_test + "/*.json")
print("train_n:", len(json_list_train), 'val_n:', len(json_list_test))

# 把训练集转化为COCO的json格式
if len(json_list_train):
    l2c_train = Lableme2CoCo()
    train_instance = l2c_train.to_coco(json_list_train)
    l2c_train.save_coco_json(train_instance, '%scoco/annotations/instances_train2017.json'%saved_coco_path)
    for file in json_list_train:
        name = file.split('/')[-1]
        name = os.path.join(ori_path,name)
        shutil.copy(name.replace("json",jpgpng),"%scoco/images/train2017/"%saved_coco_path)

if len(json_list_test):
    # 把验证集转化为COCO的json格式
    l2c_val = Lableme2CoCo()
    val_instance = l2c_val.to_coco(json_list_test)
    l2c_val.save_coco_json(val_instance, '%scoco/annotations/instances_val2017.json'%saved_coco_path)
    for file in json_list_test:
        name = file.split('/')[-1]
        name = os.path.join(ori_path,name)
        shutil.copy(name.replace("json",jpgpng),"%scoco/images/val2017/"%saved_coco_path)
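
A quick sanity check on the generated annotation file is to load it back with pycocotools; a minimal sketch, assuming pycocotools is installed and the default output path used above:

# Minimal check of the generated COCO file; pycocotools is assumed to be installed.
from pycocotools.coco import COCO

coco = COCO('./coco/annotations/instances_train2017.json')
print('images:', len(coco.getImgIds()))
print('annotations:', len(coco.getAnnIds()))
print('categories:', coco.loadCats(coco.getCatIds()))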

Merging COCO-style JSON files

# -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf8')
 
import os
import datetime
import random
import sys
import operator
import math
import numpy as np
import skimage.io
import matplotlib
from matplotlib import pyplot as plt
from collections import defaultdict, OrderedDict
import json
#待合并的路径
FILE_DIR = "./coco_all_10.26/annotations/val/"
 
def load_json(filenamejson):
    with open(filenamejson, 'r') as f:
        raw_data = json.load(f)
    return raw_data
 
file_count = 0
files = os.listdir(FILE_DIR)
 
 
root_data = {}
annotations_num = 0
images_num = 0
annotations_id = []
images=[]
 
 
for x in range(len(files)):
    #解析文件名字和后缀
    #print(str(files[x]))
    file_suffix = os.path.splitext(str(files[x]))[1]
    file_name = os.path.splitext(str(files[x]))[0]
    #过滤类型不对的文件
    if file_suffix != ".json":
        continue
    #json文件计数
    file_count = file_count + 1
    #组合文件路径
    filenamejson = FILE_DIR + str(files[x])
    print(filenamejson)
 
    #读取文件
    if file_count == 1:
        #第一个文件作为root
        root_data = load_json(filenamejson)
        #为了方便直接在第一个json的id最大值基础上进行累加新的json的id
        annotations_num = len(root_data['annotations'])
        images_num = len(root_data['images'])
        #拿到root的id
        for key1 in range(annotations_num):
            annotations_id.append(int(root_data['annotations'][key1]['id']))
        for key2 in range(images_num):
            images.append(int(root_data['images'][key2]['id']))
        print("{0}生成的json有 {1} 个图片".format(x, len(root_data['images'])))
        print("{0}生成的json有 {1} 个annotation".format(x, len(root_data['annotations'])))
    else:
        #载入新的json
        raw_data = load_json(filenamejson)
        next_annotations_num = len(raw_data['annotations'])
        next_images_num = len(raw_data['images'])
        categories_num = len(raw_data['categories'])
 
        print("{0}生成的json有 {1} 个图片".format(x, len(raw_data['images'])))
        print("{0}生成的json有 {1} 个annotation".format(x, len(raw_data['annotations'])))
 
        #对于image-list进行查找新id且不存在id库,直到新的id出现并分配
        old_imageid = []
        new_imageid = []
        for i in range(next_images_num):
            #追加images的数据
            while(images_num in images):
                images_num += 1
            #将新的id加入匹配库,防止重复
            images.append(images_num)
            #保存新旧id的一一对应关系,方便annotations替换image_id
            old_imageid.append(int(raw_data['images'][i]['id']))
            new_imageid.append(images_num)
            #使用新id
            raw_data['images'][i]['id'] = images_num
            root_data['images'].append(raw_data['images'][i])
 
        #对于annotations-list进行查找新id且不存在id库,直到新的id出现并分配
        for i in range(next_annotations_num):
            #追加annotations的数据
            while(annotations_num in annotations_id):
                annotations_num += 1
            #将新的annotations_id加入匹配库,防止重复
            annotations_id.append(annotations_num)
            #使用新id
            raw_data['annotations'][i]['id'] = annotations_num
            #查到该annotation对应的image_id,并将其替换为已经更新后的image_id
            ind = int(raw_data['annotations'][i]['image_id'])
            #新旧image_id一一对应,通过index旧id取到新id
            try:
                index = old_imageid.index(ind)
            except ValueError as e:
                print("error")
                exit()
            imgid = new_imageid[index]
            raw_data['annotations'][i]['image_id'] = imgid
            root_data['annotations'].append(raw_data['annotations'][i])
    
        #统计这个文件的类别数--可能会重复,要剔除
        #这里我的,categories-id在多个json文件下是一样的,所以没做处理
        raw_categories_count = str(raw_data["categories"]).count('name',0,len(str(raw_data["categories"])))
        for j in range(categories_num):
            root_data["categories"].append(raw_data['categories'][j])
#统计根文件类别数
temp = []
for m in root_data["categories"]:
    if m not in temp:
        temp.append(m)
root_data["categories"] = temp
 
 
 
 
print("共处理 {0} 个json文件".format(file_count))
print("共找到 {0} 个类别".format(str(root_data["categories"]).count('name',0,len(str(root_data["categories"])))))
 
print("最终生成的json有 {0} 个图片".format(len(root_data['images'])))
print("最终生成的json有 {0} 个annotation".format(len(root_data['annotations'])))
 
 
 
json_str = json.dumps(root_data, ensure_ascii=False, indent=1)
#json_str = json.dumps(root_data)
with open('./coco_all_10.26/annotations/instances_val2017.json', 'w') as json_file:
        json_file.write(json_str)
#写出合并文件
 
print("Done!") 

Merging COCO-style JSON files

Revised version:

# -*- coding: utf-8 -*-
import sys, getopt
import os
import json
import cv2
import random
import numpy as np
np.random.seed(41)
import glob
import shutil

#../../anaconda3_py3.7/bin/python json_all.py -i ./2019years_shachepian/coco -j ./break_pad_20191130_20200121_20200206_155415/coco -o ./coco_all


def hebing_json_coco_file(inputfiles, outputfile, flag):

    annotations_id = []
    images_id = []

    annotations_num = 0
    images_num = 0

    for x in range(len(inputfiles)):
        if x == 0:
            # 第一个文件作为root
            root_data = load_json(inputfiles[x])
            annotations_num = len(root_data['annotations'])
            images_num = len(root_data['images'])
            print('\n%s, root, annotations_num为:'%inputfiles[x], annotations_num)
            print('\n%s, root, images_num:'%inputfiles[x], images_num)

            for key1 in range(annotations_num):
                annotations_id.append(int(root_data['annotations'][key1]['id']))
            for key2 in range(images_num):
                images_id.append(int(root_data['images'][key2]['id']))

        else:
            raw_data = load_json(inputfiles[x])

            next_annotations_num = len(raw_data['annotations'])
            next_images_num = len(raw_data['images'])
            categories_num = len(raw_data['categories'])

            print('\n%s, next, annotations_num为:'%inputfiles[x], next_annotations_num)
            print('\n%s, next, images_num:'%inputfiles[x], next_images_num)

            old_imageid = []
            new_imageid = []

            #插空获得不同的id
            for i in range(next_images_num):
                # 追加images的数据
                while (images_num in images_id):
                    images_num += 1
                # 将新的id加入匹配库,防止重复
                images_id.append(images_num)
                # 保存新旧id的一一对应关系,方便annotations替换image_id
                old_imageid.append(int(raw_data['images'][i]['id']))
                new_imageid.append(images_num)
                # 使用新id
                raw_data['images'][i]['id'] = images_num
                root_data['images'].append(raw_data['images'][i])

            for i in range(next_annotations_num):
                # 追加annotations的数据
                while (annotations_num in annotations_id):
                    annotations_num += 1
                # 将新的annotations_id加入匹配库,防止重复
                annotations_id.append(annotations_num)
                # 使用新id
                raw_data['annotations'][i]['id'] = annotations_num
                # 查到该annotation对应的image_id,并将其替换为已经更新后的image_id
                ind = int(raw_data['annotations'][i]['image_id'])
                # 新旧image_id一一对应,通过index旧id取到新id
                try:
                    index = old_imageid.index(ind)
                except ValueError as e:
                    print("error")
                    exit()
                imgid = new_imageid[index]
                raw_data['annotations'][i]['image_id'] = imgid
                root_data['annotations'].append(raw_data['annotations'][i])

            # 统计这个文件的类别数--可能会重复,要剔除
            # 这里我的,categories-id在多个json文件下是一样的,所以没做处理
            raw_categories_count = str(raw_data["categories"]).count('name', 0, len(str(raw_data["categories"])))
            for j in range(categories_num):
                root_data["categories"].append(raw_data['categories'][j])

    # 统计根文件类别数
    temp = []
    for m in root_data["categories"]:
        if m not in temp:
            temp.append(m)
    root_data["categories"] = temp

    print("\ncategories:\n")
    print("\n", temp, "\n")

    print("最终生成的json有 {0} 个图片".format(len(root_data['images'])))
    print("最终生成的json有 {0} 个annotation".format(len(root_data['annotations'])))

    json_str = json.dumps(root_data, ensure_ascii=False, indent=1)
    # json_str = json.dumps(root_data)
    savepath = '%s/annotations/instances_%s2017.json' % (outputfile, flag)
    #print('\nsavepath:', savepath)
    with open(savepath, 'w') as json_file:
        json_file.write(json_str)


def load_json(filenamejson):
    with open(filenamejson, 'r') as f:
        raw_data = json.load(f)
    return raw_data

def mkdir_os(path):
    if not os.path.exists(path):
        os.makedirs(path)

def main(argv):
    inputFlag = 1
    if inputFlag:
        inputfile1 = ''
        outputfile = ''

        try:
            opts, args = getopt.getopt(argv,"hi:o:",["ifile1=", "ofile="])
        except getopt.GetoptError:
            print('test.py -i <inputfile1> -o <outputfile>')
            sys.exit(2)
        for opt, arg in opts:
            if opt == '-h':
                print('test.py -i <inputfile1> -o <outputfile>')
                sys.exit()
            elif opt in ("-i", "--ifile1"):
                inputfile1 = arg
            elif opt in ("-o", "--ofile"):
                outputfile = arg
    else:
        inputfile1 = './aaaaa'
        outputfile = './coco_all'

    print('\n输入的文件1为:', inputfile1)
    print('\n输出的文件为:', outputfile)
    mkdir_os(outputfile)

    if not os.path.exists(inputfile1):
        print('\n没有输入的文件1为:', inputfile1)
        exit()

    trainimg = os.listdir(inputfile1)

    valfiles = []
    for lab in trainimg:
        if ".json" in lab:
            valfiles.append(os.path.join(inputfile1, lab))
    # 创建文件
    if not os.path.exists("%s/annotations/"%outputfile):
        os.makedirs("%s/annotations/"%outputfile)
    if not os.path.exists("%s/val2017/"%outputfile):
        os.makedirs("%s/val2017"%outputfile)

    hebing_json_coco_file(valfiles, outputfile, "val")

if __name__ == "__main__":
   main(sys.argv[1:])
   print("Done!")

 

Latest modification, combining the functionality of the scripts above into a single script:

# -*- coding:utf-8 -*-
#https://blog.csdn.net/u010103202/article/details/81635436
#labelme标注的数据分析
#https://blog.csdn.net/wc781708249/article/details/79595174
#数据先转换成lableme,然后调用labelme的函数转换为VOC
#https://github.com/wkentaro/labelme/tree/master/examples/semantic_segmentation


import sys
reload(sys)
sys.setdefaultencoding('utf8')
import argparse
import json
import os
import os.path as osp
import warnings
import copy
import numpy as np
import PIL.Image
from skimage import io
import yaml
import cv2
import random
import glob
import shutil
from sklearn.model_selection import train_test_split
np.random.seed(41)

def mkdir_os(path):
    if not os.path.exists(path):
        os.makedirs(path)

# 数据地址
orijsonpath = "shachepian20191014_2_20191018_133128.json"
dataname = "shachepian" #--------------------
#0为背景
classname_to_id = {dataname: 1}
ori_path = "./images_shachepian"
ori_path_new = "./images_transpose"
save_train = "./seg/train"
save_test = "./seg/test"
json_train = "./json/train"
json_test = "./json/test"
mkdir_os(ori_path_new)
mkdir_os(save_train)
mkdir_os(save_test)
mkdir_os(json_train)
mkdir_os(json_test)
train_txt = open('train.txt', "w")

#coco数据类型转换
class Lableme2CoCo:
    def __init__(self):
        self.images = []
        self.annotations = []
        self.categories = []
        self.img_id = 0
        self.ann_id = 0

    def save_coco_json(self, instance, save_path):
        import io
        #json.dump(instance, io.open(save_path, 'w', encoding='utf-8'), ensure_ascii=False, indent=1)  # indent=2 更加美观显示
        with io.open(save_path, 'w', encoding="utf-8") as outfile:
            my_json_str = json.dumps(instance, ensure_ascii=False, indent=1)
            if isinstance(my_json_str, str):
                my_json_str = my_json_str.decode("utf-8")
            outfile.write(my_json_str)

    # 由json文件构建COCO
    def to_coco(self, json_path_list, ori_path_new):
        self._init_categories()
        for json_path in json_path_list:
            obj = self.read_jsonfile(json_path)
            self.images.append(self._image(obj, json_path, ori_path_new))
            shapes = obj['shapes']
            for shape in shapes:
                annotation = self._annotation(shape)
                self.annotations.append(annotation)
                self.ann_id += 1
            self.img_id += 1
        instance = {}
        instance['info'] = 'spytensor created'
        instance['license'] = ['license']
        instance['images'] = self.images
        instance['annotations'] = self.annotations
        instance['categories'] = self.categories
        return instance

    # 构建类别
    def _init_categories(self):
        for k, v in classname_to_id.items():
            category = {}
            category['id'] = v
            category['name'] = k
            self.categories.append(category)

    # 构建COCO的image字段
    def _image(self, obj, path, ori_path_new):
        image = {}
        from labelme import utils
        #img_x = utils.img_b64_to_arr(obj['imageData'])
        #---------------------------------------
        print(str(obj['imagePath']))
        name = str(obj['imagePath']).split('/')[-1]
        newname = os.path.join(ori_path_new,name)
        img_x = cv2.imread(newname)
        h, w = img_x.shape[:-1]
        image['height'] = h
        image['width'] = w
        image['id'] = self.img_id
        image['file_name'] = os.path.basename(path).replace("json", jpgpng)
        return image

    # 构建COCO的annotation字段
    def _annotation(self, shape):
        label = shape['label']
        points = shape['points']
        annotation = {}
        annotation['id'] = self.ann_id
        annotation['image_id'] = self.img_id
        annotation['category_id'] = int(classname_to_id[label])
        annotation['segmentation'] = [np.asarray(points).flatten().tolist()]
        annotation['bbox'] = self._get_box(points)
        if int(shape['flags']) == 0:
            annotation['iscrowd'] = 0
            #annotation['ignore'] = 0
        elif int(shape['flags']) == 1:
            annotation['iscrowd'] = 1
            #annotation['ignore'] = 1
            
        annotation['area'] = 1.0
        return annotation

    # 读取json文件,返回一个json对象
    def read_jsonfile(self, path):
        import io
        #with io.open(path, "r", encoding='utf-8') as f:
        with open(path, "r") as f:
            return json.load(f)

    # COCO的格式: [x1,y1,w,h] 对应COCO的bbox格式
    def _get_box(self, points):
        min_x = min_y = np.inf
        max_x = max_y = 0
        for x, y in points:
            min_x = min(min_x, x)
            min_y = min(min_y, y)
            max_x = max(max_x, x)
            max_y = max(max_y, y)
        return [min_x, min_y, max_x - min_x, max_y - min_y]

# 获取数据
data = []
savedata = []
# 判断是否已经处理过
if os.path.exists("./saved.json")==True:
    with open("./saved.json") as f:
        for line in f:
            data.append(json.loads(line))
else:
    with open(orijsonpath) as f:
        for line in f:
            data.append(json.loads(line))
num = 0
lendata_num = 0
count = len(data)
trainimg = os.listdir(ori_path)
# 遍历获取的数据
for lab in range(count):
    num += 1
    #print num,"/",count
    onedate = data[lab]
    name = onedate["url_image"]
    name = str(name).split("/")[-1]
    jpgpng = name.split(".")[-1]
    if name not in trainimg:
        continue
    img = cv2.imread(os.path.join(ori_path,name))
    if img is None:
        continue
    temp_hh,temp_ww,c = img.shape
    hh = temp_hh
    ww = temp_ww
    if temp_hh > temp_ww:
        srcCopy = cv2.transpose(img)
        img = cv2.flip(srcCopy, 0)
        tempimg = copy.deepcopy(img)
        cv2.imwrite(os.path.join(ori_path_new,name),tempimg)
        hh,ww,c = img.shape
    else:
        cv2.imwrite(os.path.join(ori_path_new,name),img)
    
    tempNum = random.randint(1,10)
    point_size = 3
    thickness = 4
    if(len(onedate["result"])==0):
        continue
    if 'data' in onedate["result"] or 'data' in onedate["result"][0]:
        
        json_jpg={}
        json_jpg["imagePath"] = str(os.path.join(ori_path_new,name))
        json_jpg["imageData"] = None
        shapes=[]

        for key in range(len(onedate["result"])):
            ndata = onedate["result"][key]["data"]
            if len(ndata)< 8:
                continue
            # 判断,如果图片是立着的,放平它,保证,宽大高小
            if temp_hh > temp_ww:
                for k in range(len(ndata)/2):
                    temp1 = ndata[2*k]
                    temp2 = ndata[2*k+1]
                    ndata[2*k]= temp2
                    ndata[2*k+1]= temp_ww - temp1 -1
            # 修改原始,方便保存
            onedate["result"][key]["data"] = ndata

            points=[]
            # ignore 黄色标出
            if onedate["result"][key]["tagtype"] in "purpose2":
                for k in range(len(ndata)/2):
                    cv2.circle(img, (ndata[2*k],ndata[2*k+1]), point_size, (0, 255, 255), thickness)
                    points.append([ndata[2*k],ndata[2*k+1]])
            # add 红色标出
            elif onedate["result"][key]["tagtype"] in "purpose1":
                for k in range(len(ndata)/2):
                    cv2.circle(img, (ndata[2*k],ndata[2*k+1]), point_size, (0, 0, 255), thickness)
                    points.append([ndata[2*k],ndata[2*k+1]])
            # 特殊情况 蓝色标出
            else:
                for k in range(len(ndata)/2):
                    cv2.circle(img, (ndata[2*k],ndata[2*k+1]), point_size, (255, 0, 0), thickness)
                    points.append([ndata[2*k],ndata[2*k+1]])

            one_shape = {}
            one_shape["line_color"] = None
            one_shape["shape_type"] = "polygon"
            one_shape["points"] = points
            # 判断是否是ignore或者特殊情况,给出flag
            # ignore
            if onedate["result"][key]["tagtype"] in "purpose2":
                one_shape["flags"] = 1
            # add
            elif onedate["result"][key]["tagtype"] in "purpose1":
                one_shape["flags"] = 0
            # 特殊情况
            else:
                one_shape["flags"] = 1

            one_shape["fill_color"] = None
            one_shape["label"] = dataname
            shapes.append(one_shape)

        json_jpg["shapes"] = shapes
        json_jpg["version"] = "3.16.7"
        json_jpg["flags"] = {}
        json_jpg["fillColor"] = [
                                    255, 
                                    0, 
                                    0, 
                                    128
                                ]
        json_jpg["lineColor"] = [
                                    0, 
                                    255, 
                                    0, 
                                    128
                                ]
        json_jpg["imageWidth"] = ww
        json_jpg["imageHeight"] = hh
        #jsonData = json.dumps(json_jpg, ensure_ascii=False, indent=1)
        jsonData = json.dumps(json_jpg, indent=1)
        jsonname = name.split(".")[0]
        jsonname = jsonname+".json"
        #分割的保存
        if tempNum == 1 or tempNum == 2:
            cv2.imwrite(os.path.join(save_test,name),img)
            fileObject = open(os.path.join(json_test,jsonname), 'w')
            fileObject.write(jsonData)
            fileObject.close()
        else:
            cv2.imwrite(os.path.join(save_train,name),img)
            fileObject = open(os.path.join(json_train,jsonname), 'w')
            fileObject.write(jsonData)
            fileObject.close()
    else:
        continue
    txtname = name.split(".")[0]
    train_txt.write(txtname)
    train_txt.write("\n")
    # 将满足整个流程的原始json保存
    savedata.append(onedate)
print("lendata_num:",lendata_num)
train_txt.close()
with open("./saved.json", 'w') as filejson:
    for tempjson in savedata:
        json_str = json.dumps(tempjson)
        filejson.write(json_str)
        filejson.write('\n')

saved_coco_path = "./"
# 创建文件
if not os.path.exists("%scoco/annotations/"%saved_coco_path):
    os.makedirs("%scoco/annotations/"%saved_coco_path)
if not os.path.exists("%scoco/images/train2017/"%saved_coco_path):
    os.makedirs("%scoco/images/train2017"%saved_coco_path)
if not os.path.exists("%scoco/images/val2017/"%saved_coco_path):
    os.makedirs("%scoco/images/val2017"%saved_coco_path)

# Collect the json files for the training split
json_list_train = glob.glob(json_train + "/*.json")
# Collect the json files for the test split
json_list_test = glob.glob(json_test + "/*.json")
print("train_n:", len(json_list_train), 'val_n:', len(json_list_test))

# 把训练集转化为COCO的json格式
if len(json_list_train):
    l2c_train = Lableme2CoCo()
    train_instance = l2c_train.to_coco(json_list_train, ori_path_new)
    l2c_train.save_coco_json(train_instance, '%scoco/annotations/instances_train2017.json'%saved_coco_path)
    for file in json_list_train:
        name = file.split('/')[-1]
        name = os.path.join(ori_path_new,name)
        shutil.copy(name.replace("json", jpgpng),"%scoco/images/train2017/"%saved_coco_path)

if len(json_list_test):
    # 把验证集转化为COCO的json格式
    l2c_val = Lableme2CoCo()
    val_instance = l2c_val.to_coco(json_list_test, ori_path_new)
    l2c_val.save_coco_json(val_instance, '%scoco/annotations/instances_val2017.json'%saved_coco_path)
    for file in json_list_test:
        name = file.split('/')[-1]
        name = os.path.join(ori_path_new,name)
        shutil.copy(name.replace("json", jpgpng),"%scoco/images/val2017/"%saved_coco_path)

 

 

 

 

Latest revised version

-----------------------

Generating COCO:

# -*- coding: utf-8 -*-
import sys, getopt
import os
import json
import cv2
import random
import numpy as np
np.random.seed(41)
import glob
import shutil

#../../../anaconda3_py3.7/bin/python main.py -i break_pad_20191130_20200121 -j break_pad_20191130_20200121_20200206_155415.json -c shachepian -o ./coco

#shachepian
#./tools/dist_train.sh configs/mask_rcnn_r50_fpn_1x.py 2 0,1 hsr_break_pad_20191130_20200121.log
#marker
#./tools/dist_train.sh configs/mask_rcnn_r50_fpn_1x.py 2 0,1 hsr_break_pad_20191130_20200121.log
#wires
#./tools/dist_train.sh configs/faster_rcnn_r50_fpn_1x.py 2 0,1 hsr_break_pad_20191130_20200121.log

#./tools/dist_test.sh configs/mask_rcnn_r50_fpn_1x.py work_dirs/mask_rcnn_r50_fpn_1x/epoch_380.pth 1 --eval bbox segm --out temp_pred/maskrcnn_out.pkl

'''
#!/usr/bin/env bash

#PYTHON=${PYTHON:-"python"}
PYTHON=/root/train/results/new/anaconda3_py3.7/bin/python
CONFIG=$1
GPUS=$2
GPUNAME=$3
OUTNAME=$4
nohup $PYTHON -m torch.distributed.launch --nproc_per_node=$GPUS \
    $(dirname "$0")/train.py $CONFIG --gpus $GPUNAME  --launcher pytorch ${@:4}  > $OUTNAME 2>&1 &
'''

def mkdir_os(path):
    if not os.path.exists(path):
        os.makedirs(path)

#coco数据类型转换
class Lableme2CoCo:
    def __init__(self, classname_to_id, jpgpng):
        self.images = []
        self.annotations = []
        self.categories = []
        self.img_id = 0
        self.ann_id = 0
        self.classname_to_id = classname_to_id
        self.jpgpng = jpgpng

    def save_coco_json(self, instance, save_path):
        import io
        #json.dump(instance, io.open(save_path, 'w', encoding='utf-8'), ensure_ascii=False, indent=1)  # indent=2 更加美观显示
        with io.open(save_path, 'w', encoding="utf-8") as outfile:
            my_json_str = json.dumps(instance, ensure_ascii=False, indent=1)
            # not needed in Python 3: json.dumps already returns str
            # if isinstance(my_json_str, str):
            #     my_json_str = my_json_str.decode("utf-8")
            outfile.write(my_json_str)

    # 由json文件构建COCO
    def to_coco(self, json_path_list, ori_path_new):
        self._init_categories()
        for json_path in json_path_list:
            obj = self.read_jsonfile(json_path)
            self.images.append(self._image(obj, json_path, ori_path_new))
            shapes = obj['shapes']
            for shape in shapes:
                annotation = self._annotation(shape)
                self.annotations.append(annotation)
                self.ann_id += 1
            self.img_id += 1

        instance = {}
        instance['info'] = 'spytensor created'
        instance['license'] = ['license']
        instance['images'] = self.images
        instance['annotations'] = self.annotations
        instance['categories'] = self.categories
        return instance

    # 构建类别
    def _init_categories(self):
        for k, v in self.classname_to_id.items():
            category = {}
            category['id'] = v
            category['name'] = k
            self.categories.append(category)

    # 构建COCO的image字段
    def _image(self, obj, path, ori_path):
        image = {}
        #from labelme import utils
        #img_x = utils.img_b64_to_arr(obj['imageData'])
        #---------------------------------------
        #print(str(obj['imagePath']))
        name = str(obj['imagePath']).split('/')[-1]
        newname = os.path.join(ori_path,name)
        img_x = cv2.imread(newname)
        if img_x is None:
            print('\nLableme2CoCo--error:')
            exit()
        h, w = img_x.shape[:-1]
        image['height'] = h
        image['width'] = w
        image['id'] = self.img_id
        image['file_name'] = os.path.basename(path).replace("json", self.jpgpng)
        return image

    # 构建COCO的annotation字段
    def _annotation(self, shape):
        label = shape['label']
        points = shape['points']
        annotation = {}
        annotation['id'] = self.ann_id
        annotation['image_id'] = self.img_id
        annotation['category_id'] = int(self.classname_to_id[label])
        annotation['segmentation'] = [np.asarray(points).flatten().tolist()]
        annotation['bbox'] = self._get_box(points)
        if int(shape['flags']) == 0:
            annotation['iscrowd'] = 0
            #annotation['ignore'] = 0
        elif int(shape['flags']) == 1:
            annotation['iscrowd'] = 1
            #annotation['ignore'] = 1

        annotation['area'] = 1.0
        return annotation

    # 读取json文件,返回一个json对象
    def read_jsonfile(self, path):
        import io
        #with io.open(path, "r", encoding='utf-8') as f:
        with open(path, "r") as f:
            return json.load(f)

    # COCO的格式: [x1,y1,w,h] 对应COCO的bbox格式
    def _get_box(self, points):
        min_x = min_y = np.inf
        max_x = max_y = 0
        for x, y in points:
            min_x = min(min_x, x)
            min_y = min(min_y, y)
            max_x = max(max_x, x)
            max_y = max(max_y, y)
        return [min_x, min_y, max_x - min_x, max_y - min_y]



def main(argv):

    inputFlag = 0
    process2LabelmeFlag = 0

    if inputFlag:
        inputfile = ''
        jsonfile = ''
        classname = ''
        outputfile = ''

        try:
            opts, args = getopt.getopt(argv,"hi:j:c:o:",["ifile=","jfile=","cname=","ofile="])
        except getopt.GetoptError:
            print('test.py -i <inputfile> -j <jsonfile> -c <classname> -o <outputfile>')
            sys.exit(2)
        for opt, arg in opts:
            if opt == '-h':
                print('test.py -i <inputfile> -j <jsonfile> -c <classname> -o <outputfile>')
                sys.exit()
            elif opt in ("-i", "--ifile"):
                inputfile = arg
            elif opt in ("-j", "--jfile"):
                jsonfile = arg
            elif opt in ("-c", "--cname"):
                classname = arg
            elif opt in ("-o", "--ofile"):
                outputfile = arg
    else:
        inputfile = './break_pad_20191130_20200121'
        jsonfile = 'break_pad_20191130_20200121_20200206_155415.json'
        classname = 'shachepian'
        outputfile = './coco_3'

    print('\n输入的文件为:', inputfile)
    print('\n输入的json为:', jsonfile)
    print('\nclassname为:', classname)
    print('\n输出的文件为:', outputfile)

    save_train = "./seg/train"
    save_test = "./seg/test"
    json_train = "./json/train"
    json_test = "./json/test"
    mkdir_os(save_train)
    mkdir_os(save_test)
    mkdir_os(json_train)
    mkdir_os(json_test)

    mkdir_os(outputfile)

    #0为背景
    classname_to_id = {classname: 1}


    if process2LabelmeFlag:
        # 获取数据
        data = []
        with open(jsonfile) as f:
            for line in f:
                data.append(json.loads(line))
        num = 0
        lendata_num = 0
        count = len(data)
        trainimg = os.listdir(inputfile)
        # 遍历获取的数据
        train_txt = open('train.txt',"w")
        for lab in range(count):
            onedate = data[lab]
            name = onedate["url_image"]
            name = str(name).split("/")[-1]
            jpgpng = name.split(".")[-1]
            if name not in trainimg:
                if name.replace(".jpg",".png") not in trainimg:
                    continue
                else:
                    name = name.replace(".jpg",".png")
                    jpgpng = "png"
            img = cv2.imread(os.path.join(inputfile,name))
            if img is None:
                continue
            temp_hh,temp_ww = img.shape[:2]
            hh = temp_hh
            ww = temp_ww
            if temp_hh > temp_ww:
                print("error: temp_hh > temp_ww\n")
                exit()

            #train-test 随机数
            tempNum = random.randint(1,10)

            point_size = 3
            thickness = 4
            if(len(onedate["result"])==0):
                continue
            if 'data' in onedate["result"] or 'data' in onedate["result"][0]:

                json_jpg={}
                json_jpg["imagePath"] = str(os.path.join(inputfile,name))
                json_jpg["imageData"] = None
                shapes=[]

                for key in range(len(onedate["result"])):
                    ndata = onedate["result"][key]["data"]
                    if len(ndata)< 8:
                        continue
                    points=[]
                    # ignore 黄色标出
                    if onedate["result"][key]["tagtype"] in "purpose2":
                        for k in range(len(ndata)//2):
                            cv2.circle(img, (ndata[2*k],ndata[2*k+1]), point_size, (0, 255, 255), thickness)
                            points.append([ndata[2*k],ndata[2*k+1]])
                    # add 红色标出
                    elif onedate["result"][key]["tagtype"] in "purpose1":
                        for k in range(len(ndata)//2):
                            cv2.circle(img, (ndata[2*k],ndata[2*k+1]), point_size, (0, 0, 255), thickness)
                            points.append([ndata[2*k],ndata[2*k+1]])
                    # 特殊情况 蓝色标出
                    else:
                        for k in range(len(ndata)//2):
                            cv2.circle(img, (ndata[2*k],ndata[2*k+1]), point_size, (255, 0, 0), thickness)
                            points.append([ndata[2*k],ndata[2*k+1]])

                    one_shape = {}
                    one_shape["line_color"] = None
                    one_shape["shape_type"] = "polygon"
                    one_shape["points"] = points
                    # 判断是否是ignore或者特殊情况,给出flag
                    # ignore
                    if onedate["result"][key]["tagtype"] in "purpose2":
                        one_shape["flags"] = 1
                    # add
                    elif onedate["result"][key]["tagtype"] in "purpose1":
                        one_shape["flags"] = 0
                    # 特殊情况
                    else:
                        one_shape["flags"] = 1

                    one_shape["fill_color"] = None
                    one_shape["label"] = classname
                    shapes.append(one_shape)

                json_jpg["shapes"] = shapes
                json_jpg["version"] = "3.16.7"
                json_jpg["flags"] = {}
                json_jpg["fillColor"] = [
                                            255,
                                            0,
                                            0,
                                            128
                                        ]
                json_jpg["lineColor"] = [
                                            0,
                                            255,
                                            0,
                                            128
                                        ]
                json_jpg["imageWidth"] = ww
                json_jpg["imageHeight"] = hh
                #jsonData = json.dumps(json_jpg, ensure_ascii=False, indent=1)
                jsonData = json.dumps(json_jpg, indent=1)
                jsonname = name.split(".")[0]
                jsonname = jsonname+".json"
                #分割的保存
                if tempNum == 1 or tempNum == 2:
                    cv2.imwrite(os.path.join(save_test,name),img)
                    fileObject = open(os.path.join(json_test,jsonname), 'w')
                    fileObject.write(jsonData)
                    fileObject.close()
                else:
                    cv2.imwrite(os.path.join(save_train,name),img)
                    fileObject = open(os.path.join(json_train,jsonname), 'w')
                    fileObject.write(jsonData)
                    fileObject.close()
            else:
                continue
            txtname = name.split(".")[0]
            train_txt.write(txtname)
            train_txt.write("\n")
            num += 1
            print(num,"/",count)
        print("lendata_num:",num)
        train_txt.close()
    else:
        jpgpng = "png"
        print('\njpgpng文件后缀人为设置为,如出bug需要修改:', jpgpng)

    # 创建文件
    if not os.path.exists("%s/annotations/"%outputfile):
        os.makedirs("%s/annotations/"%outputfile)
    if not os.path.exists("%s/train2017/"%outputfile):
        os.makedirs("%s/train2017"%outputfile)
    if not os.path.exists("%s/val2017/"%outputfile):
        os.makedirs("%s/val2017"%outputfile)

    # Collect the json files for the training split
    json_list_train = glob.glob(json_train + "/*.json")
    # Collect the json files for the test split
    json_list_test = glob.glob(json_test + "/*.json")

    print("train_n:", len(json_list_train), 'val_n:', len(json_list_test))

    # 把训练集转化为COCO的json格式
    if len(json_list_train):
        l2c_train = Lableme2CoCo(classname_to_id, jpgpng)
        train_instance = l2c_train.to_coco(json_list_train, inputfile)
        l2c_train.save_coco_json(train_instance, '%s/annotations/instances_train2017.json'%outputfile)
        for file in json_list_train:
            name = file.split('/')[-1]
            name = os.path.join(inputfile,name)
            shutil.copy(name.replace("json", jpgpng),"%s/train2017/"%outputfile)

    if len(json_list_test):
        # 把验证集转化为COCO的json格式
        l2c_val = Lableme2CoCo(classname_to_id, jpgpng)
        val_instance = l2c_val.to_coco(json_list_test, inputfile)
        l2c_val.save_coco_json(val_instance, '%s/annotations/instances_val2017.json'%outputfile)
        for file in json_list_test:
            name = file.split('/')[-1]
            name = os.path.join(inputfile,name)
            shutil.copy(name.replace("json", jpgpng),"%s/val2017/"%outputfile)

if __name__ == "__main__":
   main(sys.argv[1:])
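
To eyeball whether the converted polygons still line up with the images, one option is to rasterize the annotations with pycocotools and overlay them. A rough sketch, assuming pycocotools is installed, the default ./coco_3 output above, and that the chosen image has at least one annotation; the output name check_overlay.png is just a placeholder:

import os
import cv2
from pycocotools.coco import COCO

coco = COCO('./coco_3/annotations/instances_train2017.json')
img_id = coco.getImgIds()[0]
img_info = coco.loadImgs(img_id)[0]
# the conversion script above copies the images into <outputfile>/train2017/
img = cv2.imread(os.path.join('./coco_3/train2017', img_info['file_name']))

for ann in coco.loadAnns(coco.getAnnIds(imgIds=img_id)):
    mask = coco.annToMask(ann)      # HxW array of 0/1 for this polygon
    img[mask == 1] = (0, 0, 255)    # paint the instance region red (BGR)
cv2.imwrite('check_overlay.png', img)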

Merging two JSON files

# -*- coding: utf-8 -*-
import sys, getopt
import os
import json
import cv2
import random
import numpy as np
np.random.seed(41)
import glob
import shutil

#../../../anaconda3_py3.7/bin/python main.py --ifile1 ./coco1 --ifile2 ./coco2 -o ./coco_all

def hebing_json_coco(inputfiles, outputfile, flag):

    annotations_id = []
    images_id = []

    for x in range(len(inputfiles)):
        if x == 0:
            # 第一个文件作为root
            root_data = load_json(inputfiles[x])
            annotations_num = len(root_data['annotations'])
            images_num = len(root_data['images'])
            print('\n%s, root, annotations_num为:'%inputfiles[x], annotations_num)
            print('\n%s, root, images_num:'%inputfiles[x], images_num)

            for key1 in range(annotations_num):
                annotations_id.append(int(root_data['annotations'][key1]['id']))
            for key2 in range(images_num):
                images_id.append(int(root_data['images'][key2]['id']))

            max_annotations_id = max(annotations_id)
            max_images_id = max(images_id)

            print('\n%s, root, ---------------------max_annotations_id---------------------:'%inputfiles[x], max_annotations_id)
            print('\n%s, root, ---------------------max_images_id---------------------:'%inputfiles[x], max_images_id)

        else:
            raw_data = load_json(inputfiles[x])

            next_annotations_num = len(raw_data['annotations'])
            next_images_num = len(raw_data['images'])
            categories_num = len(raw_data['categories'])

            print('\n%s, two, annotations_num为:'%inputfiles[x], next_annotations_num)
            print('\n%s, two, images_num:'%inputfiles[x], next_images_num)

            old_imageid = []
            new_imageid = []
            for i in range(next_images_num):
                max_images_id += 1
                # 保存新旧id的一一对应关系,方便annotations替换image_id
                old_imageid.append(int(raw_data['images'][i]['id']))
                new_imageid.append(max_images_id)
                # 使用新id
                raw_data['images'][i]['id'] = max_images_id
                root_data['images'].append(raw_data['images'][i])

            for i in range(next_annotations_num):
                max_annotations_id += 1
                # 使用新id
                raw_data['annotations'][i]['id'] = max_annotations_id
                # 查到该annotation对应的image_id,并将其替换为已经更新后的image_id
                ind = int(raw_data['annotations'][i]['image_id'])
                # 新旧image_id一一对应,通过index旧id取到新id
                try:
                    index = old_imageid.index(ind)
                except ValueError as e:
                    print("error")
                    exit()
                imgid = new_imageid[index]
                raw_data['annotations'][i]['image_id'] = imgid
                root_data['annotations'].append(raw_data['annotations'][i])

            # 统计这个文件的类别数--可能会重复,要剔除
            # 这里我的,categories-id在多个json文件下是一样的,所以没做处理
            raw_categories_count = str(raw_data["categories"]).count('name', 0, len(str(raw_data["categories"])))
            for j in range(categories_num):
                root_data["categories"].append(raw_data['categories'][j])

    # 统计根文件类别数
    temp = []
    for m in root_data["categories"]:
        if m not in temp:
            temp.append(m)
    root_data["categories"] = temp

    print("最终生成的json有 {0} 个图片".format(len(root_data['images'])))
    print("最终生成的json有 {0} 个annotation".format(len(root_data['annotations'])))

    json_str = json.dumps(root_data, ensure_ascii=False, indent=1)
    # json_str = json.dumps(root_data)
    savepath = '%s/annotations/instances_%s2017.json' % (outputfile, flag)
    #print('\nsavepath:', savepath)
    with open(savepath, 'w') as json_file:
        json_file.write(json_str)


def load_json(filenamejson):
    with open(filenamejson, 'r') as f:
        raw_data = json.load(f)
    return raw_data

def mkdir_os(path):
    if not os.path.exists(path):
        os.makedirs(path)

def main(argv):
    inputFlag = 0
    if inputFlag:
        inputfile1 = ''
        inputfile2 = ''
        outputfile = ''

        try:
            # getopt only supports single-character short options, so "-i1"/"-i2"
            # cannot be parsed as written; use the long forms --ifile1/--ifile2.
            opts, args = getopt.getopt(argv, "ho:", ["ifile1=", "ifile2=", "ofile="])
        except getopt.GetoptError:
            print('test.py --ifile1 <inputfile1> --ifile2 <inputfile2> -o <outputfile>')
            sys.exit(2)
        for opt, arg in opts:
            if opt == '-h':
                print('test.py --ifile1 <inputfile1> --ifile2 <inputfile2> -o <outputfile>')
                sys.exit()
            elif opt == "--ifile1":
                inputfile1 = arg
            elif opt == "--ifile2":
                inputfile2 = arg
            elif opt in ("-o", "--ofile"):
                outputfile = arg
    else:
        inputfile1 = './coco_1'
        inputfile2 = './coco_2'
        outputfile = './coco_all'

    print('\n输入的文件1为:', inputfile1)
    print('\n输入的文件2为:', inputfile2)
    print('\n输出的文件为:', outputfile)
    mkdir_os(outputfile)

    if not os.path.exists(inputfile1):
        print('\n没有输入的文件1为:', inputfile1)
    if not os.path.exists(inputfile2):
        print('\n没有输入的文件2为:', inputfile2)

    trainfiles = []
    testfiles = []
    trainfiles.append(os.path.join(inputfile1,"annotations/instances_train2017.json"))
    trainfiles.append(os.path.join(inputfile2,"annotations/instances_train2017.json"))

    testfiles.append(os.path.join(inputfile1,"annotations/instances_val2017.json"))
    testfiles.append(os.path.join(inputfile2,"annotations/instances_val2017.json"))


    # 创建文件
    if not os.path.exists("%s/annotations/"%outputfile):
        os.makedirs("%s/annotations/"%outputfile)
    if not os.path.exists("%s/train2017/"%outputfile):
        os.makedirs("%s/train2017"%outputfile)
    if not os.path.exists("%s/val2017/"%outputfile):
        os.makedirs("%s/val2017"%outputfile)

    hebing_json_coco(trainfiles, outputfile, "train")
    hebing_json_coco(testfiles, outputfile, "val")



if __name__ == "__main__":
   main(sys.argv[1:])
   print("Done!")

 
