tensorflow YOLO v3 训练自己的数据集(VOC版本)

tensorflow YOLO v3 这个项目主要针对的是voc格式的数据集(也就是标签为XML格式的),如果我们的标签是XML格式的,那么就可以跳过下一步直接执行第二步,如果我们的标签是json格式的,那么我们需要把json格式的coco数据集转换成voc格式的:

"""
把coco数据集合的所有标注转换到voc格式,不改变图片命名方式,
注意,原来有一些图片是黑白照片,检测出不是 RGB 图像,这样的图像不会被放到新的文件夹中
更新日期:2020-6-23
"""
from pycocotools.coco import COCO  # 这个包可以从git上下载https://github.com/cocodataset/cocoapi/tree/master/PythonAPI,也可以直接用修改后的coco.py
import os, cv2, shutil
from lxml import html,objectify
from tqdm import tqdm
from PIL import Image

CKimg_dir = '/home/zhp/Downloads/coco_to_voc/images'
CKanno_dir = '/home/zhp/Downloads/coco_to_voc/annotations'


# 若模型保存文件夹不存在,创建模型保存文件夹,若存在,删除重建
def mkr(path):
    """Ensure *path* exists as an empty directory.

    If the directory already exists it is deleted and recreated, so the
    caller always starts from a clean, empty folder.
    """
    if os.path.exists(path):
        shutil.rmtree(path)
    # makedirs also creates missing parent directories; for single-level
    # paths this is backward compatible with the original os.mkdir calls,
    # which were duplicated in both branches.
    os.makedirs(path)


def save_annotations(filename, objs, filepath):
    """Write one VOC-style XML annotation file and copy the image.

    filename: bare image file name (e.g. '000001.jpg'); the xml gets the
              same stem with an .xml extension.
    objs:     list of [class_name, score, xmin, ymin, xmax, ymax] entries.
    filepath: full path of the source image.

    Non-RGB images (e.g. grayscale) are skipped entirely, matching the note
    at the top of this script.
    """
    annopath = CKanno_dir + "/" + filename[:-3] + "xml"  # output xml path
    dst_path = CKimg_dir + "/" + filename
    # Check the color mode first, before any heavier work; the original
    # read the image with cv2 before this check, wasting a full decode
    # for images that get skipped.
    with Image.open(filepath) as im:
        if im.mode != "RGB":
            print(filename + " not a RGB image")
            return
    img = cv2.imread(filepath)
    # cv2.imread returns None instead of raising on a bad/missing file.
    if img is None:
        print(filename + " could not be read")
        return
    shutil.copy(filepath, dst_path)  # copy the original image into the target folder
    E = objectify.ElementMaker(annotate=False)
    anno_tree = E.annotation(
        E.folder('1'),
        E.filename(filename),
        E.source(
            E.database('CKdemo'),
            E.annotation('VOC'),
            E.image('CK')
        ),
        E.size(
            E.width(img.shape[1]),
            E.height(img.shape[0]),
            E.depth(img.shape[2])
        ),
        E.segmented(0)
    )
    # One <object> element per bounding box.  (The original created an
    # unused second ElementMaker `E2` on every iteration; the nodes were
    # in fact built with `E`, so the extra maker is dropped.)
    for obj in objs:
        anno_tree.append(E.object(
            E.name(obj[0]),
            E.pose(),
            E.truncated("0"),
            E.difficult(0),
            E.bndbox(
                E.xmin(obj[2]),
                E.ymin(obj[3]),
                E.xmax(obj[4]),
                E.ymax(obj[5])
            )
        ))
    html.etree.ElementTree(anno_tree).write(annopath, pretty_print=True)


def showbycv(coco, dataType, img, classes, origin_image_dir, verbose=False):
    """Collect the bounding boxes of one COCO image record and write a VOC xml.

    When *verbose* is true, the boxes and class names are also drawn on the
    image and shown in an OpenCV window (blocks until a key is pressed).
    """
    filename = img['file_name']
    filepath = os.path.join(origin_image_dir, dataType, filename)
    canvas = cv2.imread(filepath)
    ann_ids = coco.getAnnIds(imgIds=img['id'], iscrowd=None)
    objs = []
    for ann in coco.loadAnns(ann_ids):
        name = classes[ann['category_id']]
        if 'bbox' not in ann:
            continue
        x, y, w, h = ann['bbox']
        # COCO boxes are [x, y, width, height]; VOC wants corner coordinates.
        xmin, ymin = int(x), int(y)
        xmax, ymax = int(w + x), int(h + y)
        objs.append([name, 1.0, xmin, ymin, xmax, ymax])
        if verbose:
            cv2.rectangle(canvas, (xmin, ymin), (xmax, ymax), (255, 0, 0))
            cv2.putText(canvas, name, (xmin, ymin), 3, 1, (0, 0, 255))
    save_annotations(filename, objs, filepath)
    if verbose:
        cv2.imshow("img", canvas)
        cv2.waitKey(0)


def catid2name(coco):
    """Build a dict mapping COCO category id -> category name."""
    return {cat['id']: cat['name'] for cat in coco.dataset['categories']}


def get_CK5(origin_anno_dir, origin_image_dir, verbose=False):
    """Convert the train2014 and val2014 COCO annotation files to VOC xml.

    origin_anno_dir:  directory holding instances_{split}.json files.
    origin_image_dir: directory holding the train2014/val2014 image folders.
    verbose:          forwarded to showbycv so boxes can be visualized.
    """
    dataTypes = ['train2014', 'val2014']
    for dataType in dataTypes:
        annFile = 'instances_{}.json'.format(dataType)
        annpath = os.path.join(origin_anno_dir, annFile)
        coco = COCO(annpath)
        classes = catid2name(coco)
        imgIds = coco.getImgIds()
        # imgIds = imgIds[0:1000]  # for testing: convert only a subset
        for imgId in tqdm(imgIds):
            img = coco.loadImgs(imgId)[0]
            # BUG FIX: the original hard-coded verbose=False here, so the
            # caller's flag (set True in main) was silently ignored.
            showbycv(coco, dataType, img, classes, origin_image_dir, verbose=verbose)


def main():
    """Drive the COCO -> VOC conversion with hard-coded local paths."""
    # step 1: a fresh folder that will receive the converted images/xml,
    # with images/ and annotations/ subfolders created (or recreated) below
    base_dir = '/home/zhp/Downloads/coco_to_voc'
    image_dir = os.path.join(base_dir, 'images')
    anno_dir = os.path.join(base_dir, 'annotations')
    for folder in (image_dir, anno_dir):
        mkr(folder)
    origin_image_dir = '/home/zhp/Downloads/coco/'  # step 2: original COCO images
    origin_anno_dir = '/home/zhp/Downloads/coco/annotations'  # step 3: original COCO annotations
    # Toggle: when True, each labeled box is drawn on the image for inspection.
    verbose = True
    get_CK5(origin_anno_dir, origin_image_dir, verbose)


# Script entry point: run the full COCO -> VOC conversion.
if __name__ == "__main__":
    main()

    # split_traintest()

按照以上注释修改路径就可以生成voc格式的数据集。

第二步,先按照VOC数据集官网的格式建立以下格式的目录:

.
└── VOCdevkit     #根目录
    └── VOC2007   #不同年份的数据集,这里只下载了2007的,还有2012等其它年份的
        ├── Annotations        #存放xml文件,与JPEGImages中的图片一一对应,解释图片的内容等等
        ├── ImageSets          #该目录下存放的都是txt文件,txt文件中每一行包含一个图片的名称,末尾会加上±1表示正负样本
        │   ├── Action
        │   ├── Layout
        │   ├── Main
        │   └── Segmentation
        ├── JPEGImages         #存放源图片
        ├── SegmentationClass  #存放的是图片,语义分割相关
        └── SegmentationObject #存放的是图片,实例分割相关

第三步,把自己标记好的标签和对应的图片复制粘贴到刚刚新建的Annotations和JPEGImages文件夹下,这里就不贴图了。

第四步,新建一个Python文件,这里我起的名字是split.py,代码如下:

import os
import random
import sys

# split.py — split the Annotations xml files into trainval/train/val/test
# lists under ImageSets/Main, in the standard VOC layout.
# Usage: python split.py /path/to/VOCdevkit/VOC2007
if len(sys.argv) < 2:
    print("no directory specified, please input target directory")
    exit()

root_path = sys.argv[1]

xmlfilepath = root_path + '/Annotations'

txtsavepath = root_path + '/ImageSets/Main'

if not os.path.exists(root_path):
    print("cannot find such directory: " + root_path)
    exit()

if not os.path.exists(txtsavepath):
    os.makedirs(txtsavepath)

# 90% of all samples go to trainval; 80% of trainval goes to train.
trainval_percent = 0.9
train_percent = 0.8
total_xml = os.listdir(xmlfilepath)
num = len(total_xml)
indices = range(num)  # renamed from `list`, which shadowed the builtin
tv = int(num * trainval_percent)
tr = int(tv * train_percent)
# Store the sampled indices in sets: the loop below does a membership test
# per file, which was O(n) against the original lists.
trainval = set(random.sample(indices, tv))
train = set(random.sample(sorted(trainval), tr))

print("train and val size:", tv)
print("train size:", tr)

# `with` guarantees the four files are closed even if a write fails.
with open(txtsavepath + '/trainval.txt', 'w') as ftrainval, \
        open(txtsavepath + '/test.txt', 'w') as ftest, \
        open(txtsavepath + '/train.txt', 'w') as ftrain, \
        open(txtsavepath + '/val.txt', 'w') as fval:
    for i in indices:
        name = total_xml[i][:-4] + '\n'  # strip the '.xml' extension
        if i in trainval:
            ftrainval.write(name)
            if i in train:
                ftrain.write(name)
            else:
                fval.write(name)
        else:
            ftest.write(name)

直接拷贝这段代码放到split.py中,然后把这个Python文件放到与刚才新建的VOC2007同级目录下,打开终端输入以下命令,执行文件:

python split.py /home/zhp/Downloads/VOCdevkit/VOC2007 ##后面的目录为你自己的VOC2007的路径

执行完成后,就会在你新建的Main文件夹下看到四个.txt文件:

第五步,执行官网代码的voc_annotation.py文件:

import os
import argparse
import xml.etree.ElementTree as ET

def convert_voc_annotation(data_path, data_type, anno_path, use_difficult_bbox=True):
    """Append one line per image to *anno_path* in tensorflow-yolov3 format.

    Each line is: '<image_path> xmin,ymin,xmax,ymax,class_id ...' with one
    comma-group per object.  Images are looked up from the VOC split file
    ImageSets/Main/<data_type>.txt under *data_path*.

    When *use_difficult_bbox* is False, objects marked difficult=1 in the
    xml are skipped.  Returns the number of images in the split file.
    """
    classes = ['person','bicycle','car','motorbike','aeroplane','bus','train','truck',
               'boat','traffic light','fire hydrant','stop sign','parking meter',
               'bench','bird','cat','dog','horse','sheep','cow','elephant','bear',
               'zebra','giraffe','backpack','umbrella','handbag','tie','suitcase',
               'frisbee','skis','snowboard','sports ball','kite','baseball bat',
               'baseball glove','skateboard','surfboard','tennis racket','bottle',
               'wine glass','cup','fork','knife','spoon','bowl','banana','apple',
               'sandwich','orange','broccoli','carrot','hot dog','pizza','donut',
               'cake','chair','sofa','pottedplant','bed','diningtable','toilet',
               'tvmonitor','laptop','mouse','remote','keyboard','cell phone','microwave',
               'oven','toaster','sink','refrigerator','book','clock','vase','scissors',
               'teddy bear','hair drier','toothbrush','couch','dining table','potted plant',
               'tv','motorcycle','airplane']
    img_inds_file = os.path.join(data_path, 'ImageSets', 'Main', data_type + '.txt')
    with open(img_inds_file, 'r') as f:
        image_inds = [line.strip() for line in f.readlines()]

    with open(anno_path, 'a') as f:
        for image_ind in image_inds:
            image_path = os.path.join(data_path, 'JPEGImages', image_ind + '.jpg')
            label_path = os.path.join(data_path, 'Annotations', image_ind + '.xml')
            root = ET.parse(label_path).getroot()
            boxes = []
            for obj in root.findall('object'):
                difficult = obj.find('difficult').text.strip()
                if (not use_difficult_bbox) and (int(difficult) == 1):
                    continue
                class_ind = classes.index(obj.find('name').text.lower().strip())
                bndbox = obj.find('bndbox')
                coords = [bndbox.find(tag).text.strip()
                          for tag in ('xmin', 'ymin', 'xmax', 'ymax')]
                boxes.append(','.join(coords + [str(class_ind)]))
            annotation = ' '.join([image_path] + boxes)
            print(annotation)
            f.write(annotation + "\n")
    return len(image_inds)


# Entry point: regenerate the train/test annotation txt files from scratch.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_path", default="/home/zhp/Downloads/VOCdevkit")  # the folder you created holding all images and annotation files
    parser.add_argument("--train_annotation", default="/media/zhp/RKB_1TB/tensorflow-yolov3-master/data/dataset/voc_train.txt")  # output path for the generated train txt, usually inside your project
    parser.add_argument("--test_annotation",  default="/media/zhp/RKB_1TB/tensorflow-yolov3-master/data/dataset/voc_test.txt") # same as above, for the test txt
    flags = parser.parse_args()

    # Remove stale outputs first, since convert_voc_annotation appends ('a' mode).
    if os.path.exists(flags.train_annotation):os.remove(flags.train_annotation)
    if os.path.exists(flags.test_annotation):os.remove(flags.test_annotation)

    num1 = convert_voc_annotation(os.path.join(flags.data_path, 'VOC2007'), 'trainval', flags.train_annotation, False) # the VOC2007 directory under the folder you created
    num2 = convert_voc_annotation(os.path.join(flags.data_path, 'VOC2007'), 'val', flags.train_annotation, False)
    num3 = convert_voc_annotation(os.path.join(flags.data_path, 'VOC2007'),  'test', flags.test_annotation, False) # the VOC2007 directory under the folder you created
    print('=> The number of image for train is: %d\tThe number of image for test is:%d' %(num1 + num2, num3))

按照注释,将文件中的目录换成自己的目录,执行这个文件就可以生成项目所需要的TXT文件啦!

然后后面的步骤按照github上的readme的提示一步一步来就可以啦!

小白第一次写博客,还望大神们多多指教,我看到之后会及时回复并进行修改的~~

  • 1
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 1
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值