Training object detection on your own data

1. Annotate your dataset with labelImg.

Put the images in JPEGImages and the XML files in Annotations.
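For reference, this is the VOC-style layout assumed throughout this post (using the VOCPolice/VOC2007 root that appears in the paths below):

VOCPolice/
  VOC2007/
    Annotations/        # labelImg XML files
    JPEGImages/         # the images
    ImageSets/
      Main/             # split lists generated in step 2
    train/              # per-split XML folders created in step 3
    test/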

2. Split the data into training and test sets

import os
import random

trainval_percent = 0.66   # fraction of all samples used for train + val
train_percent = 0.5       # fraction of trainval used for train
xmlfilepath = 'Annotations'
txtsavepath = 'ImageSets/Main'
total_xml = os.listdir(xmlfilepath)

num = len(total_xml)
indices = range(num)
tv = int(num * trainval_percent)
tr = int(tv * train_percent)
trainval = random.sample(indices, tv)
train = random.sample(trainval, tr)

ftrainval = open(os.path.join(txtsavepath, 'trainval.txt'), 'w')
ftest = open(os.path.join(txtsavepath, 'test.txt'), 'w')
ftrain = open(os.path.join(txtsavepath, 'train.txt'), 'w')
fval = open(os.path.join(txtsavepath, 'val.txt'), 'w')

for i in indices:
    name = total_xml[i][:-4] + '\n'   # file name without the .xml extension
    if i in trainval:
        ftrainval.write(name)
        if i in train:
            ftrain.write(name)
        else:
            fval.write(name)
    else:
        ftest.write(name)

ftrainval.close()
ftrain.close()
fval.close()
ftest.close()

At this point four files are generated under ImageSets/Main/: trainval.txt, train.txt, val.txt, and test.txt.

3. Based on the contents of train.txt and test.txt, create train and test folders (holding the corresponding XML files)

import os
import shutil

annotations = 'E:/gitcode/tensorflow-model/VOCPolice/VOC2007/Annotations'
list_txt = 'E:/gitcode/tensorflow-model/VOCPolice/VOC2007/ImageSets/Main/test.txt'   # or train.txt
newpath = 'E:/gitcode/tensorflow-model/VOCPolice/VOC2007/test'                       # or .../train

num1 = 0
with open(list_txt, 'r') as f:
    for line in f:
        name = line.strip()
        if not name:
            continue
        num1 += 1
        xml_path = os.path.join(annotations, name + '.xml')
        if os.path.exists(xml_path):
            shutil.copy(xml_path, newpath)
print(num1)   # number of names processed

Now the train and test folders contain exactly the XML files listed in train.txt and test.txt (run the script once for each split).

4. Convert the XML files to TFRecords

Method 1: convert the XML files to CSV, then the CSV to TFRecord

Method 2: directly use create_pascal_tf_record.py from the object_detection folder

Method 2 failed for me, while Method 1 worked, so here is the setup for Method 1:

(1) Convert XML to CSV



'''
function: xml2csv
'''
import os
import glob
import pandas as pd
import xml.etree.ElementTree as ET


def xml_to_csv(path):
    xml_list = []
    for xml_file in glob.glob(path + '/*.xml'):
        tree = ET.parse(xml_file)
        root = tree.getroot()
        size = root.find('size')
        # Look children up by tag name rather than by position, so the
        # script tolerates XML files whose child order differs.
        for member in root.findall('object'):
            bndbox = member.find('bndbox')
            value = (root.find('filename').text,
                     int(size.find('width').text),
                     int(size.find('height').text),
                     member.find('name').text,
                     int(bndbox.find('xmin').text),
                     int(bndbox.find('ymin').text),
                     int(bndbox.find('xmax').text),
                     int(bndbox.find('ymax').text)
                     )
            xml_list.append(value)
    column_name = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']
    xml_df = pd.DataFrame(xml_list, columns=column_name)
    return xml_df


def main():
    for directory in ['train', 'test']:
        project_path = 'E:/gitcode/tensorflow-model/VOCPolice/VOC2007'
        image_path = os.path.join(project_path, directory)
        xml_df = xml_to_csv(image_path)
        xml_df.to_csv('E:/gitcode/tensorflow-model/VOCPolice/VOC2007/{}_labels.csv'.format(directory), index=None)
        print('Successfully converted xml to csv.')


main()

After it runs, there are two new files under VOC2007: train_labels.csv and test_labels.csv.

This step assumes the train and test folders of XML files created earlier sit under project_path.
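For reference, the CSV holds one row per annotated object. A sketch of its contents (the row values here are made up for illustration; only the header is fixed by the script):

filename,width,height,class,xmin,ymin,xmax,ymax
image1.jpg,640,480,police,120,80,260,300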

(2) Convert the CSV files to TFRecords

# generate_tfrecord.py

# -*- coding: utf-8 -*-


"""
Usage:
  # From tensorflow/models/
  # Create train data:
  python generate_tfrecord.py --csv_input=data/tv_vehicle_labels.csv  --output_path=train.record
  # Create test data:
  python generate_tfrecord.py --csv_input=data/test_labels.csv  --output_path=test.record
"""


import os
import io
import pandas as pd
import tensorflow as tf

from PIL import Image
from object_detection.utils import dataset_util
from collections import namedtuple, OrderedDict

os.chdir('E:/gitcode/tensorflow-model/chde222-models-master-MyData1230/models/research/object_detection')

flags = tf.app.flags
flags.DEFINE_string('csv_input', 'E:/gitcode/tensorflow-model/VOCPolice/VOC2007/train_labels.csv', 'Path to the CSV input')
flags.DEFINE_string('output_path', 'train.record', 'Path to output TFRecord')
FLAGS = flags.FLAGS


# TO-DO replace this with label map
def class_text_to_int(row_label):
    if row_label == 'police':     # change to your own class name(s)
        return 1
    else:
        return None


def split(df, group):
    data = namedtuple('data', ['filename', 'object'])
    gb = df.groupby(group)
    return [data(filename, gb.get_group(x)) for filename, x in zip(gb.groups.keys(), gb.groups)]


def create_tf_example(group, path):
    with tf.gfile.GFile(os.path.join(path, '{}'.format(group.filename)), 'rb') as fid:
        encoded_jpg = fid.read()
    encoded_jpg_io = io.BytesIO(encoded_jpg)
    image = Image.open(encoded_jpg_io)
    width, height = image.size

    filename = group.filename.encode('utf8')
    image_format = b'jpg'
    xmins = []
    xmaxs = []
    ymins = []
    ymaxs = []
    classes_text = []
    classes = []

    for index, row in group.object.iterrows():
        xmins.append(row['xmin'] / width)
        xmaxs.append(row['xmax'] / width)
        ymins.append(row['ymin'] / height)
        ymaxs.append(row['ymax'] / height)
        classes_text.append(row['class'].encode('utf8'))
        classes.append(class_text_to_int(row['class']))

    tf_example = tf.train.Example(features=tf.train.Features(feature={
        'image/height': dataset_util.int64_feature(height),
        'image/width': dataset_util.int64_feature(width),
        'image/filename': dataset_util.bytes_feature(filename),
        'image/source_id': dataset_util.bytes_feature(filename),
        'image/encoded': dataset_util.bytes_feature(encoded_jpg),
        'image/format': dataset_util.bytes_feature(image_format),
        'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
        'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
        'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
        'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
        'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
        'image/object/class/label': dataset_util.int64_list_feature(classes),
    }))
    return tf_example


def main(_):
    writer = tf.python_io.TFRecordWriter(FLAGS.output_path)
    path = os.path.join(os.getcwd(), 'images/test')         # change this to the folder holding this split's images
    examples = pd.read_csv(FLAGS.csv_input)
    grouped = split(examples, 'filename')
    for group in grouped:
        tf_example = create_tf_example(group, path)
        writer.write(tf_example.SerializeToString())

    writer.close()
    output_path = os.path.join(os.getcwd(), FLAGS.output_path)
    print('Successfully created the TFRecords: {}'.format(output_path))


if __name__ == '__main__':
    tf.app.run()

In the code above, change the csv_input path in flags.DEFINE_string to the train or test CSV produced in the previous step, and set output_path to train.record or test.record accordingly. Run the script twice to produce train.record and test.record (mind where you point the output path).
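A sketch of the two runs, in case you prefer passing the flags on the command line rather than editing the defaults (paths assume the locations used above; also remember to point path in main() at the image folder for each split):

python generate_tfrecord.py --csv_input=E:/gitcode/tensorflow-model/VOCPolice/VOC2007/train_labels.csv --output_path=train.record
python generate_tfrecord.py --csv_input=E:/gitcode/tensorflow-model/VOCPolice/VOC2007/test_labels.csv --output_path=test.record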

5. Prepare for training

(1) Download the ssd_mobilenet_v1_coco_2017_11_17 model and extract it under the object_detection directory
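If you do not have it yet, the archive comes from the TensorFlow model zoo; judging by the commented-out download code in step 9, the URL should be:

http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_coco_2017_11_17.tar.gz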

(2) Edit pascal_label_map.pbtxt, or create your own .pbtxt file.

Since I only have one class here, the label map needs just a single entry, shown below.
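A one-class label map matching the 'police' class used in generate_tfrecord.py would look like this (a minimal sketch; the id must agree with what class_text_to_int returns):

item {
  id: 1
  name: 'police'
}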

(3) Edit the model's pipeline config file

The places to change are as follows:

num_classes: 1  # your number of classes
batch_size: 2   # as large as your machine can handle
fine_tune_checkpoint: "E:/gitcode/tensorflow-model/chde222-models-master-MyData1230/models/research/object_detection/ssd_mobilenet_v1_coco_2017_11_17/model.ckpt"
  # point this at your downloaded model; I use absolute paths throughout
num_steps: 50000  # the number of training steps you want

train_input_reader {
  label_map_path: "E:/gitcode/tensorflow-model/chde222-models-master-MyData1230/models/research/object_detection/data/pascal_label_map.pbtxt"
  tf_record_input_reader {
    input_path: "E:/gitcode/tensorflow-model/chde222-models-master-MyData1230/models/research/object_detection/train.record"
  }
}  # set label_map_path and the train input_path to the locations of your own files

eval_input_reader {
  label_map_path: "E:/gitcode/tensorflow-model/chde222-models-master-MyData1230/models/research/object_detection/data/pascal_label_map.pbtxt"
  shuffle: false
  num_readers: 1
  tf_record_input_reader {
    input_path: "E:/gitcode/tensorflow-model/chde222-models-master-MyData1230/models/research/object_detection/test.record"
  }
}  # set label_map_path and the test input_path to the locations of your own files

6. Start training

In object_detection/legacy/train.py, change:

train_dir in flags.DEFINE_string to the path where your training output will be stored
pipeline_config_path in flags.DEFINE_string to the path of the .config file from the model you just downloaded

Then run the script.
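Alternatively, leave the defaults alone and pass the same two values as flags; a sketch, assuming the train_dir used for TensorBoard in step 7 (substitute whichever .config you actually edited):

python legacy/train.py --train_dir=E:/gitcode/tensorflow-model/chde222-models-master-MyData1230/models/research/ssdmodel1231 --pipeline_config_path=<path to your .config>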

7. View the training curves

From the object_detection directory (the logdir is your train_dir):

tensorboard --logdir=E:/gitcode/tensorflow-model/chde222-models-master-MyData1230/models/research/ssdmodel1231

8. Export the model

In export_inference_graph.py, change:

pipeline_config_path in flags.DEFINE_string to the path of your SSD model's .config file
trained_checkpoint_prefix in flags.DEFINE_string to your training output's model.ckpt-<number of training steps>
output_directory in flags.DEFINE_string to your export path

Then run it to save the exported model.
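A sketch of the equivalent command-line invocation, assuming training ran the full 50000 steps and the directory names used elsewhere in this post (Police_detection1231 reappears as MODEL_NAME in step 9):

python export_inference_graph.py --input_type=image_tensor --pipeline_config_path=<path to your .config> --trained_checkpoint_prefix=E:/gitcode/tensorflow-model/chde222-models-master-MyData1230/models/research/ssdmodel1231/model.ckpt-50000 --output_directory=Police_detection1231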

9. Test the model

Create a new file myTest.py under object_detection:

# coding: utf-8

# # Object Detection Demo
# Welcome to the object detection inference walkthrough!  This notebook will walk you step by step through the process of using a pre-trained model to detect objects in an image. Make sure to follow the [installation instructions](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md) before you start.


from distutils.version import StrictVersion
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile

from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image

# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
from object_detection.utils import ops as utils_ops

# if StrictVersion(tf.__version__) < StrictVersion('1.9.0'):
#   raise ImportError('Please upgrade your TensorFlow installation to v1.9.* or later!')


# ## Env setup



# This is needed to display the images.
# get_ipython().magic(u'matplotlib inline')


# ## Object detection imports
# Here are the imports from the object detection module.



from object_detection.utils import label_map_util

from object_detection.utils import visualization_utils as vis_util


# # Model preparation

# ## Variables
#
# Any model exported using the `export_inference_graph.py` tool can be loaded here simply by changing `PATH_TO_FROZEN_GRAPH` to point to a new .pb file.
#
# By default we use an "SSD with Mobilenet" model here. See the [detection model zoo](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md) for a list of other models that can be run out-of-the-box with varying speeds and accuracies.



# What model to download.
MODEL_NAME = './Police_detection1231'
# MODEL_FILE = MODEL_NAME + '.tar.gz'
# DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'

# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_FROZEN_GRAPH = MODEL_NAME + '/frozen_inference_graph.pb'

# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('data', 'pascal_label_map.pbtxt')

NUM_CLASSES = 1


# ## Download Model



# opener = urllib.request.URLopener()
# opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)
'''
tar_file = tarfile.open(MODEL_FILE)
for file in tar_file.getmembers():
  file_name = os.path.basename(file.name)
  if 'frozen_inference_graph.pb' in file_name:
    tar_file.extract(file, os.getcwd())
'''

# ## Load a (frozen) Tensorflow model into memory.



detection_graph = tf.Graph()
with detection_graph.as_default():
  od_graph_def = tf.GraphDef()
  with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
    serialized_graph = fid.read()
    od_graph_def.ParseFromString(serialized_graph)
    tf.import_graph_def(od_graph_def, name='')


# ## Loading label map
# Label maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`.  Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine



label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)


# ## Helper code



def load_image_into_numpy_array(image):
  (im_width, im_height) = image.size
  return np.array(image.getdata()).reshape(
      (im_height, im_width, 3)).astype(np.uint8)


# # Detection



# Test images are read from PATH_TO_TEST_IMAGES_DIR and are expected to be named
# image1.jpg, image2.jpg, ...; adjust the range below to match how many you have.
PATH_TO_TEST_IMAGES_DIR = 'test_police'
TEST_IMAGE_PATHS = [ os.path.join(PATH_TO_TEST_IMAGES_DIR, 'image{}.jpg'.format(i)) for i in range(1, 22) ]

# Size, in inches, of the output images.
IMAGE_SIZE = (12, 8)



config = tf.ConfigProto()
config.gpu_options.allow_growth = True
def run_inference_for_single_image(image, graph):
  with graph.as_default():
    with tf.Session(config=config) as sess:
      # Get handles to input and output tensors
      ops = tf.get_default_graph().get_operations()
      all_tensor_names = {output.name for op in ops for output in op.outputs}
      tensor_dict = {}
      for key in [
          'num_detections', 'detection_boxes', 'detection_scores',
          'detection_classes', 'detection_masks'
      ]:
        tensor_name = key + ':0'
        if tensor_name in all_tensor_names:
          tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
              tensor_name)
      if 'detection_masks' in tensor_dict:
        # The following processing is only for single image
        detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
        detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
        # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
        real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
        detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
        detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
        detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
            detection_masks, detection_boxes, image.shape[0], image.shape[1])
        detection_masks_reframed = tf.cast(
            tf.greater(detection_masks_reframed, 0.5), tf.uint8)
        # Follow the convention by adding back the batch dimension
        tensor_dict['detection_masks'] = tf.expand_dims(
            detection_masks_reframed, 0)
      image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')

      # Run inference
      output_dict = sess.run(tensor_dict,
                             feed_dict={image_tensor: np.expand_dims(image, 0)})

      # all outputs are float32 numpy arrays, so convert types as appropriate
      output_dict['num_detections'] = int(output_dict['num_detections'][0])
      output_dict['detection_classes'] = output_dict[
          'detection_classes'][0].astype(np.uint8)
      output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
      output_dict['detection_scores'] = output_dict['detection_scores'][0]
      if 'detection_masks' in output_dict:
        output_dict['detection_masks'] = output_dict['detection_masks'][0]
  return output_dict




for image_path in TEST_IMAGE_PATHS:
  image = Image.open(image_path)
  # the array based representation of the image will be used later in order to prepare the
  # result image with boxes and labels on it.
  image_np = load_image_into_numpy_array(image)
  # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
  image_np_expanded = np.expand_dims(image_np, axis=0)
  # Actual detection.
  output_dict = run_inference_for_single_image(image_np, detection_graph)
  # Visualization of the results of a detection.
  vis_util.visualize_boxes_and_labels_on_image_array(
      image_np,
      output_dict['detection_boxes'],
      output_dict['detection_classes'],
      output_dict['detection_scores'],
      category_index,
      instance_masks=output_dict.get('detection_masks'),
      use_normalized_coordinates=True,
      line_thickness=8)
  # Draw the annotated array into a figure before saving; without imshow,
  # savefig would write an empty canvas.
  plt.figure(figsize=IMAGE_SIZE)
  plt.imshow(image_np)
  plt.savefig(image_path + '_labeled.jpg')
  plt.close()
print("finished")

Change MODEL_NAME to the path of your exported model.

Change PATH_TO_LABELS to the path of your .pbtxt file.

Change NUM_CLASSES to your number of classes.

Change PATH_TO_TEST_IMAGES_DIR to the folder containing your test images, named image1.jpg, image2.jpg, and so on.

In TEST_IMAGE_PATHS, set the range to match your number of images.

Then run it.

10. Evaluate the model

Change the following parameters in legacy/eval.py, then run it:

flags.DEFINE_string(
    'checkpoint_dir', 'your trained model path',
    'Directory containing checkpoints to evaluate, typically '
    'set to `train_dir` used in the training job.')
flags.DEFINE_string('eval_dir', 'your eval model save path', 'Directory to write eval summaries to.')
flags.DEFINE_string(
    'pipeline_config_path', 'your .config path',
    'Path to a pipeline_pb2.TrainEvalPipelineConfig config '
    'file. If provided, other configs are ignored')
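As with training, the same three values can be passed as flags instead of editing the defaults; a sketch:

python legacy/eval.py --checkpoint_dir=<your train_dir> --eval_dir=<your eval output dir> --pipeline_config_path=<path to your .config>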

Done!

References:

https://blog.csdn.net/Arvin_liang/article/details/84752427

https://chtseng.wordpress.com/2019/02/16/%E5%A6%82%E4%BD%95%E4%BD%BF%E7%94%A8google-object-detection-api%E8%A8%93%E7%B7%B4%E8%87%AA%E5%B7%B1%E7%9A%84%E6%A8%A1%E5%9E%8B/

https://www.cnblogs.com/zongfa/p/9663649.html

https://blog.csdn.net/linolzhang/article/details/87121875
