How to Train with Object Detection API
Ensure the following directory layout exists.
- research CURRENT_DIR
- object_detection WORK_DIR from this dir to run the script
- prj_dir PRJ_FOLDER
- local_or_cloud-train_or_eval-algorithm-dataset-det_or_seg.sh
- readme.md records the project's main task and how to use this project
- init_models INIT_FOLDER
- exp_200430 EXP_DIR experiment and date
- train
- eval
- vis
- export
- logs
- faster_rcnn_resnet50_pets.config
- datasets DATASET_DIR
- TLR_DC_Road_v1_0
- Annotations
- ImageSet
- JPEGImages
- SegmentationClass
- SegmentationObject
- TFRecordDet_v1_0
- train-00000-of-00004.tfrecord
- trainval-00000-of-00004.tfrecord
- val-00000-of-00004.tfrecord
- label_map.pbtxt
- TFRecordSeg_v1_0
- *.tfrecord
- scripts_to_create_tfrecord.py
- scripts_to_extract_images_from_video.py
- readme.md
- TLR_DC_Road_v1_0
- prj_dir PRJ_FOLDER
- object_detection WORK_DIR from this dir to run the script
The script below is adapted from models/research/deeplab/train_deeplab_pascal_voc_seg.sh:
#!/bin/bash
# Train a TF Object Detection API model for the traffic-light project.
# Usage:
# # From the tensorflow/models/research/object_detection directory.
# sh ./prj_dir/local_or_cloud-train_or_eval-algorithm-dataset-det_or_seg.sh
# Exit immediately if a command exits with a non-zero status.
set -e
# Move one level up to the tensorflow/models/research directory.
cd ..
# Update PYTHONPATH so the object_detection and slim packages are importable.
# (Quoted, and $(...) instead of legacy backticks.)
export PYTHONPATH="${PYTHONPATH}:$(pwd):$(pwd)/slim"
# Set up the working environment.
CURRENT_DIR=$(pwd)
WORK_DIR="${CURRENT_DIR}/object_detection"
# Datasets folder (PASCAL VOC download left disabled by the author).
DATASET_DIR="datasets"
# cd "${WORK_DIR}/${DATASET_DIR}"
# sh download_and_convert_voc2012.sh
# Go back to the original directory.
cd "${CURRENT_DIR}"
# Define and create all per-experiment working directories.
PRJ_DIR="traffic_light_det_v1_0"
EXP_DIR="exp_v200430"
INIT_FOLDER="${WORK_DIR}/${PRJ_DIR}/init_models"
TRAIN_LOGDIR="${WORK_DIR}/${PRJ_DIR}/${EXP_DIR}/train"
EVAL_LOGDIR="${WORK_DIR}/${PRJ_DIR}/${EXP_DIR}/eval"
VIS_LOGDIR="${WORK_DIR}/${PRJ_DIR}/${EXP_DIR}/vis"
EXPORT_DIR="${WORK_DIR}/${PRJ_DIR}/${EXP_DIR}/export"
LOGS_DIR="${WORK_DIR}/${PRJ_DIR}/${EXP_DIR}/logs"
# Create every directory up front; mkdir -p is a no-op when one already exists.
for exp_subdir in "${INIT_FOLDER}" "${TRAIN_LOGDIR}" "${EVAL_LOGDIR}" \
                  "${VIS_LOGDIR}" "${EXPORT_DIR}" "${LOGS_DIR}"; do
  mkdir -p "${exp_subdir}"
done
# Copy locally the trained checkpoint as the initial checkpoint.
# TF_INIT_ROOT="http://download.tensorflow.org/models"
# TF_INIT_CKPT="deeplabv3_pascal_train_aug_2018_01_04.tar.gz"
# cd "${INIT_FOLDER}"
# wget -nd -c "${TF_INIT_ROOT}/${TF_INIT_CKPT}"
# tar -xf "${TF_INIT_CKPT}"
# cd "${CURRENT_DIR}"
# TODO: rewrite once the TFRecord layout is final.
# NOTE(review): the directory tree in the readme lists "TFRecordDet_v1_0",
# not "TFRecordObjDet" -- confirm which name exists on disk.
PRJ_DATASET="${WORK_DIR}/${DATASET_DIR}/TLR_DC_Road_v1_0/TFRecordObjDet"
# Train, Eval and Test.
# From the tensorflow/models/research/ directory.
# FIX: --pipeline_config_path must point at the pipeline .config FILE,
# not at the train log directory (the config lives under EXP_DIR).
PIPELINE_CONFIG_PATH="${WORK_DIR}/${PRJ_DIR}/${EXP_DIR}/faster_rcnn_resnet50_pets.config"
MODEL_DIR="${TRAIN_LOGDIR}"
NUM_TRAIN_STEPS=50000
SAMPLE_1_OF_N_EVAL_EXAMPLES=1
python object_detection/model_main.py \
  --pipeline_config_path="${PIPELINE_CONFIG_PATH}" \
  --model_dir="${MODEL_DIR}" \
  --num_train_steps="${NUM_TRAIN_STEPS}" \
  --sample_1_of_n_eval_examples="${SAMPLE_1_OF_N_EVAL_EXAMPLES}" \
  --alsologtostderr
# Export the trained checkpoint.
# FIX: NUM_ITERATIONS was never defined anywhere in this script, so
# CKPT_PATH expanded to ".../model.ckpt-". The training step count is
# stored in NUM_TRAIN_STEPS above; use it here.
CKPT_PATH="${TRAIN_LOGDIR}/model.ckpt-${NUM_TRAIN_STEPS}"
EXPORT_PATH="${EXPORT_DIR}/frozen_inference_graph.pb"
# NOTE(review): model_variant/atrous_rates/decoder_output_stride are DeepLab
# export flags carried over from the deeplab script this was adapted from;
# confirm they match the export tool actually shipped at
# ${WORK_DIR}/export_model.py.
python "${WORK_DIR}"/export_model.py \
  --logtostderr \
  --checkpoint_path="${CKPT_PATH}" \
  --export_path="${EXPORT_PATH}" \
  --model_variant="xception_65" \
  --atrous_rates=6 \
  --atrous_rates=12 \
  --atrous_rates=18 \
  --output_stride=16 \
  --decoder_output_stride=4 \
  --num_classes=21 \
  --crop_size=513 \
  --crop_size=513 \
  --inference_scales=1.0
# Run inference with the exported checkpoint.
# Please refer to the provided deeplab_demo.ipynb for an example.