1. Installing and configuring the mmdetection environment (see the official docs)
It is best to create a fresh conda environment for this, to avoid package conflicts.
git clone the mmdetection source to your local machine.
My environment uses Python 3.9.
# Create a virtual environment
conda create -n mmdet_py39 python=3.9 anaconda
# Install PyTorch
pip install torch==2.1.0 torchvision==0.16.0 torchaudio==2.1.0 --index-url https://download.pytorch.org/whl/cu121
# Install MMEngine and MMCV with MIM
pip install -U openmim
mim install mmengine
mim install "mmcv>=2.0.0"
# Install mmdetection
git clone https://github.com/open-mmlab/mmdetection.git
cd mmdetection
pip install -v -e .
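After installation, a quick sanity check in Python confirms the versions line up (a minimal sketch):

import torch
import mmcv
import mmdet

print(torch.__version__, torch.cuda.is_available())  # e.g. 2.1.0 True
print(mmcv.__version__)                               # should be >= 2.0.0
print(mmdet.__version__)                              # e.g. 3.x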
2. Running the experiments
2.1 Preparing the dataset
mmdetection expects annotations in COCO format.
Below is a script that converts a YOLO-format dataset to COCO format. It expects the following directory layout:
dataset_dir
-------- images
-------- labels
-------- classes.txt
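Each txt file under labels holds one object per line in YOLO format: class_id cx cy w h, where the box center and size are normalized to [0, 1] by the image width/height, and class_id indexes into classes.txt. A label line might look like this (hypothetical values):

0 0.512 0.430 0.120 0.250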
import os
import json
import argparse

import cv2
from tqdm import tqdm
from sklearn.model_selection import train_test_split

# Usage:
# python yolo2coco.py --root_dir VisDrone2019-DET-train --save_path train.json
# python yolo2coco.py --root_dir VisDrone2019-DET-val --save_path val.json
# python yolo2coco.py --root_dir VisDrone2019-DET-test-dev --save_path test.json

parser = argparse.ArgumentParser()
parser.add_argument('--root_dir', default='./dataset/valid', type=str,
                    help="root path of images and labels, containing ./images, ./labels and classes.txt")
parser.add_argument('--save_path', type=str, default='./valid.json',
                    help="if not splitting the dataset, the path of the output json file")
parser.add_argument('--random_split', action='store_true',
                    help="randomly split the dataset, default ratio is 8:1:1")
parser.add_argument('--split_by_file', action='store_true',
                    help="split the dataset according to ./train.txt, ./val.txt and ./test.txt")
arg = parser.parse_args()


def train_test_val_split_random(img_paths, ratio_train=0.8, ratio_test=0.1, ratio_val=0.1):
    # The split ratios can be changed here.
    assert round(ratio_train + ratio_test + ratio_val) == 1
    train_img, middle_img = train_test_split(img_paths, test_size=1 - ratio_train, random_state=233)
    ratio = ratio_val / (1 - ratio_train)
    val_img, test_img = train_test_split(middle_img, test_size=ratio, random_state=233)
    print("NUMS of train:val:test = {}:{}:{}".format(len(train_img), len(val_img), len(test_img)))
    return train_img, val_img, test_img


def train_test_val_split_by_files(img_paths, root_dir):
    # Define the train/val/test sets from train.txt, val.txt and test.txt
    # (each file lists the image names of the corresponding split).
    phases = ['train', 'val', 'test']
    img_split = []
    for p in phases:
        define_path = os.path.join(root_dir, f'{p}.txt')
        print(f'Read {p} dataset definition from {define_path}')
        assert os.path.exists(define_path)
        with open(define_path, 'r') as f:
            # strip newlines so membership tests against os.listdir() names work
            img_paths = [line.strip() for line in f.readlines()]
            # img_paths = [os.path.split(img_path)[1] for img_path in img_paths]  # NOTE: uncomment if the txt files contain full paths.
            img_split.append(img_paths)
    return img_split[0], img_split[1], img_split[2]


def yolo2coco(arg):
    root_path = arg.root_dir
    print("Loading data from ", root_path)
    assert os.path.exists(root_path)
    originLabelsDir = os.path.join(root_path, 'labels')
    originImagesDir = os.path.join(root_path, 'images')
    with open(os.path.join(root_path, 'classes.txt')) as f:
        classes = f.read().strip().split()
    # image file names
    indexes = os.listdir(originImagesDir)

    if arg.random_split or arg.split_by_file:
        # Holds the image info and annotations of each split.
        train_dataset = {'categories': [], 'annotations': [], 'images': []}
        val_dataset = {'categories': [], 'annotations': [], 'images': []}
        test_dataset = {'categories': [], 'annotations': [], 'images': []}
        # Map class names to numeric ids; category ids start from 0.
        for i, cls in enumerate(classes, 0):
            train_dataset['categories'].append({'id': i, 'name': cls, 'supercategory': 'mark'})
            val_dataset['categories'].append({'id': i, 'name': cls, 'supercategory': 'mark'})
            test_dataset['categories'].append({'id': i, 'name': cls, 'supercategory': 'mark'})
        if arg.random_split:
            print("spliting mode: random split")
            train_img, val_img, test_img = train_test_val_split_random(indexes, 0.8, 0.1, 0.1)
        elif arg.split_by_file:
            print("spliting mode: split by files")
            train_img, val_img, test_img = train_test_val_split_by_files(indexes, root_path)
    else:
        dataset = {'categories': [], 'annotations': [], 'images': []}
        for i, cls in enumerate(classes, 0):
            dataset['categories'].append({'id': i, 'name': cls, 'supercategory': 'mark'})

    # annotation id counter
    ann_id_cnt = 0
    for k, index in enumerate(tqdm(indexes)):
        # Supports jpg and png images.
        txtFile = index.replace('.jpg', '.txt').replace('.png', '.txt')
        # Read the image width and height.
        im = cv2.imread(os.path.join(root_path, 'images/') + index)
        height, width, _ = im.shape
        if arg.random_split or arg.split_by_file:
            # Point `dataset` at the split this image belongs to.
            if index in train_img:
                dataset = train_dataset
            elif index in val_img:
                dataset = val_dataset
            elif index in test_img:
                dataset = test_dataset
        # Add the image info.
        dataset['images'].append({'file_name': index,
                                  'id': k,
                                  'width': width,
                                  'height': height})
        if not os.path.exists(os.path.join(originLabelsDir, txtFile)):
            # No label file: keep only the image info and skip.
            continue
        with open(os.path.join(originLabelsDir, txtFile), 'r') as fr:
            labelList = fr.readlines()
            for label in labelList:
                label = label.strip().split()
                x = float(label[1])
                y = float(label[2])
                w = float(label[3])
                h = float(label[4])
                # convert normalized x,y,w,h to absolute x1,y1,x2,y2
                H, W, _ = im.shape
                x1 = (x - w / 2) * W
                y1 = (y - h / 2) * H
                x2 = (x + w / 2) * W
                y2 = (y + h / 2) * H
                # Category ids start from 0 (the official coco2017 ids are irregular; ignored here).
                cls_id = int(label[0])
                width = max(0, x2 - x1)
                height = max(0, y2 - y1)
                dataset['annotations'].append({
                    'area': width * height,
                    'bbox': [x1, y1, width, height],
                    'category_id': cls_id,
                    'id': ann_id_cnt,
                    'image_id': k,
                    'iscrowd': 0,
                    # mask: the four rectangle corners, clockwise from the top-left
                    'segmentation': [[x1, y1, x2, y1, x2, y2, x1, y2]]
                })
                ann_id_cnt += 1

    # Save the results.
    folder = os.path.join(root_path, 'annotations')
    if not os.path.exists(folder):
        os.makedirs(folder)
    if arg.random_split or arg.split_by_file:
        for phase in ['train', 'val', 'test']:
            json_name = os.path.join(root_path, 'annotations/{}.json'.format(phase))
            with open(json_name, 'w') as f:
                if phase == 'train':
                    json.dump(train_dataset, f)
                elif phase == 'val':
                    json.dump(val_dataset, f)
                elif phase == 'test':
                    json.dump(test_dataset, f)
                print('Save annotation to {}'.format(json_name))
    else:
        json_name = os.path.join(root_path, 'annotations/{}'.format(arg.save_path))
        with open(json_name, 'w') as f:
            json.dump(dataset, f)
            print('Save annotation to {}'.format(json_name))


if __name__ == "__main__":
    yolo2coco(arg)
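To sanity-check the generated annotation file, it can be loaded with pycocotools (a dependency of mmdetection); a minimal sketch, assuming the script wrote annotations/train.json under the dataset root:

from pycocotools.coco import COCO

coco = COCO('dataset_dir/annotations/train.json')  # hypothetical path
print('images:', len(coco.getImgIds()))
print('annotations:', len(coco.getAnnIds()))
print('categories:', [c['name'] for c in coco.loadCats(coco.getCatIds())])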
2.2 Configuring the training file
Find the model you want under the configs folder. Taking Faster R-CNN as an example: say I want to use the faster-rcnn_r50_fpn_ciou_1x_coco.py config. I only need to inherit it and write out the fields I want to change; as with class inheritance, the new values automatically override the originals.
Take the following config file as an example: configs/faster_rcnn/faster-rcnn_r18_fpn_8xb8-amp-lsj-200e_FOD_tiny.py
# Inherit this config; only write out the fields that need changing
_base_ = 'faster-rcnn_r50_fpn_ciou_1x_coco.py'
# We also need to change num_classes in the head to match the number of classes in the dataset
model = dict(
    roi_head=dict(
        bbox_head=dict(
            type='Shared2FCBBoxHead',
            num_classes=23)))
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=50, val_interval=1)
# Modify the dataset-related settings
data_root = '/home/zhangh/dataset/defect_dataset/'
metainfo = {
    # 'classes': ('Straight_Knife', 'Folding_Knife', 'Scissor', 'Utility_Knife', 'Multi-tool_Knife'),
    'classes': ('Abrasion', 'Crazing', 'Patches', 'Inclusion', 'Uneven', 'Blowhole', 'Break', 'Crack',
                'Crescent_Gap', 'Crease', 'Silk-Spot', 'Water-Spot', 'Weld-Line', 'GC-Inclusion',
                'Oil-Spot', 'Rolled-Pit', 'Punching', 'Waist-Folding', 'Bruise', 'Pitted_Surface',
                'Rolled-in_Scale', 'Scratches', 'Bubble'),
    # 'palette': [
    #     (220, 20, 60),
    # ]
}
train_dataloader = dict(
    batch_size=8,
    num_workers=1,
    dataset=dict(
        data_root=data_root,
        metainfo=metainfo,
        ann_file='JASON/instances_train2017.json',
        data_prefix=dict(img='images/train/')))
val_dataloader = dict(
    batch_size=8,
    num_workers=1,
    dataset=dict(
        data_root=data_root,
        metainfo=metainfo,
        ann_file='JASON/instances_val2017.json',
        data_prefix=dict(img='images/val/')))
test_dataloader = dict(
    batch_size=8,
    num_workers=1,
    dataset=dict(
        data_root=data_root,
        metainfo=metainfo,
        ann_file='JASON/instances_test2017.json',
        data_prefix=dict(img='images/test/')))
# Modify the evaluation-metric settings
val_evaluator = dict(ann_file=data_root + 'JASON/instances_val2017.json')
test_evaluator = dict(ann_file=data_root + 'JASON/instances_test2017.json')
# load_from = None  # load a pre-trained model

# Official docs:
# https://mmdetection.readthedocs.io/zh-cn/latest/user_guides/test.html
# Training:
# python tools/train.py configs/pascal_voc/faster-rcnn_r50_fpn_1x_voc0712.py
# Testing:
# python tools/test.py configs/pascal_voc/faster-rcnn_r50_fpn_1x_voc0712.py checkpoints/faster_rcnn_r50_fpn_1x_voc0712_20200624-c9895d40.pth
# ./tools/dist_test.sh configs/mask_rcnn/mask-rcnn_r50_fpn_1x_coco.py checkpoints/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth
# Get the model's parameter count and FLOPs:
# python tools/analysis_tools/get_flops.py configs/faster_rcnn/faster-rcnn_r18_fpn_8xb8-amp-lsj-200e_FOD_tiny.py
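Because _base_ inheritance makes it hard to see what the final config actually contains, it can help to dump the resolved config with mmengine before training (a minimal sketch; mmdetection also ships tools/misc/print_config.py for the same purpose):

from mmengine.config import Config

cfg = Config.fromfile('configs/faster_rcnn/faster-rcnn_r18_fpn_8xb8-amp-lsj-200e_FOD_tiny.py')
print(cfg.pretty_text)  # the fully merged config, with all inherited values resolved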
To load a pre-trained model, download the weights via the README next to the corresponding config file, then set load_from to the checkpoint path.
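For example, add one line to the config (the checkpoint path below is hypothetical; point it at wherever the downloaded weights are saved):

load_from = 'checkpoints/faster-rcnn_r50_fpn_1x_coco.pth'  # hypothetical local path to the downloaded weights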
2.3 Starting training
Training uses the tools/train.py tool:
python tools/train.py configs/pascal_voc/faster-rcnn_r50_fpn_1x_voc0712.py
The trained weights are saved to the work_dirs folder.
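tools/train.py also accepts a few useful flags, e.g. to choose the output directory or resume from the latest checkpoint (assuming a recent mmdetection 3.x; run python tools/train.py --help to confirm):

python tools/train.py configs/pascal_voc/faster-rcnn_r50_fpn_1x_voc0712.py --work-dir work_dirs/my_exp --resume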
2.4 Running evaluation
Evaluation uses the tools/test.py tool:
python tools/test.py configs/pascal_voc/faster-rcnn_r50_fpn_1x_voc0712.py checkpoints/faster_rcnn_r50_fpn_1x_voc0712_20200624-c9895d40.pth
Setting classwise to True in mmdetection/mmdet/evaluation/metrics/coco_metric.py makes the per-class AP values show up during evaluation.
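Alternatively, instead of editing the library source, the per-class table can be switched on from your own config, since CocoMetric accepts classwise as an argument (a minimal sketch, reusing data_root from the example config above):

val_evaluator = dict(
    type='CocoMetric',
    ann_file=data_root + 'JASON/instances_val2017.json',
    metric='bbox',
    classwise=True)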
To get the model's parameter count and FLOPs, use tools/analysis_tools/get_flops.py:
python tools/analysis_tools/get_flops.py configs/faster_rcnn/faster-rcnn_r18_fpn_8xb8-amp-lsj-200e_FOD_tiny.py
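get_flops.py computes the FLOPs for a default input shape; it also accepts an explicit one via --shape (check python tools/analysis_tools/get_flops.py --help in your version):

python tools/analysis_tools/get_flops.py configs/faster_rcnn/faster-rcnn_r18_fpn_8xb8-amp-lsj-200e_FOD_tiny.py --shape 1280 800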
To measure the model's FPS, use tools/analysis_tools/benchmark.py:
python -m torch.distributed.launch --nproc_per_node=1 --master_port=29500 --use_env tools/analysis_tools/benchmark.py configs/faster_rcnn/faster-rcnn_r18_fpn_8xb8-amp-lsj-200e_FOD_tiny.py --checkpoint work_dirs/faster-rcnn_r18_fpn_8xb8-amp-lsj-200e_FOD_tiny/epoch_50.pth --launcher pytorch
The following command also reports the inference time, FPS, and GPU memory:
python tools/analysis_tools/benchmark.py <your-config-file> --checkpoint <your-model-weights-file> --task inference --fuse-conv-bn
The prefix
python -m torch.distributed.launch --nproc_per_node=1 --master_port=29500
holds the distributed-launch arguments and can be copied as-is.
The reason for adding --use_env is explained at the following link:
https://blog.csdn.net/qq_43826289/article/details/131564035