open-mmlab maintains a number of very practical frameworks. For object detection, mmdetection is the usual choice; for semantic segmentation, the counterpart is mmsegmentation. This post walks through training mmsegmentation on your own dataset.
1. The most tedious part of any day starts with setting up the environment
1.conda create -n openmmlab python=3.7
2.conda activate openmmlab
3.conda install pytorch==1.7.0 torchvision==0.8.0 torchaudio==0.7.0 cudatoolkit=11.0 -c pytorch
4.pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu110/torch1.7.0/index.html
5.pip install mmsegmentation # or clone https://github.com/open-mmlab/mmsegmentation and run pip install -e ., since the source will be edited later
# Note: the PyTorch, CUDA, and mmcv versions must match each other, otherwise the install (or the CUDA ops at runtime) will fail.
An existing mmdetection environment also works for mmsegmentation, so if you already have one there is no need to build a new environment.
Code link: mmsegmentation (https://github.com/open-mmlab/mmsegmentation)
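Once everything is installed, a quick sanity check helps catch version mismatches early. This is a minimal sketch; it only prints versions, which you can compare against the compatibility table in the mmcv install docs:
# print the torch / CUDA / mmcv / mmseg versions; the combination must be one that
# mmcv-full was built for, otherwise the CUDA ops will fail at runtime
import torch
import mmcv
import mmseg

print('torch:', torch.__version__, 'cuda:', torch.version.cuda, 'available:', torch.cuda.is_available())
print('mmcv :', mmcv.__version__)
print('mmseg:', mmseg.__version__)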
2. With the environment ready, download an official model and run a quick test. The PSPNet checkpoint used below can be downloaded via the model zoo links in configs/pspnet/README.md of the repo; the test script is as follows.
from mmseg.apis import inference_segmentor, init_segmentor
import mmcv
config_file = '../configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py'
checkpoint_file = '../ckpt/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth'
# build the model from the config file and the checkpoint file
model = init_segmentor(config_file, checkpoint_file, device='cuda:0')
# run inference on a single image and show the result
img = 'demo.png' # or img = mmcv.imread(img), which will only load it once
result = inference_segmentor(model, img)
# visualize the result in a new window
model.show_result(img, result, show=True)
# or save the visualization to a file
# opacity takes a value in (0, 1] and controls how strongly the segmentation map is blended over the image
model.show_result(img, result, out_file='result.jpg', opacity=1)
3. Now prepare your own dataset. A VOC-format segmentation dataset is used here; if you have annotations but are unsure how to convert them, refer to the post "VOC图像分割数据集制作" (making a VOC image-segmentation dataset).
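Before going further it is worth checking that the label PNGs contain only the pixel values you expect: class indices starting from 0, plus 255 for ignored pixels. A minimal sketch, assuming the masks sit under data/VOCdevkit/VOC2012/SegmentationClass:
# scan every annotation mask and report the set of label values it contains;
# anything other than 0..num_classes-1 and 255 (ignore) will break training
import glob
import numpy as np
from PIL import Image

values = set()
for path in glob.glob('data/VOCdevkit/VOC2012/SegmentationClass/*.png'):
    values |= set(np.unique(np.array(Image.open(path))).tolist())
print('label values found:', sorted(values))  # expect something like [0, 1, 2, 255]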
4. This post uses DeepLabV3+ (deeplabv3plus) as the example model.
Create a data directory under the mmsegmentation root to hold your dataset; the expected layout and a split-file generator are sketched below.
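The layout under data/ mirrors Pascal VOC. If the train/val split files do not exist yet, the following sketch generates them (assuming a random 80/20 split and the standard VOC directory names):
# expected layout:
#   data/VOCdevkit/VOC2012/JPEGImages/             xxx.jpg
#   data/VOCdevkit/VOC2012/SegmentationClass/      xxx.png (single-channel label masks)
#   data/VOCdevkit/VOC2012/ImageSets/Segmentation/ train.txt / val.txt (one image id per line)
import glob
import os
import random

root = 'data/VOCdevkit/VOC2012'
ids = sorted(os.path.splitext(os.path.basename(p))[0]
             for p in glob.glob(os.path.join(root, 'SegmentationClass', '*.png')))
random.seed(0)
random.shuffle(ids)
split = int(0.8 * len(ids))
os.makedirs(os.path.join(root, 'ImageSets', 'Segmentation'), exist_ok=True)
with open(os.path.join(root, 'ImageSets', 'Segmentation', 'train.txt'), 'w') as f:
    f.write('\n'.join(ids[:split]) + '\n')
with open(os.path.join(root, 'ImageSets', 'Segmentation', 'val.txt'), 'w') as f:
    f.write('\n'.join(ids[split:]) + '\n')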
5. Modify the image scale in the dataset config (configs/_base_/datasets/pascal_voc12.py). This is optional, but if you run into out-of-memory errors, make it smaller. A quick load check follows the config.
# dataset settings
dataset_type = 'PascalVOCDataset'
data_root = 'data/VOCdevkit/VOC2012'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 512)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
# modified image scale; the original value is kept commented out below
#dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
dict(type='Resize', img_scale=(1280, 880), ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1280, 880),
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=4,
workers_per_gpu=4,
train=dict(
type=dataset_type,
data_root=data_root,
img_dir='JPEGImages',
ann_dir='SegmentationClass',
split='ImageSets/Segmentation/train.txt',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir='JPEGImages',
ann_dir='SegmentationClass',
split='ImageSets/Segmentation/val.txt',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir='JPEGImages',
ann_dir='SegmentationClass',
split='ImageSets/Segmentation/test.txt',
pipeline=test_pipeline))
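After editing the dataset config, you can confirm it resolves and finds your images before launching a full run. A minimal sketch, assuming the mmseg 0.x API and that the file above is configs/_base_/datasets/pascal_voc12.py:
# build the training dataset from the edited config and print its size and class names
from mmcv import Config
from mmseg.datasets import build_dataset

cfg = Config.fromfile('configs/_base_/datasets/pascal_voc12.py')  # path is an assumption
ds = build_dataset(cfg.data.train)
print(len(ds), ds.CLASSES)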
6. Your task's classes will likely differ from VOC's, so the class count has to change. Here deeplabv3plus_r50-d8.py (under configs/_base_/models/) is used as the model config; modify num_classes in both the decode head and the auxiliary head. A consistency check follows the config.
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained='open-mmlab://resnet50_v1c',
backbone=dict(
type='ResNetV1c',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
dilations=(1, 1, 2, 4),
strides=(1, 2, 1, 1),
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
contract_dilation=True),
decode_head=dict(
type='DepthwiseSeparableASPPHead',
in_channels=2048,
in_index=3,
channels=512,
dilations=(1, 12, 24, 36),
c1_in_channels=256,
c1_channels=48,
dropout_ratio=0.1,
# num_classes = number of foreground classes + background (2 + 1 = 3 here)
num_classes=3,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
auxiliary_head=dict(
type='FCNHead',
in_channels=1024,
in_index=2,
channels=256,
num_convs=1,
concat_input=False,
dropout_ratio=0.1,
# num_classes = number of foreground classes + background (2 + 1 = 3 here)
num_classes=3,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='whole'))
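To make sure num_classes, the class list, and the palette all agree before training, the full model can be built once up front. A minimal sketch, assuming the mmseg 0.x API:
# build the segmentor from the top-level config and check the head sizes
from mmcv import Config
from mmseg.models import build_segmentor

cfg = Config.fromfile('configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_20k_voc12aug.py')
model = build_segmentor(cfg.model)
print(model.decode_head.num_classes, model.auxiliary_head.num_classes)  # both should be 3 here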
7. Two more files need editing before training: mmseg/core/evaluation/class_names.py and mmseg/datasets/voc.py. Since the data is in VOC format, in class_names.py only voc_classes (and optionally voc_palette) needs changing.
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
def cityscapes_classes():
"""Cityscapes class names for external use."""
return [
'road', 'sidewalk', 'building', 'wall', 'fence', 'pole',
'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky',
'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',
'bicycle'
]
def ade_classes():
"""ADE20K class names for external use."""
return [
'wall', 'building', 'sky', 'floor', 'tree', 'ceiling', 'road', 'bed ',
'windowpane', 'grass', 'cabinet', 'sidewalk', 'person', 'earth',
'door', 'table', 'mountain', 'plant', 'curtain', 'chair', 'car',
'water', 'painting', 'sofa', 'shelf', 'house', 'sea', 'mirror', 'rug',
'field', 'armchair', 'seat', 'fence', 'desk', 'rock', 'wardrobe',
'lamp', 'bathtub', 'railing', 'cushion', 'base', 'box', 'column',
'signboard', 'chest of drawers', 'counter', 'sand', 'sink',
'skyscraper', 'fireplace', 'refrigerator', 'grandstand', 'path',
'stairs', 'runway', 'case', 'pool table', 'pillow', 'screen door',
'stairway', 'river', 'bridge', 'bookcase', 'blind', 'coffee table',
'toilet', 'flower', 'book', 'hill', 'bench', 'countertop', 'stove',
'palm', 'kitchen island', 'computer', 'swivel chair', 'boat', 'bar',
'arcade machine', 'hovel', 'bus', 'towel', 'light', 'truck', 'tower',
'chandelier', 'awning', 'streetlight', 'booth', 'television receiver',
'airplane', 'dirt track', 'apparel', 'pole', 'land', 'bannister',
'escalator', 'ottoman', 'bottle', 'buffet', 'poster', 'stage', 'van',
'ship', 'fountain', 'conveyer belt', 'canopy', 'washer', 'plaything',
'swimming pool', 'stool', 'barrel', 'basket', 'waterfall', 'tent',
'bag', 'minibike', 'cradle', 'oven', 'ball', 'food', 'step', 'tank',
'trade name', 'microwave', 'pot', 'animal', 'bicycle', 'lake',
'dishwasher', 'screen', 'blanket', 'sculpture', 'hood', 'sconce',
'vase', 'traffic light', 'tray', 'ashcan', 'fan', 'pier', 'crt screen',
'plate', 'monitor', 'bulletin board', 'shower', 'radiator', 'glass',
'clock', 'flag'
]
# def voc_classes():
# """Pascal VOC class names for external use."""
# return [
# 'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',
# 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
# 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
# 'tvmonitor'
# ]
def voc_classes():
    return ['background', 'your_class_1', 'your_class_2']  # replace with your own class names
def cocostuff_classes():
"""CocoStuff class names for external use."""
return [
'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',
'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',
'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard',
'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork',
'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',
'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
'scissors', 'teddy bear', 'hair drier', 'toothbrush', 'banner',
'blanket', 'branch', 'bridge', 'building-other', 'bush', 'cabinet',
'cage', 'cardboard', 'carpet', 'ceiling-other', 'ceiling-tile',
'cloth', 'clothes', 'clouds', 'counter', 'cupboard', 'curtain',
'desk-stuff', 'dirt', 'door-stuff', 'fence', 'floor-marble',
'floor-other', 'floor-stone', 'floor-tile', 'floor-wood', 'flower',
'fog', 'food-other', 'fruit', 'furniture-other', 'grass', 'gravel',
'ground-other', 'hill', 'house', 'leaves', 'light', 'mat', 'metal',
'mirror-stuff', 'moss', 'mountain', 'mud', 'napkin', 'net', 'paper',
'pavement', 'pillow', 'plant-other', 'plastic', 'platform',
'playingfield', 'railing', 'railroad', 'river', 'road', 'rock', 'roof',
'rug', 'salad', 'sand', 'sea', 'shelf', 'sky-other', 'skyscraper',
'snow', 'solid-other', 'stairs', 'stone', 'straw', 'structural-other',
'table', 'tent', 'textile-other', 'towel', 'tree', 'vegetable',
'wall-brick', 'wall-concrete', 'wall-other', 'wall-panel',
'wall-stone', 'wall-tile', 'wall-wood', 'water-other', 'waterdrops',
'window-blind', 'window-other', 'wood'
]
def loveda_classes():
"""LoveDA class names for external use."""
return [
'background', 'building', 'road', 'water', 'barren', 'forest',
'agricultural'
]
def potsdam_classes():
"""Potsdam class names for external use."""
return [
'impervious_surface', 'building', 'low_vegetation', 'tree', 'car',
'clutter'
]
def vaihingen_classes():
"""Vaihingen class names for external use."""
return [
'impervious_surface', 'building', 'low_vegetation', 'tree', 'car',
'clutter'
]
def isaid_classes():
"""iSAID class names for external use."""
return [
'background', 'ship', 'store_tank', 'baseball_diamond', 'tennis_court',
'basketball_court', 'Ground_Track_Field', 'Bridge', 'Large_Vehicle',
'Small_Vehicle', 'Helicopter', 'Swimming_pool', 'Roundabout',
'Soccer_ball_field', 'plane', 'Harbor'
]
def stare_classes():
"""stare class names for external use."""
return ['background', 'vessel']
def cityscapes_palette():
"""Cityscapes palette for external use."""
return [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156],
[190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0],
[107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60],
[255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100], [0, 80, 100],
[0, 0, 230], [119, 11, 32]]
def ade_palette():
"""ADE20K palette for external use."""
return [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50],
[4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255],
[230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7],
[150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82],
[143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3],
[0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255],
[255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220],
[255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224],
[255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255],
[224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],
[255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153],
[6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255],
[140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0],
[255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255],
[255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255],
[11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255],
[0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0],
[255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0],
[0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255],
[173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255],
[255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20],
[255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255],
[255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255],
[0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255],
[0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0],
[143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0],
[8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255],
[255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112],
[92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160],
[163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163],
[255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0],
[255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0],
[10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255],
[255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204],
[41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255],
[71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255],
[184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194],
[102, 255, 0], [92, 0, 255]]
# def voc_palette():
#     """Pascal VOC palette for external use."""
#     return [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128],
#             [128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0],
#             [192, 0, 0], [64, 128, 0], [192, 128, 0], [64, 0, 128],
#             [192, 0, 128], [64, 128, 128], [192, 128, 128], [0, 64, 0],
#             [128, 64, 0], [0, 192, 0], [128, 192, 0], [0, 64, 128]]
def voc_palette():
"""Pascal VOC palette for external use."""
return [[0, 0, 0], [128, 0, 0], [0, 128, 0]]
def cocostuff_palette():
"""CocoStuff palette for external use."""
return [[0, 192, 64], [0, 192, 64], [0, 64, 96], [128, 192, 192],
[0, 64, 64], [0, 192, 224], [0, 192, 192], [128, 192, 64],
[0, 192, 96], [128, 192, 64], [128, 32, 192], [0, 0, 224],
[0, 0, 64], [0, 160, 192], [128, 0, 96], [128, 0, 192],
[0, 32, 192], [128, 128, 224], [0, 0, 192], [128, 160, 192],
[128, 128, 0], [128, 0, 32], [128, 32, 0], [128, 0, 128],
[64, 128, 32], [0, 160, 0], [0, 0, 0], [192, 128, 160], [0, 32, 0],
[0, 128, 128], [64, 128, 160], [128, 160, 0], [0, 128, 0],
[192, 128, 32], [128, 96, 128], [0, 0, 128], [64, 0, 32],
[0, 224, 128], [128, 0, 0], [192, 0, 160], [0, 96, 128],
[128, 128, 128], [64, 0, 160], [128, 224, 128], [128, 128, 64],
[192, 0, 32], [128, 96, 0], [128, 0, 192], [0, 128, 32],
[64, 224, 0], [0, 0, 64], [128, 128, 160], [64, 96, 0],
[0, 128, 192], [0, 128, 160], [192, 224, 0], [0, 128, 64],
[128, 128, 32], [192, 32, 128], [0, 64, 192], [0, 0, 32],
[64, 160, 128], [128, 64, 64], [128, 0, 160], [64, 32, 128],
[128, 192, 192], [0, 0, 160], [192, 160, 128], [128, 192, 0],
[128, 0, 96], [192, 32, 0], [128, 64, 128], [64, 128, 96],
[64, 160, 0], [0, 64, 0], [192, 128, 224], [64, 32, 0],
[0, 192, 128], [64, 128, 224], [192, 160, 0], [0, 192, 0],
[192, 128, 96], [192, 96, 128], [0, 64, 128], [64, 0, 96],
[64, 224, 128], [128, 64, 0], [192, 0, 224], [64, 96, 128],
[128, 192, 128], [64, 0, 224], [192, 224, 128], [128, 192, 64],
[192, 0, 96], [192, 96, 0], [128, 64, 192], [0, 128, 96],
[0, 224, 0], [64, 64, 64], [128, 128, 224], [0, 96, 0],
[64, 192, 192], [0, 128, 224], [128, 224, 0], [64, 192, 64],
[128, 128, 96], [128, 32, 128], [64, 0, 192], [0, 64, 96],
[0, 160, 128], [192, 0, 64], [128, 64, 224], [0, 32, 128],
[192, 128, 192], [0, 64, 224], [128, 160, 128], [192, 128, 0],
[128, 64, 32], [128, 32, 64], [192, 0, 128], [64, 192, 32],
[0, 160, 64], [64, 0, 0], [192, 192, 160], [0, 32, 64],
[64, 128, 128], [64, 192, 160], [128, 160, 64], [64, 128, 0],
[192, 192, 32], [128, 96, 192], [64, 0, 128], [64, 64, 32],
[0, 224, 192], [192, 0, 0], [192, 64, 160], [0, 96, 192],
[192, 128, 128], [64, 64, 160], [128, 224, 192], [192, 128, 64],
[192, 64, 32], [128, 96, 64], [192, 0, 192], [0, 192, 32],
[64, 224, 64], [64, 0, 64], [128, 192, 160], [64, 96, 64],
[64, 128, 192], [0, 192, 160], [192, 224, 64], [64, 128, 64],
[128, 192, 32], [192, 32, 192], [64, 64, 192], [0, 64, 32],
[64, 160, 192], [192, 64, 64], [128, 64, 160], [64, 32, 192],
[192, 192, 192], [0, 64, 160], [192, 160, 192], [192, 192, 0],
[128, 64, 96], [192, 32, 64], [192, 64, 128], [64, 192, 96],
[64, 160, 64], [64, 64, 0]]
def loveda_palette():
"""LoveDA palette for external use."""
return [[255, 255, 255], [255, 0, 0], [255, 255, 0], [0, 0, 255],
[159, 129, 183], [0, 255, 0], [255, 195, 128]]
def potsdam_palette():
"""Potsdam palette for external use."""
return [[255, 255, 255], [0, 0, 255], [0, 255, 255], [0, 255, 0],
[255, 255, 0], [255, 0, 0]]
def vaihingen_palette():
"""Vaihingen palette for external use."""
return [[255, 255, 255], [0, 0, 255], [0, 255, 255], [0, 255, 0],
[255, 255, 0], [255, 0, 0]]
def isaid_palette():
"""iSAID palette for external use."""
    return [[0, 0, 0], [0, 0, 63], [0, 63, 63], [0, 63, 0], [0, 63, 127],
            [0, 63, 191], [0, 63, 255], [0, 127, 63], [0, 127, 127],
            [0, 0, 127], [0, 0, 191], [0, 0, 255], [0, 191, 127],
            [0, 127, 191], [0, 127, 255], [0, 100, 155]]
def stare_palette():
"""STARE palette for external use."""
return [[120, 120, 120], [6, 230, 230]]
dataset_aliases = {
'cityscapes': ['cityscapes'],
'ade': ['ade', 'ade20k'],
'voc': ['voc', 'pascal_voc', 'voc12', 'voc12aug'],
'loveda': ['loveda'],
'potsdam': ['potsdam'],
'vaihingen': ['vaihingen'],
'cocostuff': [
'cocostuff', 'cocostuff10k', 'cocostuff164k', 'coco-stuff',
'coco-stuff10k', 'coco-stuff164k', 'coco_stuff', 'coco_stuff10k',
'coco_stuff164k'
],
'isaid': ['isaid', 'iSAID'],
'stare': ['stare', 'STARE']
}
def get_classes(dataset):
"""Get class names of a dataset."""
alias2name = {}
for name, aliases in dataset_aliases.items():
for alias in aliases:
alias2name[alias] = name
if mmcv.is_str(dataset):
if dataset in alias2name:
labels = eval(alias2name[dataset] + '_classes()')
else:
raise ValueError(f'Unrecognized dataset: {dataset}')
else:
raise TypeError(f'dataset must a str, but got {type(dataset)}')
return labels
def get_palette(dataset):
"""Get class palette (RGB) of a dataset."""
alias2name = {}
for name, aliases in dataset_aliases.items():
for alias in aliases:
alias2name[alias] = name
if mmcv.is_str(dataset):
if dataset in alias2name:
labels = eval(alias2name[dataset] + '_palette()')
else:
raise ValueError(f'Unrecognized dataset: {dataset}')
else:
raise TypeError(f'dataset must a str, but got {type(dataset)}')
return labels
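The second file is mmseg/datasets/voc.py, where the PascalVOCDataset class carries its own CLASSES and PALETTE; they must match what you put in class_names.py.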
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from .builder import DATASETS
from .custom import CustomDataset
@DATASETS.register_module()
class PascalVOCDataset(CustomDataset):
"""Pascal VOC dataset.
Args:
split (str): Split txt file for Pascal VOC.
"""
# CLASSES = ('background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
# 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog',
# 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa',
# 'train', 'tvmonitor')
    CLASSES = ['background', 'your_class_1', 'your_class_2']  # must match voc_classes() above
# PALETTE = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128],
# [128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0],
# [192, 0, 0], [64, 128, 0], [192, 128, 0], [64, 0, 128],
# [192, 0, 128], [64, 128, 128], [192, 128, 128], [0, 64, 0],
# [128, 64, 0], [0, 192, 0], [128, 192, 0], [0, 64, 128]]
    # palette: one RGB color per class, in the same order as CLASSES; pick any colors you like
PALETTE = [[0, 0, 0], [128, 0, 0], [0, 128, 0]]
def __init__(self, split, **kwargs):
super(PascalVOCDataset, self).__init__(
img_suffix='.jpg', seg_map_suffix='.png', split=split, **kwargs)
assert osp.exists(self.img_dir) and self.split is not None
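As an aside, recent 0.x releases let you skip editing the source entirely: CustomDataset (and therefore PascalVOCDataset) accepts classes and palette keyword arguments, so they can be overridden straight from the dataset config. A hedged sketch of what the train entry from step 5 would look like in that case:
# alternative: override class names and palette in the config instead of editing voc.py
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    train=dict(
        type='PascalVOCDataset',
        data_root='data/VOCdevkit/VOC2012',
        img_dir='JPEGImages',
        ann_dir='SegmentationClass',
        split='ImageSets/Segmentation/train.txt',
        classes=('background', 'your_class_1', 'your_class_2'),
        palette=[[0, 0, 0], [128, 0, 0], [0, 128, 0]],
        pipeline=train_pipeline))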
8. Last step: reinstall mmsegmentation so the source edits take effect. (If it was installed with pip install -e ., this step is unnecessary, since an editable install picks up source changes directly.)
python setup.py install
9. Start training:
python tools/train.py configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_20k_voc12aug.py
10. Test script for a single image:
from mmseg.apis import inference_segmentor, init_segmentor
import mmcv
config_file = 'path/to/your/config.py'  # the config you trained with
checkpoint_file = 'path/to/your/checkpoint.pth'  # e.g. latest.pth under the work_dirs run directory
# build the model from the config file and the checkpoint file
model = init_segmentor(config_file, checkpoint_file, device='cuda:0')
# run inference on a single image and show the result
img = 'demo.png' # or img = mmcv.imread(img), which will only load it once
result = inference_segmentor(model, img)
# visualize the result in a new window
model.show_result(img, result, show=True)
# or save the visualization to a file
# opacity takes a value in (0, 1] and controls how strongly the segmentation map is blended over the image
model.show_result(img, result, out_file='result.jpg', opacity=1)
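Besides the blended overlay, the raw per-pixel class indices are often what you actually need. In the 0.x API, inference_segmentor returns a list with one H x W array per input image, so a short sketch:
# save the raw prediction: result[0] is an HxW array of class ids (0 = background here)
import numpy as np
from PIL import Image

mask = result[0].astype(np.uint8)
Image.fromarray(mask).save('result_mask.png')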
11. Test multiple images:
python tools/test.py work_dirs/ann_r50-d8_512x512_20k_voc12aug_baseline/ann_r50-d8_512x512_20k_voc12aug.py work_dirs/ann_r50-d8_512x512_20k_voc12aug_baseline/latest.pth --show-dir output
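If quantitative results are what you want rather than saved visualizations, tools/test.py in the 0.x series also accepts an evaluation flag; replacing --show-dir output with --eval mIoU prints mIoU and per-class IoU on the chosen split.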