Running SegNeXt on Ubuntu 20 to Extract Roads and Water Bodies (Part 4): Successfully Solving the IoU = 0 Problem When Training and Inferring on Your Own Dataset!!

In my earlier post, Running SegNeXt on Ubuntu 20 to Extract Roads and Water Bodies (Part 3): Training and Inference with SegNeXt on Your Own Dataset, the IoU came out as 0 after a whole series of configuration steps. After many attempts, I finally found the correct way to configure it!

The specific configuration details are below.

1. Define the dataset under mmseg/datasets

I created a new file, myroaddata.py, with the following contents:

# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp

from .builder import DATASETS
from .custom import CustomDataset


@DATASETS.register_module()
class MyRoadData(CustomDataset):
    """Two-class road dataset: background (0) and road (1)."""

    CLASSES = ('background', 'road')

    PALETTE = [[0, 0, 0], [255, 255, 255]]

    def __init__(self, **kwargs):
        # Images end in _sat.tif, masks end in _mask.png
        super(MyRoadData, self).__init__(
            img_suffix='_sat.tif',
            seg_map_suffix='_mask.png',
            **kwargs)
        assert osp.exists(self.img_dir)

2. Modify __init__.py under mmseg/datasets/

Add the custom dataset to the existing __init__.py:

# Copyright (c) OpenMMLab. All rights reserved.
from .ade import ADE20KDataset
from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset
from .chase_db1 import ChaseDB1Dataset
from .cityscapes import CityscapesDataset
from .coco_stuff import COCOStuffDataset
from .custom import CustomDataset
from .dark_zurich import DarkZurichDataset
from .dataset_wrappers import (ConcatDataset, MultiImageMixDataset,
                               RepeatDataset)
from .drive import DRIVEDataset
from .hrf import HRFDataset
from .isaid import iSAIDDataset
from .isprs import ISPRSDataset
from .loveda import LoveDADataset
from .night_driving import NightDrivingDataset
from .pascal_context import PascalContextDataset, PascalContextDataset59
from .potsdam import PotsdamDataset
from .stare import STAREDataset
from .voc import PascalVOCDataset
from .myroaddata import MyRoadData

__all__ = [
    'CustomDataset', 'build_dataloader', 'ConcatDataset', 'RepeatDataset',
    'DATASETS', 'build_dataset', 'PIPELINES', 'CityscapesDataset',
    'PascalVOCDataset', 'ADE20KDataset', 'PascalContextDataset',
    'PascalContextDataset59', 'ChaseDB1Dataset', 'DRIVEDataset', 'HRFDataset',
    'STAREDataset', 'DarkZurichDataset', 'NightDrivingDataset',
    'COCOStuffDataset', 'LoveDADataset', 'MultiImageMixDataset',
    'iSAIDDataset', 'ISPRSDataset', 'PotsdamDataset', 'MyRoadData'
]
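
As a quick sanity check that registration worked (a minimal sketch; run it from the repository root once mmseg is importable):

from mmseg.datasets import DATASETS

print(DATASETS.get('MyRoadData'))  # should print the MyRoadData class, not None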

3. Define the data loading under configs/_base_/datasets

I created a new file, myroad.py, with the following contents:

# dataset settings
dataset_type = 'MyRoadData'
data_root = 'data/MyRoadData'
img_norm_cfg = dict(
    mean=[0.5947, 0.5815, 0.5625], std=[0.1173, 0.1169, 0.1157], to_rgb=True)
img_scale = (512, 512)
crop_size = (256, 256)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=img_scale,
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img'])
        ])
]

data = dict(
    samples_per_gpu=4,
    workers_per_gpu=8,
    train=dict(
        type='RepeatDataset',
        times=40000,
        dataset=dict(
            type=dataset_type,
            data_root=data_root,
            img_dir='images/training',
            ann_dir='annotations/training',
            pipeline=train_pipeline)),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/validation',
        ann_dir='annotations/validation',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/validation',
        ann_dir='annotations/validation',
        pipeline=test_pipeline))
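
Before training, this wiring can be verified by building the dataset from the config (a minimal sketch, assuming the directory layout described later in this post already exists):

from mmcv import Config
from mmseg.datasets import build_dataset

cfg = Config.fromfile('configs/_base_/datasets/myroad.py')
ds = build_dataset(cfg.data.val)
print(len(ds), ds.CLASSES)  # expect the validation image count and ('background', 'road')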


4. Pick the model config you need under configs/ and adapt it

Taking PSPNet as an example, create a new file configs/pspnet/pspnet_r50-d8_512x1024_40k_myroaddata.py:

_base_ = [
    '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/myroad.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
]
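
If you would rather not touch the shared base model file in the next step, mmseg's _base_ inheritance also lets you override the class count right here (a sketch; 2 matches the background/road dataset):

model = dict(
    decode_head=dict(num_classes=2),
    auxiliary_head=dict(num_classes=2))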

5. Modify pspnet_r50-d8.py under configs/_base_/models/

The stock config below targets Cityscapes, which is why num_classes is 19 in both decode_head and auxiliary_head; for this two-class background/road dataset it presumably needs to be set to 2, either edited here or overridden as shown above.

# model settings
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
    type='EncoderDecoder',
    pretrained='open-mmlab://resnet50_v1c',
    backbone=dict(
        type='ResNetV1c',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        dilations=(1, 1, 2, 4),
        strides=(1, 2, 1, 1),
        norm_cfg=norm_cfg,
        norm_eval=False,
        style='pytorch',
        contract_dilation=True),
    decode_head=dict(
        type='PSPHead',
        in_channels=2048,
        in_index=3,
        channels=512,
        pool_scales=(1, 2, 3, 6),
        dropout_ratio=0.1,
        num_classes=19,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    auxiliary_head=dict(
        type='FCNHead',
        in_channels=1024,
        in_index=2,
        channels=256,
        num_convs=1,
        concat_input=False,
        dropout_ratio=0.1,
        num_classes=19,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
    # model training and testing settings
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))

6. Go back to tools/train.py and start training

python tools/train.py configs/pspnet/pspnet_r50-d8_512x1024_40k_myroaddata.py

Training should now run.
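
After training, mIoU can be evaluated with tools/test.py (a sketch; the checkpoint path assumes mmseg's default work_dirs layout):

python tools/test.py configs/pspnet/pspnet_r50-d8_512x1024_40k_myroaddata.py work_dirs/pspnet_r50-d8_512x1024_40k_myroaddata/latest.pth --eval mIoU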

Result figure:

Custom dataset directory layout

Create a MyRoadData folder under data/ to hold the dataset. Inside it, create two folders, images and annotations, and under each of them create training and validation subfolders (the full layout is shown below):

annotations/training holds the training labels
annotations/validation holds the validation labels

Likewise:

images/training holds the original training images
images/validation holds the original validation images
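
Combined with the file suffixes from step 1, the expected layout is:

data/MyRoadData/
├── images/
│   ├── training/        # *_sat.tif training images
│   └── validation/      # *_sat.tif validation images
└── annotations/
    ├── training/        # *_mask.png training masks
    └── validation/      # *_mask.png validation masks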

1. The images must be 8-bit depth

Note: any 24-bit images must all be converted to 8-bit!!!! Otherwise training will throw an error. The conversion code (run here on the training masks) is as follows:

# -*- coding: utf-8 -*-
"""
Created on Wed Oct 4 16:50:20 2022

@author: Laney_Midory
csdn: Laney_Midory
"""
import os

from PIL import Image

Image.MAX_IMAGE_PIXELS = None  # allow opening very large images

# Directory holding the masks to convert
tar = "/home/wangtianni/SegNeXt-main/SegNeXt-main/data/data/MyRoadData/annotations/training/"
print('Converting 24-bit images to 8-bit')

# Only process the .png masks in the directory
mask_names = filter(lambda x: x.find('png') != -1, os.listdir(tar))

for file in mask_names:
    path = tar + file.strip()
    if not os.path.exists(path):
        continue
    img = Image.open(path)   # open the mask
    img2 = img.convert('P')  # palette mode, i.e. 8-bit depth
    img2.save(path)          # overwrite the original file

print("Finished depth conversion!")
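To confirm a mask really is 8-bit afterwards, check its PIL mode (the filename below is only a placeholder):

from PIL import Image

img = Image.open('data/MyRoadData/annotations/training/example_mask.png')
print(img.mode)  # 'P' or 'L' means 8-bit; 'RGB' means 24-bit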

2. The masks must be converted to 0/1 values!!

Without the 0/1 conversion training will still run, but the results will be wrong.

My road pixels are 255 and the background is 0; the road pixels now need to become 1 while the background stays 0. The code is as follows:

# -*- coding: utf-8 -*-
"""
Created on Wed Oct 4 16:50:20 2022

@author: Laney_Midory
csdn: Laney_Midory
"""
import os

from PIL import Image

Image.MAX_IMAGE_PIXELS = None  # allow opening very large images

# Directory holding the masks to binarize
tar = "/home/wangtianni/SegNeXt-main/data/MyRoadData/annotations/training/"
mask_list = os.listdir(tar)

for file in mask_list:
    path = tar + file.strip()
    if not os.path.exists(path):
        continue
    img = Image.open(path)  # open the mask

    width = img.size[0]
    height = img.size[1]
    for i in range(width):       # walk every column
        for j in range(height):  # walk every row
            data = img.getpixel((i, j))
            if data != 0:        # road pixels (255) become 1
                img.putpixel((i, j), 1)
    print(path)
    img.save(path)  # overwrite the mask with 0/1 values
print("finish!")
   


If you want to check whether your mask pixel values really are 0 and 1, just print them.
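
An equivalent but much faster vectorized version of the loop above, which doubles as the value check (the filename is only a placeholder):

import numpy as np
from PIL import Image

path = 'data/MyRoadData/annotations/training/example_mask.png'  # placeholder
arr = np.array(Image.open(path))
print(np.unique(arr))  # [0 255] before conversion, [0 1] after

binary = (arr != 0).astype(np.uint8)  # map every non-zero (road) pixel to 1
Image.fromarray(binary, mode='L').save(path)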

3. Set the dataset mean and std correctly!!

This also matters a lot. Modify myroad.py in SegNeXt-main/configs/_base_/datasets; mine became:

img_norm_cfg = dict(
    mean=[0.5947, 0.5815, 0.5625], std=[0.1173, 0.1169, 0.1157], to_rgb=True)

You need to compute the mean and standard deviation of your own images, because training still will not work properly if these values are wrong.
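
A minimal sketch for computing these per-channel statistics (assuming the training images live under data/MyRoadData/images/training, with values scaled to [0, 1] to match the config above):

import os

import numpy as np
from PIL import Image

img_dir = 'data/MyRoadData/images/training'  # adjust to your path

# Accumulate per-channel sums over all training images
channel_sum = np.zeros(3)
channel_sq_sum = np.zeros(3)
pixel_count = 0

for name in os.listdir(img_dir):
    if not name.endswith('_sat.tif'):
        continue
    arr = np.asarray(Image.open(os.path.join(img_dir, name)).convert('RGB'),
                     dtype=np.float64) / 255.0
    channel_sum += arr.sum(axis=(0, 1))
    channel_sq_sum += (arr ** 2).sum(axis=(0, 1))
    pixel_count += arr.shape[0] * arr.shape[1]

mean = channel_sum / pixel_count
std = np.sqrt(channel_sq_sum / pixel_count - mean ** 2)
print('mean =', mean.round(4), 'std =', std.round(4))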

At this point you might think training will finally succeed. If you think that, you are sorely mistaken. Running it produced this error:

  File "/home/wangtianni/.conda/envs/pytorch/lib/python3.6/site-packages/torch/nn/functional.py", line 2248, in _verify_batch_size
    raise ValueError("Expected more than 1 value per channel when training, got input size {}".format(size))
ValueError: Expected more than 1 value per channel when training, got input size torch.Size([1, 512, 1, 1])
 

The input tensor has shape [1, 512, 1, 1], meaning batch normalization received only a single value per channel, which it cannot normalize during training.

I was puzzled: I had already converted the images to 8-bit, so why the error? After staring at it for ages and trying all sorts of experiments, I finally found the fix: run step 1 again!!!!

4. Re-run the 8-bit depth-conversion code from step 1

After that, training runs normally!
