Running the Trans2Seg Code

Clone the source code

The source repository is here:

https://github.com/xieenze/Trans2Seg.git

Then you need to modify Trans2Seg-master/segmentron/solver/loss.py: delete or comment out line 9 (from ..models.pointrend import point_sample), delete or comment out lines 391-414, and delete lines 438-440. After these edits, your Trans2Seg-master/segmentron/solver/loss.py should contain the following:

"""Custom losses."""
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F

from torch.autograd import Variable
from .lovasz_losses import lovasz_softmax
from ..data.dataloader import datasets
from ..config import cfg

__all__ = ['get_segmentation_loss']

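# Cross-entropy loss for the TransLab model (forward applies plain CE on the
# prediction; the aux helpers below are defined but not used by forward).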
class TranslabLoss(nn.CrossEntropyLoss):
    def __init__(self, aux=True, aux_weight=0.2, ignore_index=-1, **kwargs):
        super(TranslabLoss, self).__init__(ignore_index=ignore_index)
        self.aux = aux
        self.aux_weight = aux_weight

    def _aux_forward(self, *inputs, **kwargs):
        *preds, target = tuple(inputs)

        loss = super(TranslabLoss, self).forward(preds[0], target)
        for i in range(1, len(preds)):
            aux_loss = super(TranslabLoss, self).forward(preds[i], target)
            loss += self.aux_weight * aux_loss
        return loss

    def _multiple_forward(self, *inputs):
        *preds, target = tuple(inputs)
        loss = super(TranslabLoss, self).forward(preds[0], target)
        for i in range(1, len(preds)):
            loss += super(TranslabLoss, self).forward(preds[i], target)
        return loss

    def forward(self, *inputs, **kwargs):
        preds, target = tuple(inputs)
        inputs = tuple(list(preds) + [target])

        loss = dict(loss=super(TranslabLoss, self).forward(*inputs))
        return loss

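# Standard segmentation loss: softmax cross-entropy, with optional auxiliary
# heads each contributing aux_weight times their CE to the total.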
class MixSoftmaxCrossEntropyLoss(nn.CrossEntropyLoss):
    def __init__(self, aux=True, aux_weight=0.2, ignore_index=-1, **kwargs):
        super(MixSoftmaxCrossEntropyLoss, self).__init__(ignore_index=ignore_index)
        self.aux = aux
        self.aux_weight = aux_weight

    def _aux_forward(self, *inputs, **kwargs):
        *preds, target = tuple(inputs)

        loss = super(MixSoftmaxCrossEntropyLoss, self).forward(preds[0], target)
        for i in range(1, len(preds)):
            aux_loss = super(MixSoftmaxCrossEntropyLoss, self).forward(preds[i], target)
            loss += self.aux_weight * aux_loss
        return loss

    def _multiple_forward(self, *inputs):
        *preds, target = tuple(inputs)
        loss = super(MixSoftmaxCrossEntropyLoss, self).forward(preds[0], target)
        for i in range(1, len(preds)):
            loss += super(MixSoftmaxCrossEntropyLoss, self).forward(preds[i], target)
        return loss

    def forward(self, *inputs, **kwargs):
        preds, target = tuple(inputs)
        inputs = tuple(list(preds) + [target])
        if self.aux:
            return dict(loss=self._aux_forward(*inputs))
        elif len(preds) > 1:
            return dict(loss=self._multiple_forward(*inputs))
        else:
            return dict(loss=super(MixSoftmaxCrossEntropyLoss, self).forward(*inputs))


class ICNetLoss(nn.CrossEntropyLoss):
    """Cross Entropy Loss for ICNet"""
    def __init__(self, aux_weight=0.4, ignore_index=-1, **kwargs):
        super(ICNetLoss, self).__init__(ignore_index=ignore_index)
        self.aux_weight = aux_weight

    def forward(self, *inputs):
        preds, target = tuple(inputs)
        inputs = tuple(list(preds) + [target])

        pred, pred_sub4, pred_sub8, pred_sub16, target = tuple(inputs)
        # [batch, W, H] -> [batch, 1, W, H]
        target = target.unsqueeze(1).float()
        target_sub4 = F.interpolate(target, pred_sub4.size()[2:], mode='bilinear', align_corners=True).squeeze(1).long()
        target_sub8 = F.interpolate(target, pred_sub8.size()[2:], mode='bilinear', align_corners=True).squeeze(1).long()
        target_sub16 = F.interpolate(target, pred_sub16.size()[2:], mode='bilinear', align_corners=True).squeeze(
            1).long()
        loss1 = super(ICNetLoss, self).forward(pred_sub4, target_sub4)
        loss2 = super(ICNetLoss, self).forward(pred_sub8, target_sub8)
        loss3 = super(ICNetLoss, self).forward(pred_sub16, target_sub16)
        return dict(loss=loss1 + loss2 * self.aux_weight + loss3 * self.aux_weight)


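# OHEM cross-entropy: online hard example mining -- keep only the hardest
# pixels (true-class probability below a threshold), retaining at least
# min_kept pixels per batch; the rest are set to ignore_index.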
class OhemCrossEntropy2d(nn.Module):
    def __init__(self, ignore_index=-1, thresh=0.7, min_kept=100000, use_weight=True, **kwargs):
        super(OhemCrossEntropy2d, self).__init__()
        self.ignore_index = ignore_index
        self.thresh = float(thresh)
        self.min_kept = int(min_kept)
        if use_weight:
            weight = torch.FloatTensor([0.8373, 0.918, 0.866, 1.0345, 1.0166, 0.9969, 0.9754,
                                        1.0489, 0.8786, 1.0023, 0.9539, 0.9843, 1.1116, 0.9037, 1.0865, 1.0955,
                                        1.0865, 1.1529, 1.0507])
            self.criterion = torch.nn.CrossEntropyLoss(weight=weight, ignore_index=ignore_index)
        else:
            self.criterion = torch.nn.CrossEntropyLoss(ignore_index=ignore_index)

    def forward(self, pred, target):
        n, c, h, w = pred.size()
        target = target.view(-1)
        valid_mask = target.ne(self.ignore_index)
        target = target * valid_mask.long()
        num_valid = valid_mask.sum()

        prob = F.softmax(pred, dim=1)
        prob = prob.transpose(0, 1).reshape(c, -1)

        if self.min_kept > num_valid:
            print("Lables: {}".format(num_valid))
        elif num_valid > 0:
            # prob = prob.masked_fill_(1 - valid_mask, 1)
            prob = prob.masked_fill_(~valid_mask, 1)
            mask_prob = prob[target, torch.arange(len(target), dtype=torch.long)]
            threshold = self.thresh
            if self.min_kept > 0:
                index = mask_prob.argsort()
                threshold_index = index[min(len(index), self.min_kept) - 1]
                if mask_prob[threshold_index] > self.thresh:
                    threshold = mask_prob[threshold_index]
            kept_mask = mask_prob.le(threshold)
            valid_mask = valid_mask * kept_mask
            target = target * kept_mask.long()

        target = target.masked_fill_(~valid_mask, self.ignore_index)
        target = target.view(n, h, w)

        return self.criterion(pred, target)


class EncNetLoss(nn.CrossEntropyLoss):
    """2D Cross Entropy Loss with SE Loss"""

    def __init__(self, aux=False, aux_weight=0.4, weight=None, ignore_index=-1, **kwargs):
        super(EncNetLoss, self).__init__(weight, None, ignore_index)
        self.se_loss = cfg.MODEL.ENCNET.SE_LOSS
        self.se_weight = cfg.MODEL.ENCNET.SE_WEIGHT
        self.nclass = datasets[cfg.DATASET.NAME].NUM_CLASS
        self.aux = aux
        self.aux_weight = aux_weight
        self.bceloss = nn.BCELoss(weight)

    def forward(self, *inputs):
        preds, target = tuple(inputs)
        inputs = tuple(list(preds) + [target])
        if not self.se_loss and not self.aux:
            return super(EncNetLoss, self).forward(*inputs)
        elif not self.se_loss:
            pred1, pred2, target = tuple(inputs)
            loss1 = super(EncNetLoss, self).forward(pred1, target)
            loss2 = super(EncNetLoss, self).forward(pred2, target)
            return dict(loss=loss1 + self.aux_weight * loss2)
        elif not self.aux:
            pred, se_pred, target = tuple(inputs)
            se_target = self._get_batch_label_vector(target, nclass=self.nclass).type_as(pred)
            loss1 = super(EncNetLoss, self).forward(pred, target)
            loss2 = self.bceloss(torch.sigmoid(se_pred), se_target)
            return dict(loss=loss1 + self.se_weight * loss2)
        else:
            pred1, se_pred, pred2, target = tuple(inputs)
            se_target = self._get_batch_label_vector(target, nclass=self.nclass).type_as(pred1)
            loss1 = super(EncNetLoss, self).forward(pred1, target)
            loss2 = super(EncNetLoss, self).forward(pred2, target)
            loss3 = self.bceloss(torch.sigmoid(se_pred), se_target)
            return dict(loss=loss1 + self.aux_weight * loss2 + self.se_weight * loss3)

    @staticmethod
    def _get_batch_label_vector(target, nclass):
        # target is a 3D Variable BxHxW, output is 2D BxnClass
        batch = target.size(0)
        tvect = Variable(torch.zeros(batch, nclass))
        for i in range(batch):
            hist = torch.histc(target[i].cpu().data.float(),
                               bins=nclass, min=0,
                               max=nclass - 1)
            vect = hist > 0
            tvect[i] = vect
        return tvect


class MixSoftmaxCrossEntropyOHEMLoss(OhemCrossEntropy2d):
    def __init__(self, aux=False, aux_weight=0.4, weight=None, ignore_index=-1, **kwargs):
        super(MixSoftmaxCrossEntropyOHEMLoss, self).__init__(ignore_index=ignore_index)
        self.aux = aux
        self.aux_weight = aux_weight
        self.bceloss = nn.BCELoss(weight)

    def _aux_forward(self, *inputs, **kwargs):
        *preds, target = tuple(inputs)

        loss = super(MixSoftmaxCrossEntropyOHEMLoss, self).forward(preds[0], target)
        for i in range(1, len(preds)):
            aux_loss = super(MixSoftmaxCrossEntropyOHEMLoss, self).forward(preds[i], target)
            loss += self.aux_weight * aux_loss
        return loss

    def forward(self, *inputs):
        preds, target = tuple(inputs)
        inputs = tuple(list(preds) + [target])
        if self.aux:
            return dict(loss=self._aux_forward(*inputs))
        else:
            return dict(loss=super(MixSoftmaxCrossEntropyOHEMLoss, self).forward(*inputs))


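# Lovasz-Softmax: a differentiable surrogate for mean IoU, computed on
# softmax probabilities (implementation in lovasz_losses.py).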
class LovaszSoftmax(nn.Module):
    def __init__(self, aux=True, aux_weight=0.2, ignore_index=-1, **kwargs):
        super(LovaszSoftmax, self).__init__()
        self.aux = aux
        self.aux_weight = aux_weight
        self.ignore_index = ignore_index

    def _aux_forward(self, *inputs, **kwargs):
        *preds, target = tuple(inputs)

        loss = lovasz_softmax(F.softmax(preds[0], dim=1), target, ignore=self.ignore_index)
        for i in range(1, len(preds)):
            aux_loss = lovasz_softmax(F.softmax(preds[i], dim=1), target, ignore=self.ignore_index)
            loss += self.aux_weight * aux_loss
        return loss

    def _multiple_forward(self, *inputs):
        *preds, target = tuple(inputs)
        # Fixed: upstream mistakenly calls MixSoftmaxCrossEntropyLoss's forward
        # here, which raises a TypeError; use lovasz_softmax instead.
        loss = lovasz_softmax(F.softmax(preds[0], dim=1), target, ignore=self.ignore_index)
        for i in range(1, len(preds)):
            loss += lovasz_softmax(F.softmax(preds[i], dim=1), target, ignore=self.ignore_index)
        return loss

    def forward(self, *inputs, **kwargs):
        preds, target = tuple(inputs)
        inputs = tuple(list(preds) + [target])
        if self.aux:
            return dict(loss=self._aux_forward(*inputs))
        elif len(preds) > 1:
            return dict(loss=self._multiple_forward(*inputs))
        else:
            # Fixed: upstream calls MixSoftmaxCrossEntropyLoss's forward here, which
            # fails on a LovaszSoftmax instance; compute lovasz_softmax directly.
            return dict(loss=lovasz_softmax(F.softmax(preds[0], dim=1), target,
                                            ignore=self.ignore_index))


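# Focal loss: scales cross-entropy by alpha * (1 - pt) ** gamma so that
# well-classified (high-pt) pixels contribute less to the total.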
class FocalLoss(nn.Module):
    def __init__(self, alpha=0.5, gamma=2, weight=None, aux=True, aux_weight=0.2, ignore_index=-1,
                 size_average=True):
        super().__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.weight = weight
        self.ignore_index = ignore_index
        self.aux = aux
        self.aux_weight = aux_weight
        self.size_average = size_average
        self.ce_fn = nn.CrossEntropyLoss(weight=self.weight, ignore_index=self.ignore_index)

    def _aux_forward(self, *inputs, **kwargs):
        *preds, target = tuple(inputs)

        loss = self._base_forward(preds[0], target)
        for i in range(1, len(preds)):
            aux_loss = self._base_forward(preds[i], target)
            loss += self.aux_weight * aux_loss
        return loss

    def _base_forward(self, output, target):

        if output.dim() > 2:
            output = output.contiguous().view(output.size(0), output.size(1), -1)
            output = output.transpose(1, 2)
            output = output.contiguous().view(-1, output.size(2)).squeeze()
        if target.dim() == 4:
            target = target.contiguous().view(target.size(0), target.size(1), -1)
            target = target.transpose(1, 2)
            target = target.contiguous().view(-1, target.size(2)).squeeze()
        elif target.dim() == 3:
            target = target.view(-1)
        else:
            target = target.view(-1, 1)

        logpt = self.ce_fn(output, target)
        pt = torch.exp(-logpt)
        loss = ((1 - pt) ** self.gamma) * self.alpha * logpt
        if self.size_average:
            return loss.mean()
        else:
            return loss.sum()

    def forward(self, *inputs, **kwargs):
        preds, target = tuple(inputs)
        inputs = tuple(list(preds) + [target])
        return dict(loss=self._aux_forward(*inputs))


class BinaryDiceLoss(nn.Module):
    """Dice loss of binary class
    Args:
        smooth: A float number to smooth loss, and avoid NaN error, default: 1
        p: Denominator value: \sum{x^p} + \sum{y^p}, default: 2
        predict: A tensor of shape [N, *]
        target: A tensor of shape same with predict
        reduction: Reduction method to apply, return mean over batch if 'mean',
            return sum if 'sum', return a tensor of shape [N,] if 'none'
    Returns:
        Loss tensor according to arg reduction
    Raise:
        Exception if unexpected reduction
    """
    def __init__(self, smooth=1, p=2, reduction='mean'):
        super(BinaryDiceLoss, self).__init__()
        self.smooth = smooth
        self.p = p
        self.reduction = reduction

    def forward(self, predict, target, valid_mask):
        assert predict.shape[0] == target.shape[0], "predict & target batch size don't match"
        predict = predict.contiguous().view(predict.shape[0], -1)
        target = target.contiguous().view(target.shape[0], -1)
        valid_mask = valid_mask.contiguous().view(valid_mask.shape[0], -1)

        num = torch.sum(torch.mul(predict, target) * valid_mask, dim=1) * 2 + self.smooth
        den = torch.sum((predict.pow(self.p) + target.pow(self.p)) * valid_mask, dim=1) + self.smooth

        loss = 1 - num / den

        if self.reduction == 'mean':
            return loss.mean()
        elif self.reduction == 'sum':
            return loss.sum()
        elif self.reduction == 'none':
            return loss
        else:
            raise Exception('Unexpected reduction {}'.format(self.reduction))


class DiceLoss(nn.Module):
    """Dice loss, need one hot encode input"""

    def __init__(self, weight=None, aux=True, aux_weight=0.4, ignore_index=-1, **kwargs):
        super(DiceLoss, self).__init__()
        self.kwargs = kwargs
        self.weight = weight
        self.ignore_index = ignore_index
        self.aux = aux
        self.aux_weight = aux_weight

    def _base_forward(self, predict, target, valid_mask):

        dice = BinaryDiceLoss(**self.kwargs)
        total_loss = 0
        predict = F.softmax(predict, dim=1)

        for i in range(target.shape[-1]):
            if i != self.ignore_index:
                dice_loss = dice(predict[:, i], target[..., i], valid_mask)
                if self.weight is not None:
                    assert self.weight.shape[0] == target.shape[1], \
                        'Expect weight shape [{}], get[{}]'.format(target.shape[1], self.weight.shape[0])
                    dice_loss *= self.weight[i]  # fixed: the attribute is self.weight
                total_loss += dice_loss

        return total_loss / target.shape[-1]

    def _aux_forward(self, *inputs, **kwargs):
        *preds, target = tuple(inputs)
        valid_mask = (target != self.ignore_index).long()
        target_one_hot = F.one_hot(torch.clamp_min(target, 0))
        loss = self._base_forward(preds[0], target_one_hot, valid_mask)
        for i in range(1, len(preds)):
            aux_loss = self._base_forward(preds[i], target_one_hot, valid_mask)
            loss += self.aux_weight * aux_loss
        return loss

    def forward(self, *inputs):
        preds, target = tuple(inputs)
        inputs = tuple(list(preds) + [target])
        return dict(loss=self._aux_forward(*inputs))


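# Factory: pick the loss from use_ohem / cfg.SOLVER.LOSS_NAME, otherwise
# fall back on the model name.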
def get_segmentation_loss(model, use_ohem=False, **kwargs):
    if use_ohem:
        return MixSoftmaxCrossEntropyOHEMLoss(**kwargs)
    elif cfg.SOLVER.LOSS_NAME == 'lovasz':
        logging.info('Use lovasz loss!')
        return LovaszSoftmax(**kwargs)
    elif cfg.SOLVER.LOSS_NAME == 'focal':
        logging.info('Use focal loss!')
        return FocalLoss(**kwargs)
    elif cfg.SOLVER.LOSS_NAME == 'dice':
        logging.info('Use dice loss!')
        return DiceLoss(**kwargs)
    elif cfg.SOLVER.LOSS_NAME == 'binary_dice':
        logging.info('Use binary_dice loss!')
        return BinaryDiceLoss(**kwargs)

    model = model.lower()
    if model == 'icnet':
        return ICNetLoss(**kwargs)
    elif model == 'encnet':
        return EncNetLoss(**kwargs)
    elif model == 'translab':
        return TranslabLoss(**kwargs)
    else:
        return MixSoftmaxCrossEntropyLoss(**kwargs)
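
To confirm the modified loss.py still imports and runs, here is a minimal smoke test (a sketch: it assumes the repo has been installed with python setup.py develop as shown below, so that segmentron and its dependencies are importable; the tensor shapes are illustrative dummies):

import torch
from segmentron.solver.loss import MixSoftmaxCrossEntropyLoss

criterion = MixSoftmaxCrossEntropyLoss(aux=False)
pred = torch.randn(2, 12, 64, 64)            # dummy logits [batch, classes, H, W]
target = torch.randint(0, 12, (2, 64, 64))   # dummy labels [batch, H, W]
loss = criterion((pred,), target)            # predictions are passed as a tuple
print(loss['loss'])                          # a scalar loss tensor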

Prepare the dataset

The dataset directory structure is as follows; note that this tree lives under datasets/transparent:

Trans10K_v2
├── test
│   ├── images
│   └── masks_12
├── train
│   ├── images
│   └── masks_12
└── validation
    ├── images
    └── masks_12
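
Before training, you can sanity-check this layout with a quick script (a sketch; adjust root to your checkout, and note that the training log further below reports images found under datasets/transparent/Trans10K_cls12, so the folder name must match whatever the dataloader expects):

import os

root = 'datasets/transparent/Trans10K_v2'  # assumed location; see the note above
for split in ('train', 'validation', 'test'):
    for sub in ('images', 'masks_12'):
        path = os.path.join(root, split, sub)
        print(path, '->', 'OK' if os.path.isdir(path) else 'MISSING')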

Download from Google Drive, or from Baidu Drive (extraction code: oqms).

Build and run the code

Build apex (optional)

git clone https://github.com/NVIDIA/apex
cd apex
python setup.py install
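
If the build succeeded, apex should be importable (an optional check; it assumes a CUDA toolchain compatible with your PyTorch build):

from apex import amp  # noqa: F401 -- apex's mixed-precision entry point
print('apex OK')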

Build segmentron

cd XXX/Trans2Seg
pip install thop
pip install -U pyyaml
python setup.py develop --user
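
A quick way to verify that the develop install picked up the right checkout (a sketch):

import segmentron
import thop  # installed above; the repo uses it to report FLOPs/params
print(segmentron.__file__)  # should point inside your Trans2Seg checkout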

Run

If you have a single GPU, you can run training as follows.

Note: if you run this repo on Windows, set num_workers=0 to reduce the chance of DataLoader errors.
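The worker count comes from DATASET.WORKERS in the merged config (it appears as "WORKERS": 8 in the dump below). Adding DATASET: WORKERS: 0 to the yaml config should override it; equivalently, if you drive training from your own script, you can set it on the global cfg (a sketch, assuming the SegmenTron-style cfg object that loss.py above imports):

from segmentron.config import cfg
cfg.DATASET.WORKERS = 0  # e.g. on Windows or CPU-limited machines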
python tools/train.py --config-file configs/trans10kv2/trans2seg/trans2seg_medium.yaml
2021-07-05 05:56:52,829 Segmentron INFO: Using 1 GPUs
2021-07-05 05:56:52,830 Segmentron INFO: Namespace(config_file='configs/trans10kv2/trans2seg/trans2seg_medium.yaml', device='cuda', distributed=False, input_img='tools/demo_vis.png', local_rank=0, log_iter=10, no_cuda=False, num_gpus=1, opts=[], resume=None, skip_val=False, test=False, val_epoch=1, vis=False)
2021-07-05 05:56:52,830 Segmentron INFO: {
        "SEED": 1024,
        "TIME_STAMP": "2021-07-05-05-56",
        "ROOT_PATH": "/content/Trans2Seg/Trans2Seg",
        "PHASE": "train",
        "DATASET": {
                "NAME": "transparent11",
                "MEAN": [
                        0.485,
                        0.456,
                        0.406
                ],
                "STD": [
                        0.229,
                        0.224,
                        0.225
                ],
                "IGNORE_INDEX": -1,
                "WORKERS": 8,
                "MODE": "testval"
        },
        "AUG": {
                "MIRROR": true,
                "BLUR_PROB": 0.0,
                "BLUR_RADIUS": 0.0,
                "COLOR_JITTER": null,
                "CROP": false
        },
        "TRAIN": {
                "EPOCHS": 50,
                "BATCH_SIZE": 4,
                "CROP_SIZE": [
                        512,
                        512
                ],
                "BASE_SIZE": 512,
                "MODEL_SAVE_DIR": "workdirs/trans10kv2/trans2seg_small",
                "LOG_SAVE_DIR": "workdirs/",
                "PRETRAINED_MODEL_PATH": "",
                "BACKBONE_PRETRAINED": true,
                "BACKBONE_PRETRAINED_PATH": "",
                "RESUME_MODEL_PATH": "",
                "SYNC_BATCH_NORM": true,
                "SNAPSHOT_EPOCH": 1,
                "APEX": false
        },
        "SOLVER": {
                "LR": 0.0001,
                "OPTIMIZER": "adam",
                "EPSILON": 1e-08,
                "MOMENTUM": 0.9,
                "WEIGHT_DECAY": 0.0001,
                "DECODER_LR_FACTOR": 10.0,
                "LR_SCHEDULER": "poly",
                "POLY": {
                        "POWER": 0.9
                },
                "STEP": {
                        "GAMMA": 0.1,
                        "DECAY_EPOCH": [
                                10,
                                20
                        ]
                },
                "WARMUP": {
                        "EPOCHS": 0.0,
                        "FACTOR": 0.3333333333333333,
                        "METHOD": "linear"
                },
                "OHEM": false,
                "AUX": false,
                "AUX_WEIGHT": 0.4,
                "LOSS_NAME": ""
        },
        "TEST": {
                "TEST_MODEL_PATH": "",
                "BATCH_SIZE": 16,
                "CROP_SIZE": [
                        512,
                        512
                ],
                "SCALES": [
                        1.0
                ],
                "FLIP": false
        },
        "VISUAL": {
                "OUTPUT_DIR": "../runs/visual/"
        },
        "MODEL": {
                "MODEL_NAME": "Trans2Seg",
                "BACKBONE": "resnet50c",
                "BACKBONE_SCALE": 1.0,
                "MULTI_LOSS_WEIGHT": [
                        1.0
                ],
                "DEFAULT_GROUP_NUMBER": 32,
                "DEFAULT_EPSILON": 1e-05,
                "BN_TYPE": "BN",
                "BN_EPS_FOR_ENCODER": null,
                "BN_EPS_FOR_DECODER": null,
                "OUTPUT_STRIDE": 16,
                "BN_MOMENTUM": null,
                "DANET": {
                        "MULTI_DILATION": null,
                        "MULTI_GRID": false
                },
                "DEEPLABV3_PLUS": {
                        "USE_ASPP": true,
                        "ENABLE_DECODER": true,
                        "ASPP_WITH_SEP_CONV": true,
                        "DECODER_USE_SEP_CONV": true
                },
                "OCNet": {
                        "OC_ARCH": "base"
                },
                "ENCNET": {
                        "SE_LOSS": true,
                        "SE_WEIGHT": 0.2,
                        "LATERAL": true
                },
                "CCNET": {
                        "RECURRENCE": 2
                },
                "CGNET": {
                        "STAGE2_BLOCK_NUM": 3,
                        "STAGE3_BLOCK_NUM": 21
                },
                "POINTREND": {
                        "BASEMODEL": "DeepLabV3_Plus"
                },
                "HRNET": {
                        "PRETRAINED_LAYERS": [
                                "*"
                        ],
                        "STEM_INPLANES": 64,
                        "FINAL_CONV_KERNEL": 1,
                        "WITH_HEAD": true,
                        "STAGE1": {
                                "NUM_MODULES": 1,
                                "NUM_BRANCHES": 1,
                                "NUM_BLOCKS": [
                                        1
                                ],
                                "NUM_CHANNELS": [
                                        32
                                ],
                                "BLOCK": "BOTTLENECK",
                                "FUSE_METHOD": "SUM"
                        },
                        "STAGE2": {
                                "NUM_MODULES": 1,
                                "NUM_BRANCHES": 2,
                                "NUM_BLOCKS": [
                                        4,
                                        4
                                ],
                                "NUM_CHANNELS": [
                                        32,
                                        64
                                ],
                                "BLOCK": "BASIC",
                                "FUSE_METHOD": "SUM"
                        },
                        "STAGE3": {
                                "NUM_MODULES": 1,
                                "NUM_BRANCHES": 3,
                                "NUM_BLOCKS": [
                                        4,
                                        4,
                                        4
                                ],
                                "NUM_CHANNELS": [
                                        32,
                                        64,
                                        128
                                ],
                                "BLOCK": "BASIC",
                                "FUSE_METHOD": "SUM"
                        },
                        "STAGE4": {
                                "NUM_MODULES": 1,
                                "NUM_BRANCHES": 4,
                                "NUM_BLOCKS": [
                                        4,
                                        4,
                                        4,
                                        4
                                ],
                                "NUM_CHANNELS": [
                                        32,
                                        64,
                                        128,
                                        256
                                ],
                                "BLOCK": "BASIC",
                                "FUSE_METHOD": "SUM"
                        }
                },
                "TRANS2Seg": {
                        "embed_dim": 256,
                        "depth": 4,
                        "num_heads": 8,
                        "mlp_ratio": 3.0,
                        "hid_dim": 64
                }
        }
}
2021-07-05 05:56:52,871 Segmentron INFO: Found 5000 images in the folder /content/Trans2Seg/Trans2Seg/datasets/transparent/Trans10K_cls12
2021-07-05 05:56:52,880 Segmentron INFO: Found 1000 images in the folder /content/Trans2Seg/Trans2Seg/datasets/transparent/Trans10K_cls12
2021-07-05 05:56:52,914 Segmentron INFO: Found 4428 images in the folder /content/Trans2Seg/Trans2Seg/datasets/transparent/Trans10K_cls12
/usr/local/lib/python3.7/dist-packages/torch/utils/data/dataloader.py:481: UserWarning: This DataLoader will create 8 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.
  cpuset_checked))
2021-07-05 05:56:53,368 Segmentron INFO: load backbone pretrained model from url..
Downloading: "https://github.com/LikeLy-Journey/SegmenTron/releases/download/v0.1.0/resnet50-25c4b509.pth" to /root/.cache/torch/hub/checkpoints/resnet50-25c4b509.pth
100% 98.2M/98.2M [00:01<00:00, 97.6MB/s]
2021-07-05 05:56:54,723 Segmentron INFO: <All keys matched successfully>
/usr/local/lib/python3.7/dist-packages/torch/nn/functional.py:718: UserWarning: Named tensors and all their associated APIs are an experimental feature and subject to change. Please do not use them for anything important until they are released as stable. (Triggered internally at  /pytorch/c10/core/TensorImpl.h:1156.)
  return torch.max_pool2d(input, kernel_size, stride, padding, dilation, ceil_mode)
2021-07-05 05:57:09,046 Segmentron INFO: Trans2Seg flops: 49.034G input shape is [3, 512, 512], params: 56.215M
2021-07-05 05:57:09,046 Segmentron INFO: Not use SyncBatchNorm!
2021-07-05 05:57:09,048 Segmentron INFO: Start training, Total Epochs: 50 = Total Iterations 62500
2021-07-05 05:57:19,654 Segmentron INFO: Epoch: 1/50 || Iters: 10/1250 || Lr: 0.000100 || Loss: 1.6398 || Cost Time: 0:00:10 || Estimated Time: 18:24:17
2021-07-05 05:57:24,757 Segmentron INFO: Epoch: 1/50 || Iters: 20/1250 || Lr: 0.000100 || Loss: 1.3416 || Cost Time: 0:00:15 || Estimated Time: 13:37:45
2021-07-05 05:57:29,885 Segmentron INFO: Epoch: 1/50 || Iters: 30/1250 || Lr: 0.000100 || Loss: 0.6450 || Cost Time: 0:00:20 || Estimated Time: 12:03:02
2021-07-05 05:57:35,063 Segmentron INFO: Epoch: 1/50 || Iters: 40/1250 || Lr: 0.000100 || Loss: 0.8031 || Cost Time: 0:00:26 || Estimated Time: 11:17:00
2021-07-05 05:57:40,265 Segmentron INFO: Epoch: 1/50 || Iters: 50/1250 || Lr: 0.000100 || Loss: 0.7714 || Cost Time: 0:00:31 || Estimated Time: 10:49:46
2021-07-05 05:57:45,509 Segmentron INFO: Epoch: 1/50 || Iters: 60/1250 || Lr: 0.000100 || Loss: 0.8005 || Cost Time: 0:00:36 || Estimated Time: 10:32:19
2021-07-05 05:57:50,793 Segmentron INFO: Epoch: 1/50 || Iters: 70/1250 || Lr: 0.000100 || Loss: 1.8231 || Cost Time: 0:00:41 || Estimated Time: 10:20:26
2021-07-05 05:57:56,126 Segmentron INFO: Epoch: 1/50 || Iters: 80/1250 || Lr: 0.000100 || Loss: 1.7568 || Cost Time: 0:00:47 || Estimated Time: 10:12:09
2021-07-05 05:58:01,511 Segmentron INFO: Epoch: 1/50 || Iters: 90/1250 || Lr: 0.000100 || Loss: 0.6441 || Cost Time: 0:00:52 || Estimated Time: 10:06:17
2021-07-05 05:58:06,946 Segmentron INFO: Epoch: 1/50 || Iters: 100/1250 || Lr: 0.000100 || Loss: 0.6335 || Cost Time: 0:00:57 || Estimated Time: 10:02:05
2021-07-05 05:58:12,436 Segmentron INFO: Epoch: 1/50 || Iters: 110/1250 || Lr: 0.000100 || Loss: 1.1686 || Cost Time: 0:01:03 || Estimated Time: 9:59:10
2021-07-05 05:58:17,987 Segmentron INFO: Epoch: 1/50 || Iters: 120/1250 || Lr: 0.000100 || Loss: 1.0597 || Cost Time: 0:01:08 || Estimated Time: 9:57:15
2021-07-05 05:58:23,625 Segmentron INFO: Epoch: 1/50 || Iters: 130/1250 || Lr: 0.000100 || Loss: 0.6590 || Cost Time: 0:01:14 || Estimated Time: 9:56:17
2021-07-05 05:58:29,321 Segmentron INFO: Epoch: 1/50 || Iters: 140/1250 || Lr: 0.000100 || Loss: 1.2513 || Cost Time: 0:01:20 || Estimated Time: 9:55:53
2021-07-05 05:58:35,029 Segmentron INFO: Epoch: 1/50 || Iters: 150/1250 || Lr: 0.000100 || Loss: 0.9338 || Cost Time: 0:01:25 || Estimated Time: 9:55:37
2021-07-05 05:58:40,683 Segmentron INFO: Epoch: 1/50 || Iters: 160/1250 || Lr: 0.000100 || Loss: 0.4313 || Cost Time: 0:01:31 || Estimated Time: 9:55:01
2021-07-05 05:58:46,268 Segmentron INFO: Epoch: 1/50 || Iters: 170/1250 || Lr: 0.000100 || Loss: 0.5899 || Cost Time: 0:01:37 || Estimated Time: 9:54:03
2021-07-05 05:58:51,802 Segmentron INFO: Epoch: 1/50 || Iters: 180/1250 || Lr: 0.000100 || Loss: 0.9178 || Cost Time: 0:01:42 || Estimated Time: 9:52:54
2021-07-05 05:58:57,312 Segmentron INFO: Epoch: 1/50 || Iters: 190/1250 || Lr: 0.000100 || Loss: 0.7366 || Cost Time: 0:01:48 || Estimated Time: 9:51:43
2021-07-05 05:59:02,790 Segmentron INFO: Epoch: 1/50 || Iters: 200/1250 || Lr: 0.000100 || Loss: 0.6236 || Cost Time: 0:01:53 || Estimated Time: 9:50:29
2021-07-05 05:59:08,267 Segmentron INFO: Epoch: 1/50 || Iters: 210/1250 || Lr: 0.000100 || Loss: 1.0859 || Cost Time: 0:01:59 || Estimated Time: 9:49:21
2021-07-05 05:59:13,726 Segmentron INFO: Epoch: 1/50 || Iters: 220/1250 || Lr: 0.000100 || Loss: 1.0176 || Cost Time: 0:02:04 || Estimated Time: 9:48:14
2021-07-05 05:59:19,187 Segmentron INFO: Epoch: 1/50 || Iters: 230/1250 || Lr: 0.000100 || Loss: 0.9893 || Cost Time: 0:02:10 || Estimated Time: 9:47:12
2021-07-05 05:59:24,645 Segmentron INFO: Epoch: 1/50 || Iters: 240/1250 || Lr: 0.000100 || Loss: 0.8127 || Cost Time: 0:02:15 || Estimated Time: 9:46:15
2021-07-05 05:59:30,120 Segmentron INFO: Epoch: 1/50 || Iters: 250/1250 || Lr: 0.000100 || Loss: 1.3774 || Cost Time: 0:02:21 || Estimated Time: 9:45:25
2021-07-05 05:59:35,597 Segmentron INFO: Epoch: 1/50 || Iters: 260/1250 || Lr: 0.000100 || Loss: 0.4150 || Cost Time: 0:02:26 || Estimated Time: 9:44:40
2021-07-05 05:59:41,077 Segmentron INFO: Epoch: 1/50 || Iters: 270/1250 || Lr: 0.000100 || Loss: 0.6798 || Cost Time: 0:02:32 || Estimated Time: 9:43:59
2021-07-05 05:59:46,586 Segmentron INFO: Epoch: 1/50 || Iters: 280/1250 || Lr: 0.000100 || Loss: 0.3734 || Cost Time: 0:02:37 || Estimated Time: 9:43:26
2021-07-05 05:59:52,103 Segmentron INFO: Epoch: 1/50 || Iters: 290/1250 || Lr: 0.000100 || Loss: 0.7941 || Cost Time: 0:02:43 || Estimated Time: 9:42:56
2021-07-05 05:59:57,639 Segmentron INFO: Epoch: 1/50 || Iters: 300/1250 || Lr: 0.000100 || Loss: 0.5892 || Cost Time: 0:02:48 || Estimated Time: 9:42:33
2021-07-05 06:00:03,177 Segmentron INFO: Epoch: 1/50 || Iters: 310/1250 || Lr: 0.000100 || Loss: 0.5118 || Cost Time: 0:02:54 || Estimated Time: 9:42:11
2021-07-05 06:00:08,715 Segmentron INFO: Epoch: 1/50 || Iters: 320/1250 || Lr: 0.000100 || Loss: 0.6326 || Cost Time: 0:02:59 || Estimated Time: 9:41:50
2021-07-05 06:00:14,250 Segmentron INFO: Epoch: 1/50 || Iters: 330/1250 || Lr: 0.000100 || Loss: 0.6859 || Cost Time: 0:03:05 || Estimated Time: 9:41:30
2021-07-05 06:00:19,776 Segmentron INFO: Epoch: 1/50 || Iters: 340/1250 || Lr: 0.000100 || Loss: 0.9854 || Cost Time: 0:03:10 || Estimated Time: 9:41:08
2021-07-05 06:00:25,292 Segmentron INFO: Epoch: 1/50 || Iters: 350/1250 || Lr: 0.000099 || Loss: 0.5796 || Cost Time: 0:03:16 || Estimated Time: 9:40:46
2021-07-05 06:00:30,809 Segmentron INFO: Epoch: 1/50 || Iters: 360/1250 || Lr: 0.000099 || Loss: 0.3313 || Cost Time: 0:03:21 || Estimated Time: 9:40:25
2021-07-05 06:00:36,319 Segmentron INFO: Epoch: 1/50 || Iters: 370/1250 || Lr: 0.000099 || Loss: 0.7653 || Cost Time: 0:03:27 || Estimated Time: 9:40:04
2021-07-05 06:00:41,804 Segmentron INFO: Epoch: 1/50 || Iters: 380/1250 || Lr: 0.000099 || Loss: 0.5316 || Cost Time: 0:03:32 || Estimated Time: 9:39:39
2021-07-05 06:00:47,282 Segmentron INFO: Epoch: 1/50 || Iters: 390/1250 || Lr: 0.000099 || Loss: 0.5340 || Cost Time: 0:03:38 || Estimated Time: 9:39:14
2021-07-05 06:00:52,771 Segmentron INFO: Epoch: 1/50 || Iters: 400/1250 || Lr: 0.000099 || Loss: 0.7623 || Cost Time: 0:03:43 || Estimated Time: 9:38:52
2021-07-05 06:00:58,262 Segmentron INFO: Epoch: 1/50 || Iters: 410/1250 || Lr: 0.000099 || Loss: 0.5127 || Cost Time: 0:03:49 || Estimated Time: 9:38:31
2021-07-05 06:01:03,751 Segmentron INFO: Epoch: 1/50 || Iters: 420/1250 || Lr: 0.000099 || Loss: 0.5150 || Cost Time: 0:03:54 || Estimated Time: 9:38:10
2021-07-05 06:01:09,257 Segmentron INFO: Epoch: 1/50 || Iters: 430/1250 || Lr: 0.000099 || Loss: 0.9970 || Cost Time: 0:04:00 || Estimated Time: 9:37:53
2021-07-05 06:01:14,772 Segmentron INFO: Epoch: 1/50 || Iters: 440/1250 || Lr: 0.000099 || Loss: 0.6717 || Cost Time: 0:04:05 || Estimated Time: 9:37:37
2021-07-05 06:01:20,293 Segmentron INFO: Epoch: 1/50 || Iters: 450/1250 || Lr: 0.000099 || Loss: 0.9943 || Cost Time: 0:04:11 || Estimated Time: 9:37:23
2021-07-05 06:01:25,819 Segmentron INFO: Epoch: 1/50 || Iters: 460/1250 || Lr: 0.000099 || Loss: 0.5162 || Cost Time: 0:04:16 || Estimated Time: 9:37:09
2021-07-05 06:01:31,355 Segmentron INFO: Epoch: 1/50 || Iters: 470/1250 || Lr: 0.000099 || Loss: 0.3666 || Cost Time: 0:04:22 || Estimated Time: 9:36:58
2021-07-05 06:01:36,888 Segmentron INFO: Epoch: 1/50 || Iters: 480/1250 || Lr: 0.000099 || Loss: 0.7810 || Cost Time: 0:04:27 || Estimated Time: 9:36:46
2021-07-05 06:01:42,427 Segmentron INFO: Epoch: 1/50 || Iters: 490/1250 || Lr: 0.000099 || Loss: 0.9375 || Cost Time: 0:04:33 || Estimated Time: 9:36:35
2021-07-05 06:01:47,957 Segmentron INFO: Epoch: 1/50 || Iters: 500/1250 || Lr: 0.000099 || Loss: 0.7715 || Cost Time: 0:04:38 || Estimated Time: 9:36:24
2021-07-05 06:01:53,492 Segmentron INFO: Epoch: 1/50 || Iters: 510/1250 || Lr: 0.000099 || Loss: 0.7611 || Cost Time: 0:04:44 || Estimated Time: 9:36:13
2021-07-05 06:01:59,015 Segmentron INFO: Epoch: 1/50 || Iters: 520/1250 || Lr: 0.000099 || Loss: 1.3777 || Cost Time: 0:04:49 || Estimated Time: 9:36:01
2021-07-05 06:02:04,528 Segmentron INFO: Epoch: 1/50 || Iters: 530/1250 || Lr: 0.000099 || Loss: 0.4394 || Cost Time: 0:04:55 || Estimated Time: 9:35:48
2021-07-05 06:02:10,029 Segmentron INFO: Epoch: 1/50 || Iters: 540/1250 || Lr: 0.000099 || Loss: 0.4215 || Cost Time: 0:05:00 || Estimated Time: 9:35:34
2021-07-05 06:02:15,548 Segmentron INFO: Epoch: 1/50 || Iters: 550/1250 || Lr: 0.000099 || Loss: 0.2931 || Cost Time: 0:05:06 || Estimated Time: 9:35:22
2021-07-05 06:02:21,062 Segmentron INFO: Epoch: 1/50 || Iters: 560/1250 || Lr: 0.000099 || Loss: 0.4534 || Cost Time: 0:05:12 || Estimated Time: 9:35:10
2021-07-05 06:02:26,563 Segmentron INFO: Epoch: 1/50 || Iters: 570/1250 || Lr: 0.000099 || Loss: 0.7274 || Cost Time: 0:05:17 || Estimated Time: 9:34:57
2021-07-05 06:02:32,071 Segmentron INFO: Epoch: 1/50 || Iters: 580/1250 || Lr: 0.000099 || Loss: 0.2845 || Cost Time: 0:05:23 || Estimated Time: 9:34:45
2021-07-05 06:02:37,582 Segmentron INFO: Epoch: 1/50 || Iters: 590/1250 || Lr: 0.000099 || Loss: 0.4167 || Cost Time: 0:05:28 || Estimated Time: 9:34:33
2021-07-05 06:02:43,087 Segmentron INFO: Epoch: 1/50 || Iters: 600/1250 || Lr: 0.000099 || Loss: 0.7590 || Cost Time: 0:05:34 || Estimated Time: 9:34:21
2021-07-05 06:02:48,595 Segmentron INFO: Epoch: 1/50 || Iters: 610/1250 || Lr: 0.000099 || Loss: 0.5357 || Cost Time: 0:05:39 || Estimated Time: 9:34:09
2021-07-05 06:02:54,104 Segmentron INFO: Epoch: 1/50 || Iters: 620/1250 || Lr: 0.000099 || Loss: 0.4961 || Cost Time: 0:05:45 || Estimated Time: 9:33:58
2021-07-05 06:02:59,607 Segmentron INFO: Epoch: 1/50 || Iters: 630/1250 || Lr: 0.000099 || Loss: 0.5018 || Cost Time: 0:05:50 || Estimated Time: 9:33:46
2021-07-05 06:03:05,094 Segmentron INFO: Epoch: 1/50 || Iters: 640/1250 || Lr: 0.000099 || Loss: 1.0672 || Cost Time: 0:05:56 || Estimated Time: 9:33:33
2021-07-05 06:03:10,593 Segmentron INFO: Epoch: 1/50 || Iters: 650/1250 || Lr: 0.000099 || Loss: 0.2307 || Cost Time: 0:06:01 || Estimated Time: 9:33:22
2021-07-05 06:03:16,084 Segmentron INFO: Epoch: 1/50 || Iters: 660/1250 || Lr: 0.000099 || Loss: 0.8019 || Cost Time: 0:06:07 || Estimated Time: 9:33:09
2021-07-05 06:03:21,586 Segmentron INFO: Epoch: 1/50 || Iters: 670/1250 || Lr: 0.000099 || Loss: 0.7613 || Cost Time: 0:06:12 || Estimated Time: 9:32:58
2021-07-05 06:03:27,086 Segmentron INFO: Epoch: 1/50 || Iters: 680/1250 || Lr: 0.000099 || Loss: 0.6910 || Cost Time: 0:06:18 || Estimated Time: 9:32:47
2021-07-05 06:03:32,570 Segmentron INFO: Epoch: 1/50 || Iters: 690/1250 || Lr: 0.000099 || Loss: 0.5595 || Cost Time: 0:06:23 || Estimated Time: 9:32:35
2021-07-05 06:03:38,067 Segmentron INFO: Epoch: 1/50 || Iters: 700/1250 || Lr: 0.000099 || Loss: 1.1118 || Cost Time: 0:06:29 || Estimated Time: 9:32:24
2021-07-05 06:03:43,560 Segmentron INFO: Epoch: 1/50 || Iters: 710/1250 || Lr: 0.000099 || Loss: 0.4333 || Cost Time: 0:06:34 || Estimated Time: 9:32:13
2021-07-05 06:03:49,057 Segmentron INFO: Epoch: 1/50 || Iters: 720/1250 || Lr: 0.000099 || Loss: 0.3692 || Cost Time: 0:06:40 || Estimated Time: 9:32:02
2021-07-05 06:03:54,561 Segmentron INFO: Epoch: 1/50 || Iters: 730/1250 || Lr: 0.000099 || Loss: 0.8774 || Cost Time: 0:06:45 || Estimated Time: 9:31:52
2021-07-05 06:04:00,045 Segmentron INFO: Epoch: 1/50 || Iters: 740/1250 || Lr: 0.000099 || Loss: 0.5601 || Cost Time: 0:06:50 || Estimated Time: 9:31:41
2021-07-05 06:04:05,534 Segmentron INFO: Epoch: 1/50 || Iters: 750/1250 || Lr: 0.000099 || Loss: 0.3583 || Cost Time: 0:06:56 || Estimated Time: 9:31:30
2021-07-05 06:04:11,036 Segmentron INFO: Epoch: 1/50 || Iters: 760/1250 || Lr: 0.000099 || Loss: 0.6755 || Cost Time: 0:07:01 || Estimated Time: 9:31:20
2021-07-05 06:04:16,538 Segmentron INFO: Epoch: 1/50 || Iters: 770/1250 || Lr: 0.000099 || Loss: 0.7194 || Cost Time: 0:07:07 || Estimated Time: 9:31:11
2021-07-05 06:04:22,030 Segmentron INFO: Epoch: 1/50 || Iters: 780/1250 || Lr: 0.000099 || Loss: 0.8702 || Cost Time: 0:07:12 || Estimated Time: 9:31:00
2021-07-05 06:04:27,525 Segmentron INFO: Epoch: 1/50 || Iters: 790/1250 || Lr: 0.000099 || Loss: 0.6927 || Cost Time: 0:07:18 || Estimated Time: 9:30:50
2021-07-05 06:04:33,026 Segmentron INFO: Epoch: 1/50 || Iters: 800/1250 || Lr: 0.000099 || Loss: 0.3912 || Cost Time: 0:07:23 || Estimated Time: 9:30:41
2021-07-05 06:04:38,529 Segmentron INFO: Epoch: 1/50 || Iters: 810/1250 || Lr: 0.000099 || Loss: 0.7517 || Cost Time: 0:07:29 || Estimated Time: 9:30:32
2021-07-05 06:04:44,022 Segmentron INFO: Epoch: 1/50 || Iters: 820/1250 || Lr: 0.000099 || Loss: 0.6233 || Cost Time: 0:07:34 || Estimated Time: 9:30:22
2021-07-05 06:04:49,524 Segmentron INFO: Epoch: 1/50 || Iters: 830/1250 || Lr: 0.000099 || Loss: 0.4822 || Cost Time: 0:07:40 || Estimated Time: 9:30:13
2021-07-05 06:04:55,015 Segmentron INFO: Epoch: 1/50 || Iters: 840/1250 || Lr: 0.000099 || Loss: 0.3702 || Cost Time: 0:07:45 || Estimated Time: 9:30:03
2021-07-05 06:05:00,513 Segmentron INFO: Epoch: 1/50 || Iters: 850/1250 || Lr: 0.000099 || Loss: 0.4319 || Cost Time: 0:07:51 || Estimated Time: 9:29:54
2021-07-05 06:05:06,013 Segmentron INFO: Epoch: 1/50 || Iters: 860/1250 || Lr: 0.000099 || Loss: 0.4764 || Cost Time: 0:07:56 || Estimated Time: 9:29:45
2021-07-05 06:05:11,509 Segmentron INFO: Epoch: 1/50 || Iters: 870/1250 || Lr: 0.000099 || Loss: 1.0134 || Cost Time: 0:08:02 || Estimated Time: 9:29:36