Experiment: Using TV Loss to Solve the Overfitting Problem

When the residual is used as DnCNN's training target, an obvious overfitting phenomenon appears during training.

Strangely, this only happens on the LR images, not on the HR ones. Early stopping could work around the overfitting, but it would not solve the problem at its root. Instead, a TV loss is added to act as a regularizer.

 

First, the TV loss itself (TV loss was already introduced in the earlier post 《基于pytorch的噪声估计网络》):
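For reference, what the implementation below computes is a squared (anisotropic) variant of total variation: the sum of squared differences between vertically and horizontally adjacent pixels, normalized by the number of pixel pairs per sample and averaged over the batch. With weight $w$ (TVLoss_weight), batch size $B$, and per-sample pair counts $N_h = C(H-1)W$ and $N_w = CH(W-1)$:

$$\mathrm{TV}(x) = \frac{2w}{B}\left(\frac{1}{N_h}\sum_{b,c,i,j}\bigl(x_{b,c,i+1,j}-x_{b,c,i,j}\bigr)^2 + \frac{1}{N_w}\sum_{b,c,i,j}\bigl(x_{b,c,i,j+1}-x_{b,c,i,j}\bigr)^2\right)$$

Penalizing these local differences pushes the output toward piecewise-smooth images, which is exactly the regularization effect we want against fitting the noise.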

#########################################
# TV loss (total variation regularizer)
import torch
import torch.nn as nn

class TVLoss(nn.Module):
    def __init__(self, TVLoss_weight=1):
        super(TVLoss, self).__init__()
        self.TVLoss_weight = TVLoss_weight

    def forward(self, x):
        # x: (batch, channel, height, width)
        batch_size = x.size()[0]
        h_x = x.size()[2]
        w_x = x.size()[3]
        # number of adjacent pixel pairs per sample, for normalization
        count_h = self._tensor_size(x[:, :, 1:, :])
        count_w = self._tensor_size(x[:, :, :, 1:])
        # sum of squared differences between vertically / horizontally adjacent pixels
        h_tv = torch.pow((x[:, :, 1:, :] - x[:, :, :h_x - 1, :]), 2).sum()
        w_tv = torch.pow((x[:, :, :, 1:] - x[:, :, :, :w_x - 1]), 2).sum()
        return self.TVLoss_weight * 2 * (h_tv / count_h + w_tv / count_w) / batch_size

    def _tensor_size(self, t):
        return t.size()[1] * t.size()[2] * t.size()[3]

##########################################
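A quick sanity check of the module (the tensor shapes here are made up, chosen to match the 96×96 training patches configured later):

# assumes the TVLoss class defined above
import torch

tv = TVLoss(TVLoss_weight=1)
print(tv(torch.rand(4, 3, 96, 96)).item())   # positive for a random (noisy) batch
print(tv(torch.ones(4, 3, 96, 96)).item())   # exactly 0.0 for constant images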

Then modify SRModel: register a new 'l2_tv' pixel criterion that pairs nn.MSELoss with TVLoss, and add the weighted TV term in optimize_parameters.

import os
from collections import OrderedDict

import torch
import torch.nn as nn
from torch.optim import lr_scheduler

import models.networks as networks
from .base_model import BaseModel
from models.modules.loss import TVLoss


class SRModel(BaseModel):
    def __init__(self, opt):
        super(SRModel, self).__init__(opt)
        train_opt = opt['train']
        res_mode = opt['network_G']['mode']
        net_type = opt['network_G']['which_model_G']
        finetune_type = opt['finetune_type']
        init_norm_type = opt['init_norm_type']

        # define network and load pretrained models
        self.netG = networks.define_G(opt).to(self.device)
        self.load()

        if init_norm_type is not None:
            self.__init_norm(res_mode, net_type, init_norm_type)

        if self.is_train:
            self.netG.train()

            # loss
            loss_type = train_opt['pixel_criterion']
            if loss_type == 'l1':
                self.cri_pix = nn.L1Loss().to(self.device)
            elif loss_type == 'l2':
                self.cri_pix = nn.MSELoss().to(self.device)
            elif loss_type == 'l2_tv':
                self.cri_pix = nn.MSELoss().to(self.device)
                self.cri_tv = TVLoss().to(self.device)
                self.tvloss_parameter = 1e-5  # weight of the TV regularization term
            else:
                raise NotImplementedError('Loss type [{:s}] is not recognized.'.format(loss_type))
            self.l_pix_w = train_opt['pixel_weight']

            # optimizers
            wd_G = train_opt['weight_decay_G'] if train_opt['weight_decay_G'] else 0
            #
            self.optim_params = self.__define_grad_params(finetune_type, res_mode, net_type)

            self.optimizer_G = torch.optim.Adam(
                self.optim_params, lr=train_opt['lr_G'], weight_decay=wd_G)
            self.optimizers.append(self.optimizer_G)

            # optim_params_first_layer = []
            # optim_params_other = []
            # for k, v in self.netG.named_parameters():  # can optimize for a part of the model
            #     if 'model.0' in k:
            #         optim_params_first_layer.append(v)
            #     else:
            #         optim_params_other.append(v)
            # self.optimizer_G_first_layer = torch.optim.Adam(optim_params_first_layer, lr=train_opt['lr_G'] * 10,
            #                                                 weight_decay=wd_G)
            # self.optimizer_G_other = torch.optim.Adam(optim_params_other, lr=train_opt['lr_G'], weight_decay=wd_G)
            # self.optimizers.append(self.optimizer_G_first_layer)
            # self.optimizers.append(self.optimizer_G_other)

            # schedulers
            if train_opt['lr_scheme'] == 'MultiStepLR':
                for optimizer in self.optimizers:
                    self.schedulers.append(lr_scheduler.MultiStepLR(optimizer, \
                        train_opt['lr_steps'], train_opt['lr_gamma']))
            else:
                raise NotImplementedError('Only the MultiStepLR scheme is supported.')

            self.log_dict = OrderedDict()

        print('---------- Model initialized ------------------')
        self.print_network()
        print('-----------------------------------------------')

    def __define_grad_params(self, finetune_type=None, res_mode='CNA', net_type='denoise_resnet'):
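        # Collect only the parameters that the chosen finetune mode should
        # update; every other parameter gets requires_grad = False.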

        optim_params = []
        if finetune_type == 'norm':
            for k, v in self.netG.named_parameters():
                v.requires_grad = False
                # if net_type == 'arcnn' or net_type == 'srcnn':
                #     if k.find('model.1') >= 0:
                #         v.requires_grad = True
                #         optim_params.append(v)
                #         print('we only optimize params: {}'.format(k))
                if res_mode == "CNA" or res_mode == "NCA":
                    # if k.find('res.1') >= 0 or k.find('res.4') >= 0 or k.find("sub.17") >= 0:
                    if k.find('transformer') >= 0:
                        v.requires_grad = True
                        optim_params.append(v)
                        print('we only optimize params: {}'.format(k))
        elif finetune_type == 'estimation':
            for k, v in self.netG.named_parameters():
                v.requires_grad = False
                if k.find('Gate') >= 0 or k.find('degration') >= 0:
                    v.requires_grad = True
                    optim_params.append(v)
                    print('we only optimize params: {}'.format(k))
        elif finetune_type == 'sft':
            for k, v in self.netG.named_parameters():
                v.requires_grad = False
                if k.find('Gate') >= 0:
                    v.requires_grad = True
                    optim_params.append(v)
                    print('we only optimize params: {}'.format(k))
        elif finetune_type == 'fea_upsample':
            for k, v in self.netG.named_parameters():
                v.requires_grad = False
                if k.find('res') < 0:
                    v.requires_grad = True
                    optim_params.append(v)
                    print('we only optimize params: {}'.format(k))
        elif finetune_type == 'upsample':
            for k, v in self.netG.named_parameters():
                v.requires_grad = False
                if k.find('model.1') < 0 and k.find('model.0') < 0:
                    v.requires_grad = True
                    optim_params.append(v)
                    print('we only optimize params: {}'.format(k))
        elif finetune_type == 'first_layer':
            for k, v in self.netG.named_parameters():
                v.requires_grad = False
                if k.find('model.0') >= 0:
                    v.requires_grad = True
                    optim_params.append(v)
                    print('we only optimize params: {}'.format(k))
        else:
            for k, v in self.netG.named_parameters():
                if v.requires_grad:
                    optim_params.append(v)
                else:
                    print('WARNING: params [%s] will not be optimized.' % k)
        return optim_params

    def __init_norm(self, res_mode='CNA', net_type='denoise_resnet', init_norm_type='zero'):
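        # Re-initialize the norm ('transformer') layers: weight -> 1 and
        # bias -> 0 for instance/batch norm, or everything -> 0 for 'zero'.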
        for k, v in self.netG.named_parameters():
            # if net_type == "arcnn" or net_type == "srcnn":
            #     if k.find('model.1') >= 0:
            #         if init_norm_type == 'zero':
            #             print(k, 'initialize with 0')
            #             nn.init.constant(v, 0)
            if res_mode == "CNA" or res_mode == "NCA":
                # if k.find('res.1') >= 0 or k.find('res.4') >= 0 or k.find("sub.17") >= 0:
                if k.find('transformer') >= 0:
                    if init_norm_type == "instance" or init_norm_type == "batch":
                        if "weight" in k:
                            print(k, "initialize with xavier_uniform")
                            nn.init.constant(v, 1)
                        elif "bias" in k:
                            print(k, "initialize with 0")
                            nn.init.constant(v, 0)
                    elif init_norm_type == 'zero':
                        print(k, "initialize with 0")
                        nn.init.constant(v, 0)

    def feed_data(self, data, need_HR=True, noise_gt=False):
        self.var_L = data['LR'].to(self.device)  # LR
        if need_HR:
            self.real_H = data['HR'].to(self.device)  # HR

    def forward_data_list(self, data_list):
        # eval() makes the batch norm's mean and variance unchanged
        self.netG.eval()
        for k, v in self.netG.named_parameters():
            v.requires_grad = False
        self.var_L = [data['LR'].to(self.device) for data in data_list]
        self.fake_H = self.netG(self.var_L)

    def optimize_parameters(self, step):
        self.optimizer_G.zero_grad()
        self.fake_H = self.netG(self.var_L)
        l_pix = self.l_pix_w * self.cri_pix(self.fake_H, self.real_H)
        if hasattr(self, 'cri_tv'):  # TV term only exists for the 'l2_tv' criterion
            l_pix = l_pix + self.tvloss_parameter * self.cri_tv(self.fake_H)
        l_pix.backward()
        self.optimizer_G.step()

        # set log
        self.log_dict['l_pix'] = l_pix.item()

    def test(self):
        self.netG.eval()
        if self.is_train:
            for v in self.optim_params:
                v.requires_grad = False
        else:
            for k, v in self.netG.named_parameters():
                v.requires_grad = False
        self.fake_H = self.netG(self.var_L)
        if self.is_train:
            for v in self.optim_params:
                v.requires_grad = True
        else:
            for k, v in self.netG.named_parameters():
                v.requires_grad = True
        self.netG.train()

    # def test(self):
    #     self.netG.eval()
    #     for k, v in self.netG.named_parameters():
    #         v.requires_grad = False
    #     self.fake_H = self.netG(self.var_L)
    #     for k, v in self.netG.named_parameters():
    #         v.requires_grad = True
    #     self.netG.train()

    def test_x8(self):
        # from https://github.com/thstkdgus35/EDSR-PyTorch
        self.netG.eval()
        for k, v in self.netG.named_parameters():
            v.requires_grad = False

        def _transform(v, op):
            # if self.precision != 'single': v = v.float()
            v2np = v.data.cpu().numpy()
            if op == 'v':
                tfnp = v2np[:, :, :, ::-1].copy()
            elif op == 'h':
                tfnp = v2np[:, :, ::-1, :].copy()
            elif op == 't':
                tfnp = v2np.transpose((0, 1, 3, 2)).copy()

            ret = torch.Tensor(tfnp).to(self.device)
            # if self.precision == 'half': ret = ret.half()

            return ret

        lr_list = [self.var_L]
        for tf in 'v', 'h', 't':
            lr_list.extend([_transform(t, tf) for t in lr_list])
        sr_list = [self.netG(aug) for aug in lr_list]
        for i in range(len(sr_list)):
            if i > 3:
                sr_list[i] = _transform(sr_list[i], 't')
            if i % 4 > 1:
                sr_list[i] = _transform(sr_list[i], 'h')
            if (i % 4) % 2 == 1:
                sr_list[i] = _transform(sr_list[i], 'v')

        output_cat = torch.cat(sr_list, dim=0)
        self.fake_H = output_cat.mean(dim=0, keepdim=True)

        for k, v in self.netG.named_parameters():
            v.requires_grad = True
        self.netG.train()

    def get_current_log(self):
        return self.log_dict

    def get_current_visuals(self, need_HR=True):
        out_dict = OrderedDict()
        out_dict['LR'] = self.var_L.detach()[0].float().cpu()
        out_dict['SR'] = self.fake_H.detach()[0].float().cpu()
        if need_HR:
            out_dict['HR'] = self.real_H.detach()[0].float().cpu()
        return out_dict

    def print_network(self):
        s, n = self.get_network_description(self.netG)
        print('Number of parameters in G: {:,d}'.format(n))
        if self.is_train:
            message = '-------------- Generator --------------\n' + s + '\n'
            network_path = os.path.join(self.save_dir, '../', 'network.txt')
            with open(network_path, 'w') as f:
                f.write(message)

    def update(self, new_model_dict):
        if isinstance(self.netG, nn.DataParallel):
            network = self.netG.module
            network.load_state_dict(new_model_dict)

    def load(self):
        load_path_G = self.opt['path']['pretrain_model_G']
        if load_path_G is not None:
            print('loading model for G [{:s}] ...'.format(load_path_G))
            self.load_network(load_path_G, self.netG)

    def save(self, iter_label):
        self.save_network(self.save_dir, self.netG, 'G', iter_label)
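
To make the change easy to see in isolation, here is a minimal, self-contained sketch of the l2_tv objective as wired up in optimize_parameters; the one-layer generator and random tensors are stand-ins, only the loss wiring mirrors SRModel:

import torch
import torch.nn as nn

netG = nn.Conv2d(3, 3, 3, padding=1)       # toy stand-in for the real generator
optimizer_G = torch.optim.Adam(netG.parameters(), lr=1e-4)
cri_pix = nn.MSELoss()
cri_tv = TVLoss()                          # the class defined earlier in this post
l_pix_w, tvloss_parameter = 1.0, 1e-5      # "pixel_weight" and the TV weight

var_L = torch.rand(24, 3, 96, 96)          # batch_size 24, HR_size 96, as in the config
real_H = torch.rand(24, 3, 96, 96)         # target (here: the residual / noise map)

optimizer_G.zero_grad()
fake_H = netG(var_L)
l_pix = l_pix_w * cri_pix(fake_H, real_H) + tvloss_parameter * cri_tv(fake_H)
l_pix.backward()
optimizer_G.step()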

 

Training is launched with:

python train.py -opt options/train/train_sr.json

The option file train_sr.json used for this experiment:

{
  "name": "LR_x4_subnet_residual_DIV2K_guan_TVLOSS__modify" //  please remove "debug_" during training
  , "tb_logger_dir": "sr"
  , "use_tb_logger": true
  , "model":"sr"
  , "scale": 1
  , "crop_scale":0
  , "gpu_ids": [1]
//  , "init_type": "kaiming"
//
//  , "finetune_type": "sft"
//  , "init_norm_type": "zero"

  , "datasets": {
    "train": {
      "name": "DIV2K"
//      , "mode": "LQHQ"
//      , "dataroot_HR": "/media/sdc/jwhe/BasicSR_v2/data/DIV2K/DIV2K800_sub"
//      , "dataroot_HQ": "/media/sdc/jwhe/BasicSR_v2/data/DIV2K/DIV2K800_sub_Gaussian15"
//      , "dataroot_LQ": "/media/sdc/jwhe/BasicSR_v2/data/DIV2K/DIV2K800_sub_Gaussian50"
      , "mode": "LRHR"
      , "dataroot_HR": "/home/guanwp/BasicSR_datasets/DIV2K800_sub_bicLRx4"
      , "dataroot_LR": "/home/guanwp/BasicSR_datasets/DIV2K800_sub_bicLRx4_noiseALL"
      , "subset_file": null
      , "use_shuffle": true
      , "n_workers": 8
      , "batch_size": 24 // 16
      , "HR_size": 96 // 128 | 192 | 96
      , "noise_gt": true///residual
      , "use_flip": true
      , "use_rot": true
    }
//
//    , "val": {
//      "name": "val_CBSD68_Gaussian50",
//      "mode": "LRHR",
//      "dataroot_HR": "/home/jwhe/workspace/BasicSR_v3/data/CBSD68/mod2/CBSD68_mod",
//      "dataroot_LR": "/home/jwhe/workspace/BasicSR_v3/data/CBSD68/mod2/CBSD68_Gaussian50"
//      , "noise_gt": true
//    }
//   , "val": {
//      "name": "val_CBSD68_s08_c03",
//      "mode": "LRHR",
//      "dataroot_HR": "/home/jwhe/workspace/BasicSR_v3/data/CBSD68/mod2/CBSD68_mod",
//      "dataroot_LR": "/home/jwhe/workspace/BasicSR_v3/data/CBSD68/mod2/CBSD68_s08_c03"
//      , "noise_gt": true
//    }

//  , "val": {
//      "name": "val_CBSD68_clean",
//      "mode": "LRHR",
//      "dataroot_HR": "/media/sdc/jwhe/BasicSR_v2/data/CBSD68/mod2/CBSD68_mod",
//      "dataroot_LR": "/media/sdc/jwhe/BasicSR_v2/data/CBSD68/mod2/CBSD68_mod"
//    }

//    , "val": {
//      "name": "val_LIVE1_gray_JEPG10",
//      "mode": "LRHR",
//      "dataroot_HR": "/media/hjw/jwhe/BasicSR_v2/data/val/LIVE1_val/LIVE1_gray_mod",
//      "dataroot_LR": "/media/hjw/jwhe/BasicSR_v2/data/val/LIVE1_val/LIVE1_gray_jpg10"
//    }

//      , "val": {
//      "name": "val_LIVE1_JEPG80",
//      "mode": "LRHR",
//      "dataroot_HR": "/media/hjw/jwhe/BasicSR_v2/data/val/LIVE1_val/LIVE1_mod",
//      "dataroot_LR": "/media/hjw/jwhe/BasicSR_v2/data/val/LIVE1_val/LIVE1_jpg80"
//    }

//    , "val_2": {
//      "name": "val_Classic5_gray_JEPG30",
//      "mode": "LRHR",
//      "dataroot_HR": "/media/sdc/jwhe/BasicSR_v2/data/val/Classic5_val/classic5_mod",
//      "dataroot_LR": "/media/sdc/jwhe/BasicSR_v2/data/val/Classic5_val/classic5_jpg30"
//    }

//    , "val": {
//      "name": "val_BSD68_gray_Gaussian50",
//      "mode": "LRHR",
//      "dataroot_HR": "/media/sdc/jwhe/BasicSR_v2/data/BSD68/mod2/BSD68_mod",
//      "dataroot_LR": "/media/sdc/jwhe/BasicSR_v2/data/BSD68/mod2/BSD68_gray_Gaussian50"
//    }

//    , "val": {
//      "name": "val_set5_x4_gray_mod4"
//      , "mode": "LRHR"
//      , "dataroot_HR": "/media/sdc/jwhe/BasicSR_v2/data/val/Set5_val/mod4/Set5_gray_mod4"
//      , "dataroot_LR": "/media/sdc/jwhe/BasicSR_v2/data/val/Set5_val/mod4/Set5_gray_bicx4"
//    }
//
//    , "val": {
//      "name": "val_set5_x4_noise50_mod4",
//      "mode": "LRHR",
//      "dataroot_HR": "/home/jwhe/workspace/BasicSR_v3/data/val/Set5_val/mod4/Set5_mod4",
//      "dataroot_LR": "/home/jwhe/workspace/BasicSR_v3/data/val/Set5_val/mod4/Set5_bicLRx4_noise50_UPx4"
//    }

//  , "val": {
//      "name": "val_set5_x4_c03s08_LR_mod4",
//      "mode": "LRHR",
//      "dataroot_HR": "/home/jwhe/workspace/BasicSR_v3/data/val/Set5_val/mod4/Set5_mod4",
//      "dataroot_LR": "/home/jwhe/workspace/BasicSR_v3/data/val/Set5_val/mod4/Set5_bicLRx4_c03s08"
//    }

  , "val": {
    "name": "val_set5_x4_LR_residual_mod4",
    "mode": "LRHR",
    "dataroot_HR": "/home/guanwp/BasicSR_datasets/val_set5/Set5_sub_bicLRx4",
    "dataroot_LR": "/home/guanwp/BasicSR_datasets/val_set5/Set5_sub_bicLRx4_noiseALL"
      , "noise_gt":  true
    }


//    , "val": {
//      "name": "val_set5_x3_mod6"
//      , "mode": "LRHR"
//      , "dataroot_HR": "/media/sdc/jwhe/BasicSR_v2/data/val/Set5_val/mod6/Set5_mod6"
//      , "dataroot_LR": "/media/sdc/jwhe/BasicSR_v2/data/val/Set5_val/mod6/Set5_bicx3"
//    }
//  }
//
//    , "val": {
//      "name": "val_set5_x3_gray_mod6"
//      , "mode": "LRHR"
//      , "dataroot_HR": "/media/sdc/jwhe/BasicSR_v2/data/val/Set5_val/mod6/Set5_gray_mod6"
//      , "dataroot_LR": "/media/sdc/jwhe/BasicSR_v2/data/val/Set5_val/mod6/Set5_gray_bicx3"
//    }
  }

  , "path": {
    "root": "/home/guanwp/jingwen/sr"
    , "pretrain_model_G": null
//   , "pretrain_model_G": "../noise_from15to75/experiments/noise15_subnet/models/34000_G.pth"
//  , "pretrain_model_G": "../experiments/pretrained_models/noise_estimation/01_gaussian15_nonorm_denoise_resnet_DIV2K/noise15_904000_subnet_noise15_34000.pth"
//  , "pretrain_model_G": "../experiments/pretrained_models/noise_estimation/01_gaussian15_nonorm_denoise_resnet_DIV2K/noise15_904000.pth"
//    , "pretrain_model_G": "../experiments/pretrained_models/jpeg_estimation/JPEG80_gray_nonorm_denoise_resnet_DIV2K/jpeg80_964000.pth"
//    , "pretrain_model_G": "../experiments/pretrained_models/jpeg10to40_models/JPEG30_gray_nonorm_denoise_resnet_DIV2K/jpeg_CNA_adaptive_982000.pth"
//    , "pretrain_model_G": "../experiments/pretrained_models/50to15_models/gaussian75_nonorm_denoise_resnet_DIV2K/noise_CNA_adaptive_988000.pth"
//    , "pretrain_model_G": "../experiments/pretrained_models/observation_sr/02_bicx3_first_lrx10_nonorm_srcnn_DIV2K/size_CNA_all_conv_adaptive_1000000.pth"
//    , "pretrain_model_G": "../experiments/pretrained_models/jpeg10to40_models/JPEG50_gray_nonorm_denoise_resnet_DIV2K/jpeg_CNA_adaptive_958000.pth"
//    , "pretrain_model_G": "../experiments/pretrained_models/50to15_models/gaussian50_nonorm_denoise_resnet_DIV2K_930000/noise_CNA_adaptive.pth"
//     , "pretrain_model_G": "../experiments/pretrained_models/jpeg10to40_models/JPEG80_gray_nonorm_denoise_resnet_DIV2K/jpeg_CNA_adaptive_964000.pth"
//      , "pretrain_model_G": "../experiments/pretrained_models/jpeg10to40_models/JPEG80_nonorm_denoise_resnet_DIV2K/jpeg_CNA_adaptive_912000.pth"
//    , "pretrain_model_G": "../experiments/pretrained_models/50to15_models/gaussian90_nonorm_denoise_resnet_DIV2K/noise_CNA_adaptive_870000.pth"
//    , "pretrain_model_G": "../experiments/pretrained_models/observation_jpeg/JPEG40_nonorm_arcnn_DIV2K/jpg_CNA_all_conv_adaptive_982000.pth"
//    , "pretrain_model_G": "../baselines/experiments/01_gaussian15_nonorm_denoise_resnet_DIV2K/models/904000_G.pth"
//    , "pretrain_model_G": "../experiments/pretrained_models/observation_sr/01_bicx2_first_lrx10_nonorm_srcnn_DIV2K/size_CNA_all_conv_adaptive_996000.pth"
//    , "pretrain_model_G": "../experiments/pretrained_models/observation_jpeg/JPEG40_kaiming_lr_change_e5_nonorm_arcnn_DIV2K/jpeg_CNA_all_conv_adaptive_596000.pth"
//  , "pretrain_model_G": "../observation_sr/experiments/01_bicx2_first_lrx10_nonorm_srcnn_DIV2K/models/996000_G.pth"
//   , "pretrain_model_G": "../observation_sr/experiments/02_bicx3_first_lrx10_nonorm_srcnn_DIV2K/models/1000000_G.pth"
//   , "pretrain_model_G": "../experiments/pretrained_models/x4tox3_models/bicx5_nonorm_denoise_resnet_DIV2K/size_CNA_adaptive_794000.pth"
//  , "pretrain_model_G": "../baselines/experiments/bicx3_adaptive_ksize3_denoise_resnet_DIV2K/models/606000_G.pth"
//  , "pretrain_model_G": "../observation_noise/experiments/noise15_nonorm_arcnn_DIV2K/models/778000_G.pth"
//  , "pretrain_model_G": "../experiments/pretrained_models/observation_noise/noise15_nonorm_arcnn_DIV2K/noise_CNA_all_conv_adaptive_778000.pth"
//    , "pretrain_model_G": "../experiments/pretrained_models/jpeg10to40_models/JPEG50_gray_nonorm_denoise_resnet_DIV2K/jpeg_CNA_adaptive_958000.pth"
//    , "pretrain_model_G": "../../BasicSR_v2/baselines_v2/experiments/JPEG40_kaiming_lr_change_e5_nonorm_arcnn_DIV2K/models/596000_G.pth"
//    , "pretrain_model_G": "../observation_sr/experiments/bicx3_kaiming_nonorm_srcnn_DIV2K/models/982000_G.pth"
//    , "pretrain_model_G": "../experiments/pretrained_models/observation_sr/bicx3_kaiming_nonorm_srcnn_DIV2K/size_CNA_all_conv_adaptive_982000.pth"
//    , "pretrain_model_G": "../experiments/pretrained_models/x4tox3_models/bicx2_nonorm_denoise_resnet_DIV2K/size_CNA_adaptive_962000.pth"
//    , "pretrain_model_G": "../experiments/pretrained_models/50to15_models/01_gaussian15_nonorm_denoise_resnet_DIV2K/noise_CNA_adaptive_904000.pth"
//    , "pretrain_model_G": "../experiments/pretrained_models/jpeg10to40_models/JPEG40_gray_nonorm_denoise_resnet_DIV2K/jpeg_CNA_adaptive_956000.pth"
//    , "pretrain_model_G": "../baselines/experiments/bicx3_nonorm_denoise_resnet_DIV2K/models/952000_G.pth"
//    , "pretrain_model_G": "../experiments/pretrained_models/jpeg10to40_models/JPEG20_gray_nonorm_denoise_resnet_DIV2K/jpeg_CNA_adaptive_966000.pth"
//    , "pretrain_model_G": "../experiments/pretrained_models/x4tox3_models/bicx3_nonorm_denoise_resnet_DIV2K/size_CNA_adaptive_952000.pth"
//    , "pretrain_model_G": "../experiments/pretrained_models/jpeg10to40_models/JPEG30_gray_nonorm_denoise_resnet_DIV2K/jpeg_CNA_adaptive_982000.pth"
//    , "pretrain_model_G": "../experiments/pretrained_models/observation_sr/bicx4_kaiming_nonorm_srcnn_DIV2K/size_CNA_adaptive_982000.pth"
//    , "pretrain_model_G": "../observation_sr/experiments/bicx4_kaiming_nonorm_srcnn_DIV2K/models/982000_G.pth"
//      , "pretrain_model_G": "../experiments/pretrained_models/jpeg10to40_models/JPEG10_gray_nonorm_denoise_resnet_DIV2K_902000/jpeg_CNA_adaptive.pth"
//    , "pretrain_model_G": "../experiments/pretrained_models/x4tox3_models/bicx4_nonorm_denoise_resnet_DIV2K_992000/size_CNA_adaptive.pth"
  }

//  , "network_G": {
//    "which_model_G": "sr_resnet" // RRDB_net | sr_resnet | denoise_resnet
    , "norm_type": "adaptive_conv_res"
//    , "norm_type": null
//    , "mode": "CNA"
//    , "nf": 64
//    , "nb": 16
//    , "in_nc": 3
//    , "out_nc": 3
    , "gc": 32
//    , "group": 1
    , "down_scale": 2
    , "ada_ksize": 1
    , "num_classes": 2
    , "fea_norm": "adaptive_conv_res"
    , "upsample_norm": "adaptive_conv_res"
//  }
//
  , "network_G": {
    "which_model_G": "noise_subnet" // RRDB_net | sr_resnet | modulate_denoise_resnet |noise_subnet
//    , "norm_type": "adaptive_conv_res"
    , "norm_type": "batch"
    , "mode": "CNA"
    , "nf": 64
//    , "nb": 16
    , "in_nc": 3
    , "out_nc": 3
//    , "gc": 32
    , "group": 1
//    , "gate_conv_bias": true
//    , "ada_ksize": 1
//    , "num_classes": 2
//    , "fea_norm": "adaptive_conv_res"
//    , "upsample_norm": "adaptive_conv_res"
  }


//    , "network_G": {
//    "which_model_G": "srcnn" // RRDB_net | sr_resnet
    , "norm_type": null
//    , "norm_type": "adaptive_conv_res"
//    , "mode": "CNA"
//    , "nf": 64
//    , "in_nc": 1
//    , "out_nc": 1
//    , "ada_ksize": 5
//  }

//  , "network_G": {
//    "which_model_G": "arcnn"
    , "norm_type": "adaptive_conv_res"
//    , "norm_type": null
//    , "mode": "CNA"
//    , "nf": 64
//    , "in_nc": 1
//    , "out_nc": 1
//    , "group": 1
    , "ada_ksize": 5
//  }

  , "train": {
//    "lr_G": 1e-3
    "lr_G": 1e-4
    , "lr_scheme": "MultiStepLR"
//    , "lr_steps": [200000, 400000, 600000, 800000]
    , "lr_steps": [500000]
//    , "lr_steps": [600000]
//    , "lr_steps": [1000000]
//    , "lr_steps": [50000, 100000, 150000, 200000, 250000]
//    , "lr_steps": [100000, 200000, 300000, 400000]
    , "lr_gamma": 0.1
//    , "lr_gamma": 0.5

    , "pixel_criterion": "l2_tv"//"l2"
    , "pixel_weight": 1.0
    , "val_freq": 1e3

    , "manual_seed": 0
    , "niter": 1e6
//    , "niter": 6e5
  }

  , "logger": {
    "print_freq": 200
    , "save_checkpoint_freq": 1e3
  }
}
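
Note that this option file is not strict JSON because of the // comments. A BasicSR-style parser strips them line by line before calling json.loads; a minimal sketch of that idea (the real parsing lives in options/options.py and may differ in detail):

import json
from collections import OrderedDict

def parse_opt(path):
    # drop everything after '//' on each line, then parse as plain JSON;
    # this naive split assumes no '//' inside string values, which holds here
    with open(path, 'r') as f:
        json_str = ''.join(line.split('//')[0] + '\n' for line in f)
    return json.loads(json_str, object_pairs_hook=OrderedDict)

opt = parse_opt('options/train/train_sr.json')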

 

https://github.com/hejingwenhejingwen/Blind_Restoration

 
