ESRGAN with PyTorch (paper reading notes + reproduction)

Code framework: https://github.com/xinntao/BasicSR

Link to the ESRGAN paper, "ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks": https://arxiv.org/pdf/1809.00219.pdf

The code lives under /home/guanwp/BasicSR-master/codes/. Run the following commands to train and test:

python train.py -opt options/train/train_esrgan.json

python test.py -opt options/test/test_esrgan.json

For experiments on the G network, see the blog post 《实验笔记之——基于RRDBNet的Octave Convolution实验记录》 (experiment notes on Octave Convolution based on RRDBNet).

Theory

Code

The settings file (train_esrgan.json) is given below:

{
  "name": "ESRGAN_x4_DIV2K" //  please remove "debug_" during training
  , "use_tb_logger": true
  , "model":"srgan"
  , "scale": 4
  , "gpu_ids": [3,4,5]

  , "datasets": {
    "train": {
      "name": "DIV2K"
      , "mode": "LRHR"
      , "dataroot_HR": "/home/guanwp/BasicSR_datasets/DIV2K800_sub"
      , "dataroot_LR": "/home/guanwp/BasicSR_datasets/DIV2K800_sub_bicLRx4"
      , "subset_file": null
      , "use_shuffle": true
      , "n_workers": 8
      , "batch_size": 16
      , "HR_size": 128
      , "use_flip": true
      , "use_rot": true
    }
    , "val": {
      "name": "val_set5"
      , "mode": "LRHR"
      , "dataroot_HR": "/home/guanwp/BasicSR_datasets/val_set5/Set5"
      , "dataroot_LR": "/home/guanwp/BasicSR_datasets/val_set5/Set5_sub_bicLRx4"
    }
  }

  , "path": {
    "root": "/home/guanwp/BasicSR-master",
    "pretrain_model_G": null
     ,"experiments_root": "/home/guanwp/BasicSR-master/experiments/",
    "models": "/home/guanwp/BasicSR-master/experiments/ESRGAN_x4_DIV2K/models",
    "log": "/home/guanwp/BasicSR-master/experiments/ESRGAN_x4_DIV2K",
    "val_images": "/home/guanwp/BasicSR-master/experiments/ESRGAN_x4_DIV2K/val_images"
  }

  , "network_G": {
    "which_model_G": "RRDB_net" // RRDB_net | sr_resnet
    , "norm_type": null
    , "mode": "CNA"
    , "nf": 64
    , "nb": 23// number of residual block
    , "in_nc": 3
    , "out_nc": 3
    , "gc": 32
    , "group": 1
  }
  , "network_D": {
    "which_model_D": "discriminator_vgg_128"
    , "norm_type": "batch"
    , "act_type": "leakyrelu"
    , "mode": "CNA"
    , "nf": 64
    , "in_nc": 3
  }

  , "train": {
    "lr_G": 1e-4
    , "weight_decay_G": 0
    , "beta1_G": 0.9
    , "lr_D": 1e-4
    , "weight_decay_D": 0
    , "beta1_D": 0.9
    , "lr_scheme": "MultiStepLR"
    , "lr_steps": [50000, 100000, 200000, 300000]
    , "lr_gamma": 0.5

    , "pixel_criterion": "l1"
    , "pixel_weight": 0//1e-2//just for the NIQE, you should set to 0
    , "feature_criterion": "l1"
    , "feature_weight": 1
    , "gan_type": "vanilla"
    , "gan_weight": 5e-3

    //for wgan-gp
     , "D_update_ratio": 1//for the D network
     , "D_init_iters": 0
    // , "gp_weigth": 10

    , "manual_seed": 0
    , "niter": 5e5//6e5//5e5
    , "val_freq": 2000//5e3
  }

  , "logger": {
    "print_freq": 200
    , "save_checkpoint_freq": 5e3
  }
}
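For reference, `"which_model_G": "RRDB_net"` selects the Residual-in-Residual Dense Block generator from the ESRGAN paper. The sketch below is my own simplified re-implementation (not the BasicSR code itself) and only serves to make the `nf`, `nb` and `gc` options concrete: `nf` is the number of feature channels, `nb` the number of RRDBs, and `gc` the growth channels inside each dense block.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class ResidualDenseBlock5C(nn.Module):
    """Dense block with 5 convs and growth channels gc; no BatchNorm, as in ESRGAN."""
    def __init__(self, nf=64, gc=32):
        super().__init__()
        self.conv1 = nn.Conv2d(nf, gc, 3, 1, 1)
        self.conv2 = nn.Conv2d(nf + gc, gc, 3, 1, 1)
        self.conv3 = nn.Conv2d(nf + 2 * gc, gc, 3, 1, 1)
        self.conv4 = nn.Conv2d(nf + 3 * gc, gc, 3, 1, 1)
        self.conv5 = nn.Conv2d(nf + 4 * gc, nf, 3, 1, 1)
        self.lrelu = nn.LeakyReLU(0.2, inplace=True)

    def forward(self, x):
        x1 = self.lrelu(self.conv1(x))
        x2 = self.lrelu(self.conv2(torch.cat((x, x1), 1)))
        x3 = self.lrelu(self.conv3(torch.cat((x, x1, x2), 1)))
        x4 = self.lrelu(self.conv4(torch.cat((x, x1, x2, x3), 1)))
        x5 = self.conv5(torch.cat((x, x1, x2, x3, x4), 1))
        return x5 * 0.2 + x  # residual scaling by 0.2


class RRDB(nn.Module):
    """Residual-in-Residual Dense Block: three dense blocks plus a long skip."""
    def __init__(self, nf=64, gc=32):
        super().__init__()
        self.rdb1 = ResidualDenseBlock5C(nf, gc)
        self.rdb2 = ResidualDenseBlock5C(nf, gc)
        self.rdb3 = ResidualDenseBlock5C(nf, gc)

    def forward(self, x):
        out = self.rdb3(self.rdb2(self.rdb1(x)))
        return out * 0.2 + x


class RRDBNet(nn.Module):
    """Generator: first conv -> nb RRDBs -> trunk conv -> two x2 upsamples -> output conv."""
    def __init__(self, in_nc=3, out_nc=3, nf=64, nb=23, gc=32):
        super().__init__()
        self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1)
        self.body = nn.Sequential(*[RRDB(nf, gc) for _ in range(nb)])
        self.conv_body = nn.Conv2d(nf, nf, 3, 1, 1)
        self.conv_up1 = nn.Conv2d(nf, nf, 3, 1, 1)
        self.conv_up2 = nn.Conv2d(nf, nf, 3, 1, 1)
        self.conv_hr = nn.Conv2d(nf, nf, 3, 1, 1)
        self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1)
        self.lrelu = nn.LeakyReLU(0.2, inplace=True)

    def forward(self, x):
        fea = self.conv_first(x)
        fea = fea + self.conv_body(self.body(fea))  # long skip around all RRDBs
        fea = self.lrelu(self.conv_up1(F.interpolate(fea, scale_factor=2, mode='nearest')))
        fea = self.lrelu(self.conv_up2(F.interpolate(fea, scale_factor=2, mode='nearest')))
        return self.conv_last(self.lrelu(self.conv_hr(fea)))
```

Note the absence of BatchNorm and the 0.2 residual scaling; these are the main architectural differences from the SRResNet-style generator.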

Running the code

Results

Results of ESRGAN after training for a little over four days.

Next, run the test and analyze the results.
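When looking at the test outputs, a rough PSNR check between an SR result and its ground-truth HR image can be done in a few lines of NumPy/OpenCV. This is only a sketch with hypothetical file paths; BasicSR's test script already reports PSNR/SSIM (typically on the Y channel with the border cropped), so treat this as a sanity check rather than the official metric.

```python
import numpy as np
import cv2


def calc_psnr(img1, img2):
    """PSNR in dB for two uint8 images of identical shape."""
    mse = np.mean((img1.astype(np.float64) - img2.astype(np.float64)) ** 2)
    if mse == 0:
        return float('inf')
    return 20 * np.log10(255.0 / np.sqrt(mse))


# Hypothetical paths; replace with your actual SR output and HR ground truth.
sr = cv2.imread('results/ESRGAN_x4_DIV2K/baby_SR.png')
hr = cv2.imread('/home/guanwp/BasicSR_datasets/val_set5/Set5/baby.png')
print('PSNR: %.2f dB' % calc_psnr(sr, hr))
```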

ESRGAN is a deep-learning-based image super-resolution method that reconstructs a high-resolution image from a low-resolution input. Below is a simple PyTorch reproduction of a generator:

```python
import math

import torch
import torch.nn as nn


class ResidualBlock(nn.Module):
    def __init__(self, channels):
        super(ResidualBlock, self).__init__()
        self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, stride=1, padding=1)
        self.bn1 = nn.BatchNorm2d(channels)
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, stride=1, padding=1)
        self.bn2 = nn.BatchNorm2d(channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out += residual
        out = self.relu(out)
        return out


class UpsampleBlock(nn.Module):
    def __init__(self, in_channels, up_scale):
        super(UpsampleBlock, self).__init__()
        self.conv = nn.Conv2d(in_channels, in_channels * up_scale ** 2, kernel_size=3, stride=1, padding=1)
        self.pixel_shuffle = nn.PixelShuffle(up_scale)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.conv(x)
        out = self.pixel_shuffle(out)
        out = self.relu(out)
        return out


class Generator(nn.Module):
    def __init__(self, scale_factor):
        super(Generator, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=9, stride=1, padding=4)
        self.relu = nn.ReLU(inplace=True)
        self.residual_blocks = nn.Sequential(
            ResidualBlock(64),
            ResidualBlock(64),
            ResidualBlock(64),
            ResidualBlock(64),
            ResidualBlock(64)
        )
        self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
        self.bn2 = nn.BatchNorm2d(64)
        # Stack log2(scale_factor) x2 upsample blocks so the output is scale_factor times larger.
        self.upsample_blocks = nn.Sequential(
            *[UpsampleBlock(64, 2) for _ in range(int(math.log2(scale_factor)))]
        )
        self.conv3 = nn.Conv2d(64, 3, kernel_size=9, stride=1, padding=4)

    def forward(self, x):
        out = self.conv1(x)
        out = self.relu(out)
        residual = out
        out = self.residual_blocks(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out += residual
        out = self.upsample_blocks(out)
        out = self.conv3(out)
        return out
```

This is a simplified generator built from residual blocks and upsample blocks; it is in fact closer to the original SRGAN generator, since the full ESRGAN generator removes BatchNorm and replaces the plain residual blocks with RRDBs. The model can be tuned by adjusting the number and size of the residual and upsample blocks.
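A quick smoke test of the simplified generator above (assuming the ×4 configuration, i.e. two ×2 upsample blocks as written):

```python
import torch

# Dummy low-resolution input: batch of 1, 3 channels, 32x32 pixels.
netG = Generator(scale_factor=4)
lr = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    sr = netG(lr)
print(sr.shape)  # expected: torch.Size([1, 3, 128, 128])
```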