多种损失函数

一:MultiscaleDiscriminator 判别器

class MultiscaleDiscriminator(nn.Module):
    """Multi-scale PatchGAN discriminator (pix2pixHD style).

    Holds ``num_D`` independent ``NLayerDiscriminator`` instances; each one
    judges the input at a different resolution (the input is average-pooled
    down by 2x between scales).  When ``getIntermFeat`` is True the per-layer
    activations of every scale are returned as well, which is what the
    feature-matching loss consumes.
    """

    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d,
                 use_sigmoid=False, num_D=3, getIntermFeat=False):
        super(MultiscaleDiscriminator, self).__init__()
        self.num_D = num_D
        self.n_layers = n_layers
        self.getIntermFeat = getIntermFeat

        for scale in range(num_D):
            netD = NLayerDiscriminator(input_nc, ndf, n_layers, norm_layer,
                                       use_sigmoid, getIntermFeat)
            if getIntermFeat:
                # Register every stage separately so forward() can collect
                # the intermediate feature maps.
                for layer in range(n_layers + 2):
                    setattr(self, 'scale' + str(scale) + '_layer' + str(layer),
                            getattr(netD, 'model' + str(layer)))
            else:
                setattr(self, 'layer' + str(scale), netD.model)

        self.downsample = nn.AvgPool2d(3, stride=2, padding=[1, 1],
                                       count_include_pad=False)

    def singleD_forward(self, model, input):
        # With intermediate features, `model` is a list of sequential stages;
        # thread the activation through them and return each stage's output.
        if not self.getIntermFeat:
            return [model(input)]
        feats = [input]
        for stage in model:
            feats.append(stage(feats[-1]))
        return feats[1:]

    def forward(self, input):
        outputs = []
        current = input
        for i in range(self.num_D):
            # The highest-index discriminator sees the full-resolution input.
            scale = self.num_D - 1 - i
            if self.getIntermFeat:
                model = [getattr(self, 'scale' + str(scale) + '_layer' + str(j))
                         for j in range(self.n_layers + 2)]
            else:
                model = getattr(self, 'layer' + str(scale))
            outputs.append(self.singleD_forward(model, current))
            if i != self.num_D - 1:
                current = self.downsample(current)
        return outputs
 
 
# Defines the PatchGAN discriminator with the specified arguments.
# Defines the PatchGAN discriminator with the specified arguments.
class NLayerDiscriminator(nn.Module):
    """70x70-style PatchGAN: `n_layers` stride-2 conv stages, one stride-1
    stage, then a 1-channel real/fake score map.

    With ``getIntermFeat`` each stage is registered as ``model<i>`` and
    forward() returns the list of per-stage activations; otherwise all
    stages are flattened into a single ``self.model`` Sequential.
    """

    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, getIntermFeat=False):
        super(NLayerDiscriminator, self).__init__()
        self.getIntermFeat = getIntermFeat
        self.n_layers = n_layers

        kernel = 4
        pad = int(np.ceil((kernel - 1.0) / 2))  # = 2 for the 4x4 kernel

        # Each entry is one "stage" (conv [+ norm] + activation).
        stages = [[nn.Conv2d(input_nc, ndf, kernel_size=kernel, stride=2, padding=pad),
                   nn.LeakyReLU(0.2, True)]]

        channels = ndf
        for _ in range(1, n_layers):
            prev, channels = channels, min(channels * 2, 512)
            stages.append([
                nn.Conv2d(prev, channels, kernel_size=kernel, stride=2, padding=pad),
                norm_layer(channels),
                nn.LeakyReLU(0.2, True),
            ])

        # One extra stride-1 stage before the score map.
        prev, channels = channels, min(channels * 2, 512)
        stages.append([
            nn.Conv2d(prev, channels, kernel_size=kernel, stride=1, padding=pad),
            norm_layer(channels),
            nn.LeakyReLU(0.2, True),
        ])

        # 1-channel patch-wise real/fake score.
        stages.append([nn.Conv2d(channels, 1, kernel_size=kernel, stride=1, padding=pad)])

        if use_sigmoid:
            stages.append([nn.Sigmoid()])

        if getIntermFeat:
            for idx, stage in enumerate(stages):
                setattr(self, 'model' + str(idx), nn.Sequential(*stage))
        else:
            flat = [module for stage in stages for module in stage]
            self.model = nn.Sequential(*flat)

    def forward(self, input):
        if not self.getIntermFeat:
            return self.model(input)
        # NOTE(review): only n_layers+2 stages are traversed here, so with
        # use_sigmoid=True + getIntermFeat=True the sigmoid stage is skipped —
        # this mirrors the original pix2pixHD behaviour.
        feats = [input]
        for idx in range(self.n_layers + 2):
            feats.append(getattr(self, 'model' + str(idx))(feats[-1]))
        return feats[1:]
 
 
 
if __name__ == '__main__':
    # Smoke test: build a 3-scale discriminator that also returns its
    # intermediate features, then print the shape of every layer's output
    # at every scale.
    netD = MultiscaleDiscriminator(input_nc=3, ndf=64, n_layers=3,
                                   norm_layer=nn.BatchNorm2d,
                                   use_sigmoid=False, num_D=3,
                                   getIntermFeat=True)
    print(netD)
    input = torch.randn(1, 3, 512, 512)
    output = netD(input)
    # BUG FIX: the original iterated range(2) and silently skipped the last
    # of the 3 scales; iterate the returned structure so all num_D scales
    # and all n_layers+2 layers are covered regardless of configuration.
    for i, scale_feats in enumerate(output):
        for j, feat in enumerate(scale_feats):
            print(f'第scale{i}layer{j}的输出:{feat.shape}')

D:\Anaconda\envs\study\python.exe "E:\image generate\pix2pixHD-master\models\networks.py" 
MultiscaleDiscriminator(
  (scale0_layer0): Sequential(
    (0): Conv2d(3, 64, kernel_size=(4, 4), stride=(2, 2), padding=(2, 2))
    (1): LeakyReLU(negative_slope=0.2, inplace=True)
  )
  (scale0_layer1): Sequential(
    (0): Conv2d(64, 128, kernel_size=(4, 4), stride=(2, 2), padding=(2, 2))
    (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (2): LeakyReLU(negative_slope=0.2, inplace=True)
  )
  (scale0_layer2): Sequential(
    (0): Conv2d(128, 256, kernel_size=(4, 4), stride=(2, 2), padding=(2, 2))
    (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (2): LeakyReLU(negative_slope=0.2, inplace=True)
  )
  (scale0_layer3): Sequential(
    (0): Conv2d(256, 512, kernel_size=(4, 4), stride=(1, 1), padding=(2, 2))
    (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (2): LeakyReLU(negative_slope=0.2, inplace=True)
  )
  (scale0_layer4): Sequential(
    (0): Conv2d(512, 1, kernel_size=(4, 4), stride=(1, 1), padding=(2, 2))
  )
  (scale1_layer0): Sequential(
    (0): Conv2d(3, 64, kernel_size=(4, 4), stride=(2, 2), padding=(2, 2))
    (1): LeakyReLU(negative_slope=0.2, inplace=True)
  )
  (scale1_layer1): Sequential(
    (0): Conv2d(64, 128, kernel_size=(4, 4), stride=(2, 2), padding=(2, 2))
    (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (2): LeakyReLU(negative_slope=0.2, inplace=True)
  )
  (scale1_layer2): Sequential(
    (0): Conv2d(128, 256, kernel_size=(4, 4), stride=(2, 2), padding=(2, 2))
    (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (2): LeakyReLU(negative_slope=0.2, inplace=True)
  )
  (scale1_layer3): Sequential(
    (0): Conv2d(256, 512, kernel_size=(4, 4), stride=(1, 1), padding=(2, 2))
    (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (2): LeakyReLU(negative_slope=0.2, inplace=True)
  )
  (scale1_layer4): Sequential(
    (0): Conv2d(512, 1, kernel_size=(4, 4), stride=(1, 1), padding=(2, 2))
  )
  (scale2_layer0): Sequential(
    (0): Conv2d(3, 64, kernel_size=(4, 4), stride=(2, 2), padding=(2, 2))
    (1): LeakyReLU(negative_slope=0.2, inplace=True)
  )
  (scale2_layer1): Sequential(
    (0): Conv2d(64, 128, kernel_size=(4, 4), stride=(2, 2), padding=(2, 2))
    (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (2): LeakyReLU(negative_slope=0.2, inplace=True)
  )
  (scale2_layer2): Sequential(
    (0): Conv2d(128, 256, kernel_size=(4, 4), stride=(2, 2), padding=(2, 2))
    (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (2): LeakyReLU(negative_slope=0.2, inplace=True)
  )
  (scale2_layer3): Sequential(
    (0): Conv2d(256, 512, kernel_size=(4, 4), stride=(1, 1), padding=(2, 2))
    (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (2): LeakyReLU(negative_slope=0.2, inplace=True)
  )
  (scale2_layer4): Sequential(
    (0): Conv2d(512, 1, kernel_size=(4, 4), stride=(1, 1), padding=(2, 2))
  )
  (downsample): AvgPool2d(kernel_size=3, stride=2, padding=[1, 1])
)


第scale0layer0的输出:torch.Size([1, 64, 257, 257])
第scale0layer1的输出:torch.Size([1, 128, 129, 129])
第scale0layer2的输出:torch.Size([1, 256, 65, 65])
第scale0layer3的输出:torch.Size([1, 512, 66, 66])
第scale0layer4的输出:torch.Size([1, 1, 67, 67])

第scale1layer0的输出:torch.Size([1, 64, 129, 129])
第scale1layer1的输出:torch.Size([1, 128, 65, 65])
第scale1layer2的输出:torch.Size([1, 256, 33, 33])
第scale1layer3的输出:torch.Size([1, 512, 34, 34])
第scale1layer4的输出:torch.Size([1, 1, 35, 35])

第scale2layer0的输出:torch.Size([1, 64, 65, 65])
第scale2layer1的输出:torch.Size([1, 128, 33, 33])
第scale2layer2的输出:torch.Size([1, 256, 17, 17])
第scale2layer3的输出:torch.Size([1, 512, 18, 18])
第scale2layer4的输出:torch.Size([1, 1, 19, 19])

用GANLoss计算output的损失

class GANLoss(nn.Module):
    """Adversarial GAN loss with cached real/fake target tensors.

    ``use_lsgan=True`` selects LSGAN (MSE); otherwise vanilla GAN (BCE,
    expecting a sigmoid-activated discriminator output).  The input may be
    a single prediction (a list of tensors whose last entry is the score
    map) or the nested list produced by ``MultiscaleDiscriminator``, in
    which case the per-scale losses are summed.
    """

    def __init__(self, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0,
                 tensor=torch.FloatTensor):
        super(GANLoss, self).__init__()
        self.real_label = target_real_label
        self.fake_label = target_fake_label
        # Cached constant target tensors, rebuilt when the prediction's
        # size or device changes.
        self.real_label_var = None
        self.fake_label_var = None
        # Kept only for interface compatibility; targets are now created
        # with torch.full_like so they always match the prediction's
        # dtype/device (the old Variable + CPU FloatTensor path raised a
        # device mismatch when the discriminator ran on GPU).
        self.Tensor = tensor
        self.loss = nn.MSELoss() if use_lsgan else nn.BCELoss()

    def get_target_tensor(self, input, target_is_real):
        """Return a constant tensor filled with the real/fake label,
        matching `input`'s size, dtype and device."""
        if target_is_real:
            if (self.real_label_var is None
                    or self.real_label_var.numel() != input.numel()
                    or self.real_label_var.device != input.device):
                self.real_label_var = torch.full_like(input, self.real_label).detach()
            return self.real_label_var
        if (self.fake_label_var is None
                or self.fake_label_var.numel() != input.numel()
                or self.fake_label_var.device != input.device):
            self.fake_label_var = torch.full_like(input, self.fake_label).detach()
        return self.fake_label_var

    def __call__(self, input, target_is_real):
        if isinstance(input[0], list):
            # Multi-scale input: one list of layer outputs per scale; only
            # the final score map of each scale contributes to the loss.
            loss = 0
            for input_i in input:
                pred = input_i[-1]
                target_tensor = self.get_target_tensor(pred, target_is_real)
                loss += self.loss(pred, target_tensor)
            return loss
        target_tensor = self.get_target_tensor(input[-1], target_is_real)
        return self.loss(input[-1], target_tensor)
if __name__ == '__main__':
    # Build the 3-scale discriminator with intermediate features enabled.
    netD = MultiscaleDiscriminator(input_nc=3, ndf=64, n_layers=3,
                                   norm_layer=nn.BatchNorm2d,
                                   use_sigmoid=False, num_D=3,
                                   getIntermFeat=True)
    print(netD)

    # Score a random "fake" image and print every feature-map shape.
    image_fake = torch.randn(1, 3, 512, 512)
    pre_output = netD(image_fake)
    for scale_idx in range(3):
        for layer_idx in range(5):  # n_layers + 2 stages per scale
            print(f'第scale{scale_idx}layer{layer_idx}的输出:{pre_output[scale_idx][layer_idx].shape}')

    # LSGAN loss of the prediction against the "fake" target.
    criterion = GANLoss()
    loss_D_fake = criterion(pre_output, False)
    print(f'loss_D_fake:{loss_D_fake}')

loss_D_fake:0.5513041019439697   
以上方法就是调用多尺度判别器并且计算损失值

二:VGG19 感知损失(Perceptual Loss)

class VGGLoss(nn.Module):
    """VGG19 perceptual loss: weighted L1 distance between the feature maps
    of x and y at five VGG stages.

    Shallower (higher-resolution) stages are down-weighted; the deepest
    stage has weight 1.  y's features are detached so gradients only flow
    through x.
    """

    def __init__(self, gpu_ids):
        super(VGGLoss, self).__init__()
        # BUG FIX: the original unconditionally called .cuda(), crashing on
        # CPU-only machines.  Fall back to CPU when CUDA is unavailable.
        # `gpu_ids` is kept for interface compatibility with callers.
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.vgg = Vgg19().to(device)
        self.criterion = nn.L1Loss()
        self.weights = [1.0 / 32, 1.0 / 16, 1.0 / 8, 1.0 / 4, 1.0]

    def forward(self, x, y):
        x_vgg, y_vgg = self.vgg(x), self.vgg(y)
        loss = 0
        for i in range(len(x_vgg)):
            # detach(): treat y's features as fixed regression targets.
            loss += self.weights[i] * self.criterion(x_vgg[i], y_vgg[i].detach())
        return loss

from torchvision import models
class Vgg19(torch.nn.Module):
    """Frozen VGG19 feature extractor returning the activations at five
    stage boundaries of torchvision's ``vgg19().features`` (indices
    2 / 7 / 12 / 21 / 30), as consumed by ``VGGLoss``.

    Parameters
    ----------
    requires_grad : bool
        When False (default) all VGG parameters are frozen.
    weights_path : str or None
        Local checkpoint to load into the network.  Defaults to the
        original hard-coded path; pass your own path on other machines,
        or None to skip loading (random initialisation).
    """

    # Original hard-coded checkpoint location, kept as the default so
    # existing callers behave identically.
    DEFAULT_WEIGHTS = r'C:\Users\Administrator\.cache\torch\hub\checkpoints\vgg19-dcbb9e9d.pth'

    def __init__(self, requires_grad=False, weights_path=DEFAULT_WEIGHTS):
        super(Vgg19, self).__init__()
        vgg19 = models.vgg19()
        if weights_path is not None:
            # SECURITY NOTE: torch.load unpickles arbitrary objects — only
            # load checkpoints from trusted sources.  map_location='cpu'
            # makes loading independent of the device the checkpoint was
            # saved on.
            weight_dict = torch.load(weights_path, map_location='cpu')
            # strict=False silently ignores missing/unexpected keys (kept
            # from the original); a full vgg19 checkpoint should match
            # exactly, so mismatches here would indicate a wrong file.
            vgg19.load_state_dict(weight_dict, strict=False)
        vgg_pretrained_features = vgg19.features

        # Split the feature stack into five slices so forward() can expose
        # the intermediate activations.
        boundaries = [0, 2, 7, 12, 21, 30]
        for idx in range(5):
            seq = torch.nn.Sequential()
            for x in range(boundaries[idx], boundaries[idx + 1]):
                seq.add_module(str(x), vgg_pretrained_features[x])
            setattr(self, 'slice' + str(idx + 1), seq)

        if not requires_grad:
            for param in self.parameters():
                param.requires_grad = False

    def forward(self, X):
        h_relu1 = self.slice1(X)
        h_relu2 = self.slice2(h_relu1)
        h_relu3 = self.slice3(h_relu2)
        h_relu4 = self.slice4(h_relu3)
        h_relu5 = self.slice5(h_relu4)
        return [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5]


if __name__ == '__main__':
    # Perceptual loss between two random "images".
    # BUG FIX: the original hard-coded .to('cuda') and crashed on machines
    # without a GPU; pick the device dynamically instead.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    criterion = VGGLoss(gpu_ids=0)
    fake = torch.randn(1, 3, 512, 512).to(device)
    real = torch.randn(1, 3, 512, 512).to(device)
    loss = criterion(fake, real)
    print(f'loss:{loss}')

loss:0.7730264663696289

三:梯度方差损失:

from torch import nn
import torch
import torch.nn.functional as F


class GradientVariance(nn.Module):
    """Gradient-variance loss between two images (RGB or grayscale).

    Both images are converted to grayscale, Sobel gradients are computed in
    x and y, each gradient map is cut into non-overlapping
    ``patch_size x patch_size`` patches, and the loss is the MSE between the
    per-patch variances of the target and output gradients (summed over the
    two directions).

    Parameters
    ----------
    patch_size : int
        Side length of the non-overlapping patches extracted from both
        images.
    """

    def __init__(self, patch_size):
        super(GradientVariance, self).__init__()
        self.patch_size = patch_size
        # BUG FIX: the Sobel kernels are registered as (non-persistent)
        # buffers so that .to(device)/.cuda() moves them together with the
        # module.  The original stored plain tensor attributes that stayed
        # on the CPU, making F.conv2d fail for GPU inputs.
        self.register_buffer(
            'kernel_x',
            torch.FloatTensor([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]).unsqueeze(0).unsqueeze(0),
            persistent=False)
        self.register_buffer(
            'kernel_y',
            torch.FloatTensor([[1, 2, 1], [0, 0, 0], [-1, -2, -1]]).unsqueeze(0).unsqueeze(0),
            persistent=False)
        # Unfolds a gradient map into non-overlapping patches.
        self.unfold = torch.nn.Unfold(kernel_size=(self.patch_size, self.patch_size),
                                      stride=self.patch_size)

    def _to_gray(self, output, target):
        """Return single-channel versions of both images (BT.601 weights
        for RGB); raise ValueError for unsupported channel counts."""
        if output.shape[1] == 3 and target.shape[1] == 3:
            gray_output = (0.2989 * output[:, 0:1, :, :]
                           + 0.5870 * output[:, 1:2, :, :]
                           + 0.1140 * output[:, 2:, :, :])
            gray_target = (0.2989 * target[:, 0:1, :, :]
                           + 0.5870 * target[:, 1:2, :, :]
                           + 0.1140 * target[:, 2:, :, :])
        elif output.shape[1] == 1 and target.shape[1] == 1:
            gray_output, gray_target = output, target
        else:
            raise ValueError(
                "output和target的通道数不受支持。对于两者,期望的通道数为1或3,但实际得到的output通道数为{},target通道数为{}。".format(
                    output.shape[1], target.shape[1]))
        return gray_output, gray_target

    def forward(self, output, target):
        gray_output, gray_target = self._to_gray(output, target)

        # Sobel gradient maps in x and y.
        gx_target = F.conv2d(gray_target, self.kernel_x, stride=1, padding=1)
        gy_target = F.conv2d(gray_target, self.kernel_y, stride=1, padding=1)
        gx_output = F.conv2d(gray_output, self.kernel_x, stride=1, padding=1)
        gy_output = F.conv2d(gray_output, self.kernel_y, stride=1, padding=1)

        # Variance of the gradients within each non-overlapping patch.
        var_target_x = torch.var(self.unfold(gx_target), dim=1)
        var_output_x = torch.var(self.unfold(gx_output), dim=1)
        var_target_y = torch.var(self.unfold(gy_target), dim=1)
        var_output_y = torch.var(self.unfold(gy_output), dim=1)

        # Total gradient-variance loss: MSE of patch variances, x + y.
        return (F.mse_loss(var_target_x, var_output_x)
                + F.mse_loss(var_target_y, var_output_y))


if __name__ == '__main__':
    # ---- gradient-variance loss demo ------------------------------------
    pred_img = torch.randn(1, 3, 512, 512)
    gt_img = torch.randn(1, 3, 512, 512)

    # Weight of the gradient-variance term in a combined objective.
    gradloss_weight = 0.01

    # Pick GPU when available, otherwise CPU, and move everything there.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    pred_img = pred_img.to(device)
    gt_img = gt_img.to(device)

    grad_criterion = GradientVariance(patch_size=8).to(device)
    loss_grad = gradloss_weight * grad_criterion(pred_img, gt_img)
    print(f'loss_grad: {loss_grad.item()}')

    # ---- nn.Unfold demo: split a 6x6 image into 2x2 patches --------------
    inp = torch.tensor([[[[1.0, 2, 3, 4, 5, 6],
                          [7, 8, 9, 10, 11, 12],
                          [13, 14, 15, 16, 17, 18],
                          [19, 20, 21, 22, 23, 24],
                          [25, 26, 27, 28, 29, 30],
                          [31, 32, 33, 34, 35, 36],
                          ]]])
    print('inp=')
    print(inp)

    unfold = nn.Unfold(kernel_size=(2, 2), dilation=1, padding=0, stride=(2, 2))
    inp_unf = unfold(inp)
    print('inp_unf=')
    print(inp_unf)
    # Sample run output, kept from the original post for reference:
    """
loss_grad: 0.08774767816066742
inp=
tensor([[[[ 1.,  2.,  3.,  4.,  5.,  6.],
          [ 7.,  8.,  9., 10., 11., 12.],
          [13., 14., 15., 16., 17., 18.],
          [19., 20., 21., 22., 23., 24.],
          [25., 26., 27., 28., 29., 30.],
          [31., 32., 33., 34., 35., 36.]]]])
inp_unf=
tensor([[[ 1.,  3.,  5., 13., 15., 17., 25., 27., 29.],
         [ 2.,  4.,  6., 14., 16., 18., 26., 28., 30.],
         [ 7.,  9., 11., 19., 21., 23., 31., 33., 35.],
         [ 8., 10., 12., 20., 22., 24., 32., 34., 36.]]])

        """

loss_grad:0.09038279205560684

  • 12
    点赞
  • 15
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值