import torch
import torch.nn as nn
import numpy as np
from torch.optim import Adam, SGD
from torch import autograd
from torch.autograd import Variable
import torch.nn.functional as F
from torch.autograd import grad as torch_grad
import torch.nn.utils.weight_norm as weightNorm
from utils.util import*
# Select the GPU when available, otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dim = 128
LAMBDA = 10  # Gradient penalty lambda hyperparameter (WGAN-GP)


class TReLU(nn.Module):
    """ReLU with a learnable threshold: f(x) = relu(x - alpha) + alpha.

    ``alpha`` is registered as an ``nn.Parameter`` so it becomes part of the
    model and is updated by the optimizer during training. It is initialized
    to 0, so the module starts out as a plain ReLU.
    """

    def __init__(self):
        super(TReLU, self).__init__()
        # nn.Parameter turns this tensor into a trainable model parameter.
        self.alpha = nn.Parameter(torch.FloatTensor(1), requires_grad=True)
        self.alpha.data.fill_(0)  # start as a standard ReLU

    def forward(self, x):
        # Shift the ReLU hinge point to the learned threshold alpha.
        x = F.relu(x - self.alpha) + self.alpha
        return x
class Discriminator(nn.Module):
    """WGAN-GP discriminator over a 6-channel input.

    Presumably the 6 channels are two stacked RGB images (real/fake pair) —
    confirm against the caller. Four stride-2, 5x5 weight-normalized
    convolutions downsample the input; a final 1x1 convolution produces a
    single-channel score map, which is flattened and returned.
    """

    def __init__(self):
        super(Discriminator, self).__init__()
        # Weight normalization is used instead of batch norm, which is
        # incompatible with the per-sample WGAN-GP gradient penalty.
        self.conv0 = weightNorm(nn.Conv2d(6, 16, 5, 2, 2))
        self.conv1 = weightNorm(nn.Conv2d(16, 32, 5, 2, 2))
        self.conv2 = weightNorm(nn.Conv2d(32, 64, 5, 2, 2))
        self.conv3 = weightNorm(nn.Conv2d(64, 128, 5, 2, 2))
        self.conv4 = weightNorm(nn.Conv2d(128, 1, 1, 1, 0))
        # One independent learnable-threshold ReLU per conv stage.
        self.relu0 = TReLU()
        self.relu1 = TReLU()
        self.relu2 = TReLU()
        self.relu3 = TReLU()

    def forward(self, x):
        x = self.conv0(x)
        x = self.relu0(x)
        x = self.conv1(x)
        x = self.relu1(x)
        x = self.conv2(x)
        x = self.relu2(x)
        x = self.conv3(x)
        x = self.relu3(x)
        x = self.conv4(x)
        # NOTE(review): the source text was garbled/truncated here ("x = x"
        # with no return). Flattening the 1-channel score map and returning
        # it is the conventional WGAN-GP ending — confirm against upstream.
        x = x.view(-1)
        return x