PhyCRNet模型构建完成
$\rightarrow$ Train Loss
part5 Loss
1、Conv2dDerivative
class Conv2dDerivative(nn.Module):
    """Finite-difference derivative operator implemented as a fixed 2-D convolution.

    The convolution weight is set to a hand-crafted finite-difference
    stencil (e.g. a Laplacian or first-derivative kernel) and frozen, so
    the module computes ``conv2d(input, DerFilter) / resol``.

    Args:
        DerFilter: nested list / array of shape ``(1, 1, k, k)`` holding the
            finite-difference stencil coefficients.
        resol: finite-difference scaling constant — e.g. ``dx**2`` for a
            Laplacian operator, ``dx`` or ``dy`` for first derivatives.
        kernel_size: stencil size ``k`` (default 3).
        name: optional label identifying the operator.
    """

    def __init__(self, DerFilter, resol, kernel_size=3, name=''):
        super(Conv2dDerivative, self).__init__()
        self.resol = resol  # constant in the finite difference
        self.name = name
        self.input_channels = 1
        self.output_channels = 1
        self.kernel_size = kernel_size
        # NOTE(review): computed but never used — the conv below hard-codes
        # padding=0 (callers are presumably expected to pad the input
        # themselves). Kept as an attribute for backward compatibility.
        self.padding = int((kernel_size - 1) / 2)
        self.filter = nn.Conv2d(self.input_channels, self.output_channels,
                                self.kernel_size, 1, padding=0, bias=False)
        # Fixed gradient operator: install the stencil as the conv weight and
        # freeze it (requires_grad=False) so it is never updated by training.
        # torch.tensor(..., dtype=...) is the modern replacement for the
        # legacy torch.FloatTensor(...) constructor; values are identical.
        self.filter.weight = nn.Parameter(
            torch.tensor(DerFilter, dtype=torch.float32), requires_grad=False)

    def forward(self, input):
        """Apply the frozen stencil and scale by the finite-difference constant."""
        derivative = self.filter(input)
        return derivative / self.resol
filter
filter=conv2d(1, 1, 3x3, 1, padding=0, bias=False)
- 通过给filter传参,得到derivative
- 定义了filter的权重,weight
- nn.Parameter()将weight注册为模块参数,但这里传入requires_grad=False,表示该权重是固定的差分算子,不会被优化
derivative
derivative = self.filter(input)
输出Conv2d网络的output
resol
有限差分中的常数
- laplace_operator:(dx**2)
- dx_operator:(dx*1)
- dy_operator:(dy*1)
最后Conv2dDerivative返回 $\frac{derivative}{resol}$
2、Conv1dDerivative
class Conv1dDerivative(nn.Module):
def __init__(self, DerFilter, resol, kernel_size=3, name=''):
super(Conv1dDerivative, self).__init__()
self.resol = resol # $\delta$*constant in the finite difference
self.name = name
self.input_channels = 1
self.output_channels = 1
self.kernel_size = kernel_size
self.padding = int((kernel_size - 1) / 2)
self.filter = nn.Conv1d(self.input_channels, self.output_channels, self.kernel_size,
1, padding