Solving PDEs with weak adversarial networks (WAN)

The main reference is the paper "Weak adversarial networks for high-dimensional partial differential equations".
$$\mathrm{Loss} = \ln(r) + \beta \int_{\partial\Omega} (u-g)^{2} \, ds$$
This loss function is nothing unusual. The core of the WAN code is that the test function $v$ is no longer fixed in advance: like the approximate solution $u$, it is the output of a neural network, i.e. $v = v_{\theta}$ and $u = u_{\eta}$. The problem being solved is
the Poisson problem $-\Delta u = f$ in $\Omega$ with $u = g$ on $\partial\Omega$ (the source term $f$ is what FF encodes below), recast as the saddle-point problem

$$\min_{u}\max_{v}\; r, \qquad r = \frac{\left(\int_{\Omega} \nabla u \cdot \nabla v \, dx - \int_{\Omega} f v \, dx\right)^{2}}{\|v\|_{L^{2}(\Omega)}^{2}},$$

where $v$ ranges over test functions that vanish on $\partial\Omega$.
Key point: although $v$ is produced by a neural network, to guarantee that $v$ vanishes on the boundary we must multiply the network output by an additional length-factor function (see the definition in my earlier posts).
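Concretely, on the rectangle $\Omega = [a_1,b_1]\times[a_2,b_2]$ used below, the length factor implemented in the LEN class is

$$\ell(x) = \prod_{i=1}^{2}\left(1-\left(1-(x_i-a_i)\right)^{\mu}\right)\left(1-\left(1-(b_i-x_i)\right)^{\mu}\right), \qquad v_{\theta}(x) = \ell(x)\, N_{\theta}(x),$$

which is zero exactly when some $x_i = a_i$ or $x_i = b_i$, so $v_{\theta}$ vanishes on $\partial\Omega$ no matter what the raw network output $N_{\theta}$ is.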

import numpy as np
import matplotlib.pyplot as plt
import torch
import time
import torch.nn as nn
def UU(X, order,prob):
    if prob==1:
        temp = 10*(X[:,0]+X[:,1])**2 + (X[:,0]-X[:,1])**2 + 0.5
        if order[0]==0 and order[1]==0:
            return torch.log(temp)
        if order[0]==1 and order[1]==0:
            return temp**(-1) * (20*(X[:,0]+X[:,1]) + 2*(X[:,0]-X[:,1]))
        if order[0]==0 and order[1]==1:
            return temp**(-1) * (20*(X[:,0]+X[:,1]) - 2*(X[:,0]-X[:,1]))
        if order[0]==2 and order[1]==0:
            return - temp**(-2) * (20*(X[:,0]+X[:,1])+2*(X[:,0]-X[:,1])) ** 2 \
                   + temp**(-1) * (22)
        if order[0]==1 and order[1]==1:
            return - temp**(-2) * (20*(X[:,0]+X[:,1])+2*(X[:,0]-X[:,1])) \
                   * (20*(X[:,0]+X[:,1])-2*(X[:,0]-X[:,1])) \
                   + temp**(-1) * (18)
        if order[0]==0 and order[1]==2:
            return - temp**(-2) * (20*(X[:,0]+X[:,1])-2*(X[:,0]-X[:,1])) ** 2 \
                   + temp**(-1) * (22)
    if prob==2:
        temp1 = X[:,0]*X[:,0] - X[:,1]*X[:,1]
        temp2 = X[:,0]*X[:,0] + X[:,1]*X[:,1] + 0.1
        if order[0]==0 and order[1]==0:
            return temp1 * temp2**(-1)
        if order[0]==1 and order[1]==0:
            return (2*X[:,0]) * temp2**(-1) + \
                   temp1 * (-1)*temp2**(-2) * (2*X[:,0])
        if order[0]==0 and order[1]==1:
            return (-2*X[:,1]) * temp2**(-1) + \
                   temp1 * (-1)*temp2**(-2) * (2*X[:,1])
        if order[0]==2 and order[1]==0:
            return (2) * temp2**(-1) + \
                   2 * (2*X[:,0]) * (-1)*temp2**(-2) * (2*X[:,0]) + \
                   temp1 * (2)*temp2**(-3) * (2*X[:,0])**2 + \
                   temp1 * (-1)*temp2**(-2) * (2)
        if order[0]==1 and order[1]==1:
            return (2*X[:,0]) * (-1)*temp2**(-2) * (2*X[:,1]) + \
                   (-2*X[:,1]) * (-1)*temp2**(-2) * (2*X[:,0]) + \
                   temp1 * (2)*temp2**(-3) * (2*X[:,0]) * (2*X[:,1])
        if order[0]==0 and order[1]==2:
            return (-2) * temp2**(-1) + \
                   2 * (-2*X[:,1]) * (-1)*temp2**(-2) * (2*X[:,1]) + \
                   temp1 * (2)*temp2**(-3) * (2*X[:,1])**2 + \
                   temp1 * (-1)*temp2**(-2) * (2)
    if prob==3:
        temp = torch.exp(-4*X[:,1]*X[:,1])
        if order[0]==0 and order[1]==0:
            ind = (X[:,0]<=0).float()
            return ind * ((X[:,0]+1)**4-1) * temp + \
                   (1-ind) * (-(-X[:,0]+1)**4+1) * temp
        if order[0]==1 and order[1]==0:
            ind = (X[:,0]<=0).float()
            return ind * (4*(X[:,0]+1)**3) * temp + \
                   (1-ind) * (4*(-X[:,0]+1)**3) * temp
        if order[0]==0 and order[1]==1:
            ind = (X[:,0]<=0).float()
            return ind * ((X[:,0]+1)**4-1) * (temp*(-8*X[:,1])) + \
                   (1-ind) * (-(-X[:,0]+1)**4+1) * (temp*(-8*X[:,1]))
        if order[0]==2 and order[1]==0:
            ind = (X[:,0]<=0).float()
            return ind * (12*(X[:,0]+1)**2) * temp + \
                   (1-ind) * (-12*(-X[:,0]+1)**2) * temp
        if order[0]==1 and order[1]==1:
            ind = (X[:,0]<=0).float()
            return ind * (4*(X[:,0]+1)**3) * (temp*(-8*X[:,1])) + \
                   (1-ind) * (4*(-X[:,0]+1)**3) * (temp*(-8*X[:,1]))
        if order[0]==0 and order[1]==2:
            ind = (X[:,0]<=0).float()
            return ind * ((X[:,0]+1)**4-1) * (temp*(64*X[:,1]*X[:,1]-8)) + \
                   (1-ind) * (-(-X[:,0]+1)**4+1) * (temp*(64*X[:,1]*X[:,1]-8))
def FF(X,prob):
    return -UU(X,[2,0],prob) - UU(X,[0,2],prob)
class INSET():
    def __init__(self, bound, nx,prob):
        self.bound = bound
        self.nx = nx
        self.hx = [(self.bound[0,1]-self.bound[0,0])/self.nx[0],
                   (self.bound[1,1]-self.bound[1,0])/self.nx[1]]
        self.prob = prob
        self.size = self.nx[0]*self.nx[1]
        self.X = torch.zeros(self.size,2)
        m = 0
        for i in range(self.nx[0]):
            for j in range(self.nx[1]):
                self.X[m,0] = self.bound[0,0] + (i + 0.5)*self.hx[0]
                self.X[m,1] = self.bound[1,0] + (j + 0.5)*self.hx[1]
                m = m + 1
        #plt.scatter(self.X[:,0],self.X[:,1])
        self.right = FF(self.X,prob).view(-1,1)
        self.u_acc = UU(self.X,[0,0],prob).reshape(self.size,1)

class BDSET():# boundary sample points
    def __init__(self,bound,nx,prob):
        self.dim = 2
        #self.area = (bound[0,1] - bound[0,0])*(bound[1,1] - bound[1,0])
        self.hx = [(bound[0,1] - bound[0,0])/nx[0],(bound[1,1] - bound[1,0])/nx[1]]
        self.size = 2*(nx[0] + nx[1])
        self.X = torch.zeros(self.size,self.dim)# store the boundary points
        m = 0
        for i in range(nx[0]):
            self.X[m,0] = bound[0,0] + (i + 0.5)*self.hx[0]
            self.X[m,1] = bound[1,0] 
            m = m + 1
        for j in range(nx[1]):
            self.X[m,0] = bound[0,1]
            self.X[m,1] = bound[1,0] + (j + 0.5)*self.hx[1]
            m = m + 1
        for i in range(nx[0]):
            self.X[m,0] = bound[0,0] + (i + 0.5)*self.hx[0]
            self.X[m,1] = bound[1,1] 
            m = m + 1
        for j in range(nx[1]):
            self.X[m,0] = bound[0,0]
            self.X[m,1] = bound[1,0] + (j + 0.5)*self.hx[1]
            m = m + 1
        #plt.scatter(self.X[:,0],self.X[:,1])
        self.u_acc = UU(self.X,[0,0],prob).view(-1,1)# exact solution at the boundary points
class TESET():
    def __init__(self, bound, nx,prob):
        self.bound = bound
        self.nx = nx
        self.hx = [(self.bound[0,1]-self.bound[0,0])/self.nx[0],
                   (self.bound[1,1]-self.bound[1,0])/self.nx[1]]
        self.prob = prob
        self.size = (self.nx[0] + 1)*(self.nx[1] + 1)
        self.X = torch.zeros(self.size,2)
        m = 0
        for i in range(self.nx[0] + 1):
            for j in range(self.nx[1] + 1):
                self.X[m,0] = self.bound[0,0] + i*self.hx[0]
                self.X[m,1] = self.bound[1,0] + j*self.hx[1]
                m = m + 1
        #plt.scatter(self.X[:,0],self.X[:,1])
        self.u_acc = UU(self.X,[0,0],prob).reshape(self.size,1)

The code above handles the sampling.
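Since the hand-coded derivatives in UU are easy to get wrong, it is worth checking them against autograd before training. A minimal sketch (the random sample X and the choice prob=2 are arbitrary):

# sanity check: compare UU's analytic first derivatives with autograd
X = torch.rand(100,2)
X.requires_grad = True
u = UU(X,[0,0],2)
ux, = torch.autograd.grad(u, X, grad_outputs=torch.ones_like(u))
print((ux[:,0] - UU(X,[1,0],2)).abs().max().item())  # should be at round-off level
print((ux[:,1] - UU(X,[0,1],2)).abs().max().item())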

np.random.seed(1234)
torch.manual_seed(1234)
class NETG(nn.Module):# produces the raw test-function output; the test function is v = netg * lengthfactor
    def __init__(self):
        super(NETG,self).__init__()
        self.fc1 = torch.nn.Linear(2,10)
        self.fc2 = torch.nn.Linear(10,10)
        self.fc3 = torch.nn.Linear(10,1)
    def forward(self,x):
        out = torch.sin(self.fc1(x)) + x@torch.eye(x.size(-1),10)
        out = torch.sin(self.fc2(out)) + out@torch.eye(out.size(-1),10)
        # x@torch.eye(x.size(-1),10) zero-pads the input to width 10 so it can be added as a skip connection;
        # x.size(-1) is the trailing dimension, so any batch shape [...,2] works
        return self.fc3(out)
    
class SIN(nn.Module):# sin(x)**order activation, used inside the Res blocks below
    def __init__(self,order):
        super(SIN,self).__init__()
        self.e = order
    def forward(self,x):
        return torch.sin(x)**self.e
class Res(nn.Module):
    def __init__(self,input_size,output_size):
        super(Res,self).__init__()
        self.model = nn.Sequential(
            nn.Linear(input_size,output_size),
            SIN(1),
            nn.Linear(output_size,output_size),
            SIN(1)
        )
        self.input = input_size
        self.output = output_size
    def forward(self,x):
        out = self.model(x) + x@torch.eye(x.size(-1),self.output)# residual-style skip connection
        # x.size(-1) is the trailing dimension, so any batch shape [...,input_size] works
        return out
class NETF(nn.Module):# the approximate solution u; its boundary values are enforced by the penalty term in Loss
    def __init__(self):
        super(NETF,self).__init__()
        self.model = nn.Sequential(
            Res(2,10),
            Res(10,10),
            Res(10,10)
        )
        self.fc = torch.nn.Linear(10,1)
    def forward(self,x):
        out = self.model(x)
        return self.fc(out)
def pred(netf,X):
    return netf.forward(X)
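A quick shape check of the two networks (a minimal sketch; net_u and net_v are throwaway instances used only for this check):

# both networks map a batch of 2-d points to a column of scalar outputs
net_u, net_v = NETF(), NETG()
x = torch.rand(5,2)
print(net_u(x).shape, net_v(x).shape)  # torch.Size([5, 1]) torch.Size([5, 1])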
class LEN():
    def __init__(self,bound,mu):
        self.mu = mu
        self.bound = bound
    def forward(self,X):
        L = 1.0
        
        for i in range(2):
            L = L*(1 - (1 - (X[:,i] - self.bound[i,0]))**self.mu)
            L = L*(1 - (1 - (self.bound[i,1] - X[:,i]))**self.mu)
        return L.view(-1,1)
bound = torch.tensor([[-1,2],[-1,1]]).float()
nx = [30,20]
nx_te = [30,40]
prob = 2
mu = 3
lr = 1e-2
lenth = LEN(bound,mu)

inset = INSET(bound,nx,prob)
bdset = BDSET(bound,nx,prob)
print(lenth.forward(bdset.X).shape)
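Every point of bdset.X lies on $\partial\Omega$, so the length factor should be exactly zero there and strictly positive at the interior points:

print(lenth.forward(bdset.X).abs().max().item())  # 0.0 on the boundary
print(lenth.forward(inset.X).min().item())        # > 0 in the interior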


Here, pay particular attention to how the test function v inside the loss function Loss is defined.
The training procedure is laid out below alongside the code.

def Loss(netg,netf,inset,bdset,lenth,beta):
    inset.X.requires_grad = True
    v = netg.forward(inset.X)*lenth.forward(inset.X)
    u = netf.forward(inset.X)
    ux, = torch.autograd.grad(u, inset.X, create_graph=True, retain_graph=True,
                              grad_outputs=torch.ones(inset.size,1))
    
    vx, = torch.autograd.grad(v, inset.X, create_graph=True, retain_graph=True,
                              grad_outputs=torch.ones(inset.size,1))
    # weak residual of -Laplace(u) = f: (int grad(u).grad(v) dx - int f*v dx)^2 via sample means;
    # the source term f is stored in inset.right
    r = (((ux*vx).sum(1)).mean() - (inset.right*v).mean())**2
    out_in = torch.log(r) - torch.log((v**2).mean())
    
    ub = netf.forward(bdset.X)
    out_b = ((ub - bdset.u_acc)**2).mean()
    return out_in + beta*out_b
def error(u_pred, u_acc):
    return (((u_pred-u_acc)**2).sum() / (u_acc**2).sum()) ** (0.5)
def train(netg,netf,inset,bdset,lenth,beta,g_num,f_num,epoch):
    print('train neural network netg and netf')
    loss = Loss(netg,netf,inset,bdset,lenth,beta)
    print('epoch:%d,lossf:%.3e'%(0,loss.item()))
    
    # save the initial network parameters
    torch.save(netg.state_dict(),'best_netg.pkl')
    torch.save(netf.state_dict(),'best_netf.pkl')
    
    record = 50
    
    for i in range(epoch):
        st = time.time()
        for j in range(record):
            
            # train netf, the solution network (minimize Loss)
            netg.load_state_dict(torch.load('best_netg.pkl'))
            lossfoptimal = Loss(netg,netf,inset,bdset,lenth,beta)
            for f in range(f_num):# f_num is kept small
                optimf.zero_grad()
                lossf = Loss(netg,netf,inset,bdset,lenth,beta)
                lossf.backward()
                optimf.step()
            if lossf < lossfoptimal:
                lossfoptimal = lossf
                torch.save(netf.state_dict(),'best_netf.pkl')
            # train netg, the test-function network (maximize Loss by minimizing -Loss)
            netf.load_state_dict(torch.load('best_netf.pkl'))
            lossgoptimal = - lossfoptimal
            for g in range(g_num):# g_num is kept small
                optimg.zero_grad()
                lossg = - Loss(netg,netf,inset,bdset,lenth,beta)
                lossg.backward()
                optimg.step()
            if lossg < lossgoptimal:
                lossgoptimal = lossg
                torch.save(netg.state_dict(),'best_netg.pkl')
        ela = time.time() - st
        ERROR = error(netf.forward(inset.X),inset.u_acc)
        loss = Loss(netg,netf,inset,bdset,lenth,beta)
        print('epoch:%d,error:%.3e,lossf:%.3e,time:%.2f'%((i + 1)*record*(g_num + f_num),ERROR.item(),loss.item(),ela))
        
bound = torch.tensor([[-1,2],[-1,1]]).float()
nx = [30,30]
nx_te = [30,40]
prob = 2
inset = INSET(bound,nx,prob)
bdset = BDSET(bound,nx,prob)
teset = TESET(bound,nx_te,prob)
beta = 5e2
mu = 3

lenth = LEN(bound,mu)

netg = NETG()
netf = NETF()
lr = 1e-2
optimg = torch.optim.Adam(netg.parameters(), lr=lr)
optimf = torch.optim.Adam(netf.parameters(), lr=lr)
g_num = 1
f_num = 2
epoch = 5
train(netg,netf,inset,bdset,lenth,beta,g_num,f_num,epoch)
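After training, the TESET grid built above (which is never touched during training) gives an independent relative $L^2$ error; a minimal check:

with torch.no_grad():
    print('test error:%.3e'%error(pred(netf,teset.X),teset.u_acc).item())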

Training here involves two neural networks, and unlike before, they must be trained alternately, with only a small number of updates per turn. In the code above, NETF, which represents the approximate solution $u$, is trained first, followed by NETG, which generates the test function $v$: for every single update of NETG, NETF is updated twice (f_num = 2, g_num = 1), and this cycle repeats throughout training.
Unfortunately, the WAN training here performs poorly and is unstable; the results fall far short of the accuracy reported in the paper. I would appreciate readers pointing out any mistakes.
