Solving the Poisson equation with Dirichlet boundary conditions on a hexagonal domain
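The script below uses a boundary-penalty PINN. Reconstructed from the manufactured solutions UU and the right-hand side FF defined in the code, the target problem is

    -Δu = f  in Ω,    u = g  on ∂Ω,

where Ω is the hexagonal domain, f = FF(X, prob) = -(u_xx + u_yy), and g is the exact solution UU(X, [0,0], prob) restricted to the boundary; the Dirichlet condition is imposed through a penalty term with weight beta in Lossf.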
import torch
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import torch.nn as nn
import bfgs
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
np.random.seed(1234)
torch.manual_seed(1234)
def UU(X, order,prob):
if prob==1:
temp = 10*(X[:,0]+X[:,1])**2 + (X[:,0]-X[:,1])**2 + 0.5
if order[0]==0 and order[1]==0:
return torch.log(temp)
if order[0]==1 and order[1]==0:
return temp**(-1) * (20*(X[:,0]+X[:,1]) + 2*(X[:,0]-X[:,1]))
if order[0]==0 and order[1]==1:
return temp**(-1) * (20*(X[:,0]+X[:,1]) - 2*(X[:,0]-X[:,1]))
if order[0]==2 and order[1]==0:
return - temp**(-2) * (20*(X[:,0]+X[:,1])+2*(X[:,0]-X[:,1])) ** 2 \
+ temp**(-1) * (22)
if order[0]==1 and order[1]==1:
return - temp**(-2) * (20*(X[:,0]+X[:,1])+2*(X[:,0]-X[:,1])) \
* (20*(X[:,0]+X[:,1])-2*(X[:,0]-X[:,1])) \
+ temp**(-1) * (18)
if order[0]==0 and order[1]==2:
return - temp**(-2) * (20*(X[:,0]+X[:,1])-2*(X[:,0]-X[:,1])) ** 2 \
+ temp**(-1) * (22)
if prob==2:
if order[0]==0 and order[1]==0:
return (X[:,0]*X[:,0]*X[:,0]-X[:,0]) * \
0.5*(torch.exp(2*X[:,1])+torch.exp(-2*X[:,1]))
if order[0]==1 and order[1]==0:
return (3*X[:,0]*X[:,0]-1) * \
0.5*(torch.exp(2*X[:,1])+torch.exp(-2*X[:,1]))
if order[0]==0 and order[1]==1:
return (X[:,0]*X[:,0]*X[:,0]-X[:,0]) * \
(torch.exp(2*X[:,1])-torch.exp(-2*X[:,1]))
if order[0]==2 and order[1]==0:
return (6*X[:,0]) * \
0.5*(torch.exp(2*X[:,1])+torch.exp(-2*X[:,1]))
if order[0]==1 and order[1]==1:
return (3*X[:,0]*X[:,0]-1) * \
(torch.exp(2*X[:,1])-torch.exp(-2*X[:,1]))
if order[0]==0 and order[1]==2:
return (X[:,0]*X[:,0]*X[:,0]-X[:,0]) * \
2*(torch.exp(2*X[:,1])+torch.exp(-2*X[:,1]))
if prob==3:
temp1 = X[:,0]*X[:,0] - X[:,1]*X[:,1]
temp2 = X[:,0]*X[:,0] + X[:,1]*X[:,1] + 0.1
if order[0]==0 and order[1]==0:
return temp1 * temp2**(-1)
if order[0]==1 and order[1]==0:
return (2*X[:,0]) * temp2**(-1) + \
temp1 * (-1)*temp2**(-2) * (2*X[:,0])
if order[0]==0 and order[1]==1:
return (-2*X[:,1]) * temp2**(-1) + \
temp1 * (-1)*temp2**(-2) * (2*X[:,1])
if order[0]==2 and order[1]==0:
return (2) * temp2**(-1) + \
2 * (2*X[:,0]) * (-1)*temp2**(-2) * (2*X[:,0]) + \
temp1 * (2)*temp2**(-3) * (2*X[:,0])**2 + \
temp1 * (-1)*temp2**(-2) * (2)
if order[0]==1 and order[1]==1:
return (2*X[:,0]) * (-1)*temp2**(-2) * (2*X[:,1]) + \
(-2*X[:,1]) * (-1)*temp2**(-2) * (2*X[:,0]) + \
temp1 * (2)*temp2**(-3) * (2*X[:,0]) * (2*X[:,1])
if order[0]==0 and order[1]==2:
return (-2) * temp2**(-1) + \
2 * (-2*X[:,1]) * (-1)*temp2**(-2) * (2*X[:,1]) + \
temp1 * (2)*temp2**(-3) * (2*X[:,1])**2 + \
temp1 * (-1)*temp2**(-2) * (2)
if prob==4:
temp = torch.exp(-4*X[:,1]*X[:,1])
if order[0]==0 and order[1]==0:
ind = (X[:,0]<=0).float()
return ind * ((X[:,0]+1)**4-1) * temp + \
(1-ind) * (-(-X[:,0]+1)**4+1) * temp
if order[0]==1 and order[1]==0:
ind = (X[:,0]<=0).float()
return ind * (4*(X[:,0]+1)**3) * temp + \
(1-ind) * (4*(-X[:,0]+1)**3) * temp
if order[0]==0 and order[1]==1:
ind = (X[:,0]<=0).float()
return ind * ((X[:,0]+1)**4-1) * (temp*(-8*X[:,1])) + \
(1-ind) * (-(-X[:,0]+1)**4+1) * (temp*(-8*X[:,1]))
if order[0]==2 and order[1]==0:
ind = (X[:,0]<=0).float()
return ind * (12*(X[:,0]+1)**2) * temp + \
(1-ind) * (-12*(-X[:,0]+1)**2) * temp
if order[0]==1 and order[1]==1:
ind = (X[:,0]<=0).float()
return ind * (4*(X[:,0]+1)**3) * (temp*(-8*X[:,1])) + \
(1-ind) * (4*(-X[:,0]+1)**3) * (temp*(-8*X[:,1]))
if order[0]==0 and order[1]==2:
ind = (X[:,0]<=0).float()
return ind * ((X[:,0]+1)**4-1) * (temp*(64*X[:,1]*X[:,1]-8)) + \
(1-ind) * (-(-X[:,0]+1)**4+1) * (temp*(64*X[:,1]*X[:,1]-8))
def FF(X,prob):
return -UU(X,[2,0],prob) - UU(X,[0,2],prob)
class INSET():
def __init__(self, radius, nx,prob):
self.dim = 2
self.radius = radius
self.nx = nx
self.hx = self.radius/(nx*np.sqrt(3.0))
self.size = 6 * (self.nx - 1)*(self.nx + 0) + 1
self.X = torch.zeros(self.size,self.dim)
m = 0
for i in range(self.nx - 1):
for j in range(self.nx + 0):
self.X[m,0] = (1 + i + 0.5*j) * self.hx
self.X[m,1] = j * 0.5*3**0.5*self.hx
m = m + 1
th = np.pi/3
RM = torch.Tensor([np.cos(th),np.sin(th),-np.sin(th),np.cos(th)]).reshape(2,2)
for k in range(5):
ind0 = k*(self.nx - 1)*(self.nx + 0)
ind1 = (k+1)*(self.nx - 1)*(self.nx + 0)
ind2 = (k+2)*(self.nx - 1)*(self.nx + 0)
self.X[ind1:ind2,:] = self.X[ind0:ind1,:].matmul(RM)
self.right = FF(self.X,prob).reshape(-1,1)
self.u_acc = UU(self.X,[0,0],prob).reshape(-1,1)
class BDSET():
def __init__(self, radius, nx,prob):
self.dim = 2
self.radius = radius
self.nx = nx
self.hx = self.radius/(nx*np.sqrt(3.0))
self.lenth = 12*self.nx*self.hx
self.size = 12*self.nx
self.X = torch.zeros(self.size,self.dim)
self.n = torch.zeros(self.size,self.dim)
m = 0
for i in range(self.nx):
self.X[m,0] = -(i+0.5) * 0.5*self.hx
self.X[m,1] = self.radius - (i+0.5) * 0.5*3**0.5*self.hx
self.n[m,0] = -0.5*3**0.5
self.n[m,1] = 0.5
m = m+1
for i in range(self.nx):
self.X[m,0] = -0.5*self.radius/3**0.5 - (i+0.5) * self.hx
self.X[m,1] = 0.5*self.radius
self.n[m,0] = 0.0
self.n[m,1] = 1.0
m = m+1
th = np.pi/3
RM = torch.Tensor([np.cos(th),np.sin(th),-np.sin(th),np.cos(th)]).reshape(2,2)
for k in range(5):
ind0 = k*2*self.nx; ind1 = (k+1)*2*self.nx; ind2 = (k+2)*2*self.nx
self.X[ind1:ind2,:] = self.X[ind0:ind1,:].matmul(RM)
self.n[ind1:ind2,:] = self.n[ind0:ind1,:].matmul(RM)
self.Dright = UU(self.X,[0,0],prob).reshape(-1,1)
class TESET():
def __init__(self, radius, nx,prob):
self.dim = 2
self.radius = radius
self.nx = nx
self.hx = self.radius/(nx*np.sqrt(3.0))
self.size = 6 * (self.nx - 1)*(self.nx + 0) + 1
self.X = torch.zeros(self.size,self.dim)
m = 0
for i in range(self.nx - 1):
for j in range(self.nx + 0):
self.X[m,0] = (1 + i + 0.5*j) * self.hx
self.X[m,1] = j * 0.5*3**0.5*self.hx
m = m + 1
th = np.pi/3
RM = torch.Tensor([np.cos(th),np.sin(th),-np.sin(th),np.cos(th)]).reshape(2,2)
for k in range(5):
ind0 = k*(self.nx - 1)*(self.nx + 0)
ind1 = (k+1)*(self.nx - 1)*(self.nx + 0)
ind2 = (k+2)*(self.nx - 1)*(self.nx + 0)
self.X[ind1:ind2,:] = self.X[ind0:ind1,:].matmul(RM)
self.u_acc = UU(self.X,[0,0],prob).reshape(-1,1)
class LEN():
def __init__(self,bdset):
self.n = bdset.nx
self.X = bdset.X
self.radius = bdset.radius
self.dim = 2
self.mu = 1
self.num = 6
self.ee = 1e-1
self.L_max = 1.0
self.Nodes()
self.xt = torch.tensor([[0,0.0]])
self.L_max = max(self.forward(self.xt))
def Nodes(self):
self.inp = []
for i in range(self.num):
nodes = torch.zeros(2*(2*self.n),self.dim)
inds = (i*2*self.n)%(12*self.n);inde = inds + 2*self.n
nodes[:2*self.n,:] = self.X[inds:inde,:]
inds = (i*2*self.n + 6*self.n)%(12*self.n);inde = inds + 2*self.n
nodes[2*self.n:,:] = self.X[inds:inde,:]
self.inp.append(nodes)
def phi(self,X,Y):
dist = ((X - Y)**2).sum(1)
return (dist + self.ee)**(-0.5)
def coef(self,k):
nodes = self.inp[k]
size = nodes.shape[0] + nodes.shape[1] + 1
A = torch.zeros(size,size)
N = nodes.shape[0]
for i in range(N):
A[0:N,i] = self.phi(nodes,nodes[i,:])
A[:N,N:size - 1] = nodes
A[N:(size - 1),:N] = nodes.t()
A[:N,-1] = torch.ones(N)
A[-1,:N] = torch.ones(N)
b = torch.zeros(size,1)
b[:2*self.n,:] = torch.ones(2*self.n,1)
xishu,un = torch.solve(b,A)
#xishu = torch.linalg.solve(A,b)  # use this on the server / newer PyTorch, where torch.solve is deprecated
return xishu
def lk(self,k,X):
nodes = self.inp[k]#[60,2]
size = nodes.shape[0] + nodes.shape[1] + 1
N = nodes.shape[0]
value = torch.zeros(X.shape[0])
xishu = self.coef(k)
for i in range(N):
value += xishu[i]*self.phi(X,nodes[i,:])
for j in range(nodes.shape[1]):
value += xishu[N + j]*X[:,j]
return value + xishu[-1]
def forward(self,X):
L = 1.0
for i in range(self.num):
L = L*(1 - (1 - self.lk(i,X))**self.mu)
return (L/self.L_max).view(-1,1)
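# Note: the LEN class above builds a length factor by radial-basis-function interpolation:
# lk(k, X) interpolates the value 1 on one pair of adjacent boundary edges and 0 on the
# opposite pair, and forward multiplies the six resulting factors. It is defined here for
# reference but is not used by the penalty-based training below; the PFNN section relies
# on a length factor instead.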
class Net(torch.nn.Module):
def __init__(self, layers, dtype):
super(Net, self).__init__()
self.layers = layers
self.layers_hid_num = len(layers)-2
self.device = device
self.dtype = dtype
fc = []
for i in range(self.layers_hid_num):
fc.append(torch.nn.Linear(self.layers[i],self.layers[i+1]))
fc.append(torch.nn.Linear(self.layers[i+1],self.layers[i+1]))
fc.append(torch.nn.Linear(self.layers[-2],self.layers[-1]))
self.fc = torch.nn.Sequential(*fc)
for i in range(self.layers_hid_num):
self.fc[2*i].weight.data = self.fc[2*i].weight.data.type(dtype)
self.fc[2*i].bias.data = self.fc[2*i].bias.data.type(dtype)
self.fc[2*i + 1].weight.data = self.fc[2*i + 1].weight.data.type(dtype)
self.fc[2*i + 1].bias.data = self.fc[2*i + 1].bias.data.type(dtype)
self.fc[-1].weight.data = self.fc[-1].weight.data.type(dtype)
self.fc[-1].bias.data = self.fc[-1].bias.data.type(dtype)
def forward(self, x):
dev = x.device
for i in range(self.layers_hid_num):
h = torch.sin(self.fc[2*i](x))
h = torch.sin(self.fc[2*i+1](h))
temp = torch.eye(x.shape[-1],self.layers[i+1],dtype = self.dtype,device = dev)
x = h + x@temp
return self.fc[-1](x)
def pred(netf,X):
return netf.forward(X)
def error(u_pred, u_acc):
u_pred = u_pred.to(device)
u_acc = u_acc.to(device)
#return (((u_pred-u_acc)**2).sum()/(u_acc**2).sum()) ** (0.5)
return (((u_pred-u_acc)**2).mean()) ** (0.5)
# ----------------------------------------------------------------------------------------------------
def loadtype(inset,bdset,teset,dtype):
inset.X = inset.X.type(dtype)
inset.right = inset.right.type(dtype)
inset.u_acc = inset.u_acc.type(dtype)
bdset.X = bdset.X.type(dtype)
bdset.Dright = bdset.Dright.type(dtype)
teset.X = teset.X.type(dtype)
teset.u_acc = teset.u_acc.type(dtype)
def loadcuda(inset,bdset,teset,netf):
netf = netf.to(device)
if inset.X.requires_grad == False:
inset.X.requires_grad = True
inset.X = inset.X.to(device)
inset.u_acc = inset.u_acc.to(device)
bdset.X = bdset.X.to(device)
inset.right = inset.right.to(device)
bdset.Dright = bdset.Dright.to(device)
teset.X = teset.X.to(device)
teset.u_acc = teset.u_acc.to(device)
def loadcpu(inset,bdset,teset,netf):
netf = netf.to('cpu')
#inset.X.requires_grad = True
inset.X = inset.X.to('cpu')
inset.u_acc = inset.u_acc.to('cpu')
bdset.X = bdset.X.to('cpu')
inset.right = inset.right.to('cpu')
bdset.Dright = bdset.Dright.to('cpu')
teset.X = teset.X.to('cpu')
teset.u_acc = teset.u_acc.to('cpu')
def Lossf(netf,inset,bdset):
if inset.X.requires_grad is not True:
inset.X.requires_grad = True
insetF = netf.forward(inset.X)
# print('insetF device: {}'.format(insetF.device))
insetFx, = torch.autograd.grad(insetF, inset.X, create_graph=True, retain_graph=True,
grad_outputs=torch.ones(inset.size,1).to(device))
u_in = insetF  # in this penalty formulation the network output itself is the approximate solution
ux = insetFx   # its first derivatives with respect to the inputs
taux, = torch.autograd.grad(ux[:,0:1], inset.X, create_graph=True, retain_graph=True,
grad_outputs=torch.ones(inset.size,1).to(device))
tauy, = torch.autograd.grad(ux[:,1:2], inset.X, create_graph=True, retain_graph=True,
grad_outputs=torch.ones(inset.size,1).to(device))
out_in = ((taux[:,0:1] + tauy[:,1:2] + inset.right)**2 + (taux[:,1:2] - tauy[:,0:1])**2).mean()
ub = netf.forward(bdset.X)
out_b = ((ub - bdset.Dright)**2).mean()
beta = 5e2
loss = out_in + beta*out_b
loss = torch.sqrt(loss)
return loss
# Train neural network f
def Trainf(netf, inset, bdset, optimf, epochf):
print('train neural network f')
ERROR,BUZHOU = [],[]
lossf = Lossf(netf,inset,bdset)
lossoptimal = lossf
trainerror = error(netf.forward(inset.X), inset.u_acc)
print('epoch: %d, loss: %.3e, trainerror: %.3e'
%(0, lossf.item(), trainerror.item()))
torch.save(netf.state_dict(),'best_netf.pkl')
cycle = 100
for i in range(epochf):
st = time.time()
'''
for j in range(cycle):
optimf.zero_grad()
lossf = Lossf(netf,inset)
lossf.backward()
optimf.step()
'''
def closure():
optimf.zero_grad()
lossf = Lossf(netf,inset,bdset)
lossf.backward()
return lossf
optimf.step(closure)
lossf = Lossf(netf,inset,bdset)
if lossf < lossoptimal:
lossoptimal = lossf
torch.save(netf.state_dict(),'best_netf.pkl')
ela = time.time() - st
trainerror = error(netf.forward(inset.X), inset.u_acc)
ERROR.append(trainerror.item())
BUZHOU.append((i + 1)*cycle)
print('epoch:%d,lossf:%.3e,Linf error:%.3e,train error:%.3e,time:%.2f'%
((i + 1)*cycle,lossf.item(),max(abs(netf.forward(inset.X) - inset.u_acc)),trainerror,ela))
return ERROR,BUZHOU
prob = 3
radius = 2.0
nx_tr = 40
nx_te = 100
epochf = 20
lr = 1e0
tests_num = 1
#dtype = torch.float32
dtype = torch.float64
# ------------------------------------------------------------------------------------------------
testerror = torch.zeros(tests_num)
inset = INSET(radius,nx_tr,prob)
bdset = BDSET(radius,nx_te,prob)
teset = TESET(radius,nx_te,prob)
loadtype(inset,bdset,teset,dtype)
lay = [2,20,20,1];netf = Net(lay,dtype)
loadcuda(inset,bdset,teset,netf)
for it in range(tests_num):
'''
optimf = torch.optim.LBFGS(netf.parameters(), lr=lr,max_iter = 100,history_size=2500,
line_search_fn = 'strong_wolfe')
'''
optimf = bfgs.BFGS(netf.parameters(), lr=lr,max_iter = 100,history_size=2500,
line_search_fn = 'strong_wolfe')
start_time = time.time()
ERROR,BUZHOU = Trainf(netf, inset, bdset, optimf, epochf)
elapsed = time.time() - start_time
print('Train time: %.2f' %(elapsed))
netf.load_state_dict(torch.load('best_netf.pkl'))
te_U = pred(netf,teset.X)
testerror[it] = error(te_U, teset.u_acc)
print('prob:%d,Linf error:%.4e,testerror = %.3e\n' %(prob,max(abs(te_U - teset.u_acc)),testerror[it].item()))
print(testerror.data)
testerror_mean = testerror.mean()
testerror_std = testerror.std()
print('testerror_mean = %.3e, testerror_std = %.3e'
%(testerror_mean.item(),testerror_std.item()))
loadcpu(inset,bdset,teset,netf)
torch.cuda.empty_cache()
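matplotlib is imported above but never called; the following is a minimal sketch for visualizing the prediction on the test set, assuming the script above has just run so te_U, teset, and prob are still in scope.
import matplotlib.pyplot as plt
from matplotlib import cm
X_np = teset.X.detach().cpu().numpy()        # test points on the hexagon
U_np = te_U.detach().cpu().numpy().ravel()   # predicted solution values
plt.scatter(X_np[:, 0], X_np[:, 1], c=U_np, cmap=cm.viridis, s=5)
plt.colorbar(); plt.axis('equal')
plt.title('predicted u, prob=%d' % prob)
plt.show()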
Solving the Poisson equation with PFNN
Because the length factor here is hard to choose well, the solution quality is poor; readers will need to tune the parameters themselves.
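In the PFNN (penalty-free) formulation below, the approximate solution is split as

    u(x) ≈ G(x) + L(x)·F(x)

(see the pred function): netg learns G by fitting the Dirichlet data on the boundary, L is a length factor intended to vanish on the boundary, and netf learns F from the PDE residual in the interior, so no boundary penalty weight is needed.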
import torch
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import torch.nn as nn
import torch.nn.functional as F
import bfgs
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
#device = 'cpu'
np.random.seed(1234)
torch.manual_seed(1234)
def UU(X, order,prob):
if prob==1:
temp = 10*(X[:,0]+X[:,1])**2 + (X[:,0]-X[:,1])**2 + 0.5
if order[0]==0 and order[1]==0:
return torch.log(temp)
if order[0]==1 and order[1]==0:
return temp**(-1) * (20*(X[:,0]+X[:,1]) + 2*(X[:,0]-X[:,1]))
if order[0]==0 and order[1]==1:
return temp**(-1) * (20*(X[:,0]+X[:,1]) - 2*(X[:,0]-X[:,1]))
if order[0]==2 and order[1]==0:
return - temp**(-2) * (20*(X[:,0]+X[:,1])+2*(X[:,0]-X[:,1])) ** 2 \
+ temp**(-1) * (22)
if order[0]==1 and order[1]==1:
return - temp**(-2) * (20*(X[:,0]+X[:,1])+2*(X[:,0]-X[:,1])) \
* (20*(X[:,0]+X[:,1])-2*(X[:,0]-X[:,1])) \
+ temp**(-1) * (18)
if order[0]==0 and order[1]==2:
return - temp**(-2) * (20*(X[:,0]+X[:,1])-2*(X[:,0]-X[:,1])) ** 2 \
+ temp**(-1) * (22)
if prob==2:
if order[0]==0 and order[1]==0:
return (X[:,0]*X[:,0]*X[:,0]-X[:,0]) * \
0.5*(torch.exp(2*X[:,1])+torch.exp(-2*X[:,1]))
if order[0]==1 and order[1]==0:
return (3*X[:,0]*X[:,0]-1) * \
0.5*(torch.exp(2*X[:,1])+torch.exp(-2*X[:,1]))
if order[0]==0 and order[1]==1:
return (X[:,0]*X[:,0]*X[:,0]-X[:,0]) * \
(torch.exp(2*X[:,1])-torch.exp(-2*X[:,1]))
if order[0]==2 and order[1]==0:
return (6*X[:,0]) * \
0.5*(torch.exp(2*X[:,1])+torch.exp(-2*X[:,1]))
if order[0]==1 and order[1]==1:
return (3*X[:,0]*X[:,0]-1) * \
(torch.exp(2*X[:,1])-torch.exp(-2*X[:,1]))
if order[0]==0 and order[1]==2:
return (X[:,0]*X[:,0]*X[:,0]-X[:,0]) * \
2*(torch.exp(2*X[:,1])+torch.exp(-2*X[:,1]))
if prob==3:
temp1 = X[:,0]*X[:,0] - X[:,1]*X[:,1]
temp2 = X[:,0]*X[:,0] + X[:,1]*X[:,1] + 0.1
if order[0]==0 and order[1]==0:
return temp1 * temp2**(-1)
if order[0]==1 and order[1]==0:
return (2*X[:,0]) * temp2**(-1) + \
temp1 * (-1)*temp2**(-2) * (2*X[:,0])
if order[0]==0 and order[1]==1:
return (-2*X[:,1]) * temp2**(-1) + \
temp1 * (-1)*temp2**(-2) * (2*X[:,1])
if order[0]==2 and order[1]==0:
return (2) * temp2**(-1) + \
2 * (2*X[:,0]) * (-1)*temp2**(-2) * (2*X[:,0]) + \
temp1 * (2)*temp2**(-3) * (2*X[:,0])**2 + \
temp1 * (-1)*temp2**(-2) * (2)
if order[0]==1 and order[1]==1:
return (2*X[:,0]) * (-1)*temp2**(-2) * (2*X[:,1]) + \
(-2*X[:,1]) * (-1)*temp2**(-2) * (2*X[:,0]) + \
temp1 * (2)*temp2**(-3) * (2*X[:,0]) * (2*X[:,1])
if order[0]==0 and order[1]==2:
return (-2) * temp2**(-1) + \
2 * (-2*X[:,1]) * (-1)*temp2**(-2) * (2*X[:,1]) + \
temp1 * (2)*temp2**(-3) * (2*X[:,1])**2 + \
temp1 * (-1)*temp2**(-2) * (2)
if prob==4:
temp = torch.exp(-4*X[:,1]*X[:,1])
if order[0]==0 and order[1]==0:
ind = (X[:,0]<=0).float()
return ind * ((X[:,0]+1)**4-1) * temp + \
(1-ind) * (-(-X[:,0]+1)**4+1) * temp
if order[0]==1 and order[1]==0:
ind = (X[:,0]<=0).float()
return ind * (4*(X[:,0]+1)**3) * temp + \
(1-ind) * (4*(-X[:,0]+1)**3) * temp
if order[0]==0 and order[1]==1:
ind = (X[:,0]<=0).float()
return ind * ((X[:,0]+1)**4-1) * (temp*(-8*X[:,1])) + \
(1-ind) * (-(-X[:,0]+1)**4+1) * (temp*(-8*X[:,1]))
if order[0]==2 and order[1]==0:
ind = (X[:,0]<=0).float()
return ind * (12*(X[:,0]+1)**2) * temp + \
(1-ind) * (-12*(-X[:,0]+1)**2) * temp
if order[0]==1 and order[1]==1:
ind = (X[:,0]<=0).float()
return ind * (4*(X[:,0]+1)**3) * (temp*(-8*X[:,1])) + \
(1-ind) * (4*(-X[:,0]+1)**3) * (temp*(-8*X[:,1]))
if order[0]==0 and order[1]==2:
ind = (X[:,0]<=0).float()
return ind * ((X[:,0]+1)**4-1) * (temp*(64*X[:,1]*X[:,1]-8)) + \
(1-ind) * (-(-X[:,0]+1)**4+1) * (temp*(64*X[:,1]*X[:,1]-8))
def FF(X,prob):
return -UU(X,[2,0],prob) - UU(X,[0,2],prob)
class INSET():
def __init__(self, radius, nx,prob):
self.dim = 2
self.radius = radius
self.nx = nx
self.hx = self.radius/(nx*np.sqrt(3.0))
self.size = 6 * (self.nx - 1)*(self.nx + 0) + 1
self.X = torch.zeros(self.size,self.dim)
m = 0
for i in range(self.nx - 1):
for j in range(self.nx + 0):
self.X[m,0] = (1 + i + 0.5*j) * self.hx
self.X[m,1] = j * 0.5*3**0.5*self.hx
m = m + 1
th = np.pi/3
RM = torch.Tensor([np.cos(th),np.sin(th),-np.sin(th),np.cos(th)]).reshape(2,2)
for k in range(5):
ind0 = k*(self.nx - 1)*(self.nx + 0)
ind1 = (k+1)*(self.nx - 1)*(self.nx + 0)
ind2 = (k+2)*(self.nx - 1)*(self.nx + 0)
self.X[ind1:ind2,:] = self.X[ind0:ind1,:].matmul(RM)
self.right = FF(self.X,prob).reshape(-1,1)
self.u_acc = UU(self.X,[0,0],prob).reshape(-1,1)
class BDSET():
def __init__(self, radius, nx,prob):
self.dim = 2
self.radius = radius
self.nx = nx
self.hx = self.radius/(nx*np.sqrt(3.0))
self.lenth = 12*self.nx*self.hx
self.size = 12*self.nx
self.X = torch.zeros(self.size,self.dim)
self.n = torch.zeros(self.size,self.dim)
m = 0
for i in range(self.nx):
self.X[m,0] = -(i+0.5) * 0.5*self.hx
self.X[m,1] = self.radius - (i+0.5) * 0.5*3**0.5*self.hx
self.n[m,0] = -0.5*3**0.5
self.n[m,1] = 0.5
m = m+1
for i in range(self.nx):
self.X[m,0] = -0.5*self.radius/3**0.5 - (i+0.5) * self.hx
self.X[m,1] = 0.5*self.radius
self.n[m,0] = 0.0
self.n[m,1] = 1.0
m = m+1
th = np.pi/3
RM = torch.Tensor([np.cos(th),np.sin(th),-np.sin(th),np.cos(th)]).reshape(2,2)
for k in range(5):
ind0 = k*2*self.nx; ind1 = (k+1)*2*self.nx; ind2 = (k+2)*2*self.nx
self.X[ind1:ind2,:] = self.X[ind0:ind1,:].matmul(RM)
self.n[ind1:ind2,:] = self.n[ind0:ind1,:].matmul(RM)
self.Dright = UU(self.X,[0,0],prob).reshape(-1,1)
class TESET():
def __init__(self, radius, nx,prob):
self.dim = 2
self.radius = radius
self.nx = nx
self.hx = self.radius/(nx*np.sqrt(3.0))
self.size = 6 * (self.nx - 1)*(self.nx + 0) + 1
self.X = torch.zeros(self.size,self.dim)
m = 0
for i in range(self.nx - 1):
for j in range(self.nx + 0):
self.X[m,0] = (1 + i + 0.5*j) * self.hx
self.X[m,1] = j * 0.5*3**0.5*self.hx
m = m + 1
th = np.pi/3
RM = torch.Tensor([np.cos(th),np.sin(th),-np.sin(th),np.cos(th)]).reshape(2,2)
for k in range(5):
ind0 = k*(self.nx - 1)*(self.nx + 0)
ind1 = (k+1)*(self.nx - 1)*(self.nx + 0)
ind2 = (k+2)*(self.nx - 1)*(self.nx + 0)
self.X[ind1:ind2,:] = self.X[ind0:ind1,:].matmul(RM)
self.u_acc = UU(self.X,[0,0],prob).reshape(-1,1)
'''
class LEN():
def __init__(self,bdset,n_b,n_i,mu):#n_b = 3,n_i = 4
self.n = bdset.nx
self.X = bdset.X
self.radius = bdset.radius
self.dim = 2
self.mu = mu
self.n_i = n_i
self.n_b = n_b
self.ee = 1e-4
self.L_max = 1.0
self.Nodes()
self.xt = torch.tensor([[0,0.0]])
def Nodes(self):
self.inp = []
for i in range(self.n_i):
nodes = torch.zeros(2*(self.n_b*self.n),self.dim)
inds = (i*self.n_b*self.n)%(12*self.n);inde = inds + self.n_b*self.n
nodes[:self.n_b*self.n,:] = self.X[inds:inde,:]
inds = (i*self.n_b*self.n + 6*self.n)%(12*self.n);inde = inds + self.n_b*self.n
nodes[self.n_b*self.n:,:] = self.X[inds:inde,:]
self.inp.append(nodes)
def phi(self,X,Y):
dist = ((X - Y)**2).sum(1)
return (dist + self.ee)**(-0.5)
def coef(self,k):
nodes = self.inp[k]
size = nodes.shape[0] + nodes.shape[1] + 1
A = torch.zeros(size,size)
N = nodes.shape[0]
for i in range(N):
A[0:N,i] = self.phi(nodes,nodes[i,:])
A[:N,N:size - 1] = nodes
A[N:(size - 1),:N] = nodes.t()
A[:N,-1] = torch.ones(N)
A[-1,:N] = torch.ones(N)
b = torch.zeros(size,1)
b[:self.n_b*self.n,:] = torch.ones(self.n_b*self.n,1)
xishu,un = torch.solve(b,A)
#xishu = torch.linalg.solve(A,b)
return xishu
def lk(self,k,X):
dev = X.device
nodes = self.inp[k].to(dev)
size = nodes.shape[0] + nodes.shape[1] + 1
N = nodes.shape[0]
value = torch.zeros(X.shape[0],device = dev)
xishu = self.coef(k).to(dev)
for i in range(N):
value += xishu[i]*self.phi(X,nodes[i,:])
for j in range(nodes.shape[1]):
value += xishu[N + j]*X[:,j]
tmp = value + xishu[-1]
return tmp
def forward(self,X):
L = 1.0
h = 1e-1
dev = X.device
for i in range(self.n_i):
tmp = (1 - (1 - self.lk(i,X))**self.mu)/h
temp = (1 - (1 - self.lk(i,self.xt.to(dev)))**self.mu)/h
L = L*(tmp)**2
return (L/self.L_max).to(dev).view(-1,1)
'''
class LEN():
def __init__(self,mu):
self.mu = mu
self.xt = torch.tensor([[0,0.0]])
def phi(self,k,order,X):
if k == 0:
ind00 = (X[:,1] > 0);ind01 = (X[:,1] > 1)
ind10 = (np.sqrt(3)*X[:,0] - X[:,1] > 0);ind11 = (np.sqrt(3)*X[:,0] - X[:,1] - 2 > 0)
ind = (ind00*~ind01*ind10*~ind11).float()
if order == [0,0]:
return ind*((X[:,1] - 1)*(np.sqrt(3)*X[:,0] - X[:,1] - 2))**2
elif order == [1,0]:
return ind*2*(X[:,1] - 1)*(np.sqrt(3)*X[:,0] - X[:,1] - 2)*np.sqrt(3)*(X[:,1] - 1)
elif order == [0,1]:
return ind*2*(X[:,1] - 1)*(np.sqrt(3)*X[:,0] - X[:,1] - 2)*(np.sqrt(3)*X[:,0] - 2*X[:,1] - 1)
elif k == 1:
ind00 = (X[:,1] > 0);ind01 = (X[:,1] > - 1)
ind10 = (np.sqrt(3)*X[:,0] + X[:,1] > 0);ind11 = (np.sqrt(3)*X[:,0] + X[:,1] - 2 > 0)
ind = (~ind00*ind01*ind10*~ind11).float()
if order == [0,0]:
return ind*((X[:,1] + 1)*(np.sqrt(3)*X[:,0] + X[:,1] - 2))**2
elif order == [1,0]:
return ind*2*(X[:,1] + 1)*(np.sqrt(3)*X[:,0] + X[:,1] - 2)*np.sqrt(3)*(X[:,1] + 1)
elif order == [0,1]:
return ind*2*(X[:,1] + 1)*(np.sqrt(3)*X[:,0] + X[:,1] - 2)*(np.sqrt(3)*X[:,0] + 2*X[:,1] - 1)
elif k == 2:
ind00 = (np.sqrt(3)*X[:,0] + X[:,1] > 0);ind01 = (np.sqrt(3)*X[:,0] + X[:,1] + 2 > 0)
ind10 = (np.sqrt(3)*X[:,0] - X[:,1] - 2 > 0);ind11 = (np.sqrt(3)*X[:,0] - X[:,1] > 0)
ind = (~ind00*ind01*~ind10*ind11).float()
if order == [0,0]:
return ind*((np.sqrt(3)*X[:,0] - X[:,1] - 2)*(np.sqrt(3)*X[:,0] + X[:,1] + 2))**2
elif order == [1,0]:
return ind*2*(np.sqrt(3)*X[:,0] - X[:,1] - 2)*(np.sqrt(3)*X[:,0] + X[:,1] + 2)*6*X[:,0]
elif order == [0,1]:
return ind*2*(np.sqrt(3)*X[:,0] - X[:,1] - 2)*(np.sqrt(3)*X[:,0] + X[:,1] + 2)*(-2*X[:,1] - 4)
elif k == 3:
ind00 = (X[:,1] > 0);ind01 = (X[:,1] > - 1)
ind10 = (np.sqrt(3)*X[:,0] - X[:,1] > 0);ind11 = (np.sqrt(3)*X[:,0] - X[:,1] + 2 > 0)
ind = (~ind00*ind01*~ind10*ind11).float()
if order == [0,0]:
return ind*((X[:,1] + 1)*(np.sqrt(3)*X[:,0] - X[:,1] + 2))**2
elif order == [1,0]:
return ind*2*(X[:,1] + 1)*(np.sqrt(3)*X[:,0] - X[:,1] + 2)*np.sqrt(3)*(X[:,1] + 1)
elif order == [0,1]:
return ind*2*(X[:,1] + 1)*(np.sqrt(3)*X[:,0] - X[:,1] + 2)*(np.sqrt(3)*X[:,0] - 2*X[:,1] + 1)
elif k == 4:
ind00 = (X[:,1] > 0);ind01 = (X[:,1] > 1)
ind10 = (np.sqrt(3)*X[:,0] + X[:,1] > 0);ind11 = (np.sqrt(3)*X[:,0] + X[:,1] + 2 > 0)
ind = (ind00*~ind01*~ind10*ind11).float()
if order == [0,0]:
return ind*((X[:,1] - 1)*(np.sqrt(3)*X[:,0] + X[:,1] + 2))**2
elif order == [1,0]:
return ind*2*(X[:,1] - 1)*(np.sqrt(3)*X[:,0] + X[:,1] + 2)*np.sqrt(3)*(X[:,1] - 1)
elif order == [0,1]:
return ind*2*(X[:,1] - 1)*(np.sqrt(3)*X[:,0] + X[:,1] + 2)*(np.sqrt(3)*X[:,0] + 2*X[:,1] + 1)
elif k == 5:
ind00 = (np.sqrt(3)*X[:,0] + X[:,1] > 0);ind01 = (np.sqrt(3)*X[:,0] + X[:,1] - 2 > 0)
ind10 = (np.sqrt(3)*X[:,0] - X[:,1] + 2 > 0);ind11 = (np.sqrt(3)*X[:,0] - X[:,1] > 0)
ind = (ind00*~ind01*ind10*~ind11).float()
if order == [0,0]:
return ind*((np.sqrt(3)*X[:,0] - X[:,1] + 2)*(np.sqrt(3)*X[:,0] + X[:,1] - 2))**2
elif order == [1,0]:
return ind*2*(np.sqrt(3)*X[:,0] - X[:,1] + 2)*(np.sqrt(3)*X[:,0] + X[:,1] - 2)*6*X[:,0]
elif order == [0,1]:
return ind*2*(np.sqrt(3)*X[:,0] - X[:,1] + 2)*(np.sqrt(3)*X[:,0] + X[:,1] - 2)*(-2*X[:,1] + 4)
def forward(self,order,X):
L = 0.0
L_max = 0.0
dev = X.device
for i in range(6):
L = L + self.phi(i,order,X)
L_max = L_max + self.phi(i,order,self.xt.to(X.device))
return (L/L_max).reshape(-1,1)
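# The closed-form LEN above assembles the length factor from six piecewise pieces:
# on the k-th sector of the hexagon, phi(k, order, X) is the squared product of the two
# linear boundary-line factors that delimit that sector (order [1,0] / [0,1] select its
# x- / y-derivative); forward sums the six pieces and divides by the same sum evaluated
# at the reference point xt = (0, 0).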
class Net(torch.nn.Module):
def __init__(self, layers, dtype):
super(Net, self).__init__()
self.layers = layers
self.layers_hid_num = len(layers)-2
self.device = device
self.dtype = dtype
fc = []
for i in range(self.layers_hid_num):
fc.append(torch.nn.Linear(self.layers[i],self.layers[i+1]))
fc.append(torch.nn.Linear(self.layers[i+1],self.layers[i+1]))
fc.append(torch.nn.Linear(self.layers[-2],self.layers[-1]))
self.fc = torch.nn.Sequential(*fc)
for i in range(self.layers_hid_num):
self.fc[2*i].weight.data = self.fc[2*i].weight.data.type(dtype)
self.fc[2*i].bias.data = self.fc[2*i].bias.data.type(dtype)
self.fc[2*i + 1].weight.data = self.fc[2*i + 1].weight.data.type(dtype)
self.fc[2*i + 1].bias.data = self.fc[2*i + 1].bias.data.type(dtype)
self.fc[-1].weight.data = self.fc[-1].weight.data.type(dtype)
self.fc[-1].bias.data = self.fc[-1].bias.data.type(dtype)
def forward(self, x):
dev = x.device
for i in range(self.layers_hid_num):
h = torch.sin(self.fc[2*i](x))
h = torch.sin(self.fc[2*i+1](h))
temp = torch.eye(x.shape[-1],self.layers[i+1],dtype = self.dtype,device = dev)
x = h + x@temp
return self.fc[-1](x)
def pred(netf,netg,lenth,X):
return netf.forward(X)*lenth.forward([0,0],X) + netg.forward(X)
'''
def pred(netf,netg,lenth,X):
return netf.forward(X)*lenth.forward(X) + netg.forward(X)
'''
def error(u_pred, u_acc):
u_pred = u_pred.to(device)
u_acc = u_acc.to(device)
#return (((u_pred-u_acc)**2).sum()/(u_acc**2).sum()) ** (0.5)
return (((u_pred-u_acc)**2).mean()) ** (0.5)
# ----------------------------------------------------------------------------------------------------
def loadtype(inset,bdset,teset,dtype):
inset.X = inset.X.type(dtype)
inset.right = inset.right.type(dtype)
inset.u_acc = inset.u_acc.type(dtype)
bdset.X = bdset.X.type(dtype)
bdset.Dright = bdset.Dright.type(dtype)
teset.X = teset.X.type(dtype)
teset.u_acc = teset.u_acc.type(dtype)
def loadcuda(inset,bdset,teset,netf,netg):
netf = netf.to(device)
netg = netg.to(device)
if inset.X.requires_grad == False:
inset.X.requires_grad = True
inset.X = inset.X.to(device)
inset.u_acc = inset.u_acc.to(device)
bdset.X = bdset.X.to(device)
inset.right = inset.right.to(device)
bdset.Dright = bdset.Dright.to(device)
teset.X = teset.X.to(device)
teset.u_acc = teset.u_acc.to(device)
def loadcpu(inset,bdset,teset,netf,netg):
netf = netf.to('cpu')
netg = netg.to('cpu')
#inset.X.requires_grad = True
inset.X = inset.X.to('cpu')
inset.u_acc = inset.u_acc.to('cpu')
bdset.X = bdset.X.to('cpu')
inset.right = inset.right.to('cpu')
bdset.Dright = bdset.Dright.to('cpu')
teset.X = teset.X.to('cpu')
teset.u_acc = teset.u_acc.to('cpu')
def Lossg(netg,bdset):
ub = netg.forward(bdset.X)
out_b = (ub - bdset.Dright)**2
return torch.sqrt(out_b.mean())
def Lossf(netf,inset):
if inset.X.requires_grad is not True:
inset.X.requires_grad = True
insetF = netf.forward(inset.X)
# print('insetF device: {}'.format(insetF.device))
insetFx, = torch.autograd.grad(insetF, inset.X, create_graph=True, retain_graph=True,
grad_outputs=torch.ones(inset.size,1).to(device))
taux, = torch.autograd.grad(insetFx[:,0:1], inset.X, create_graph=True, retain_graph=True,
grad_outputs=torch.ones(inset.size,1).to(device))
tauy, = torch.autograd.grad(insetFx[:,1:2], inset.X, create_graph=True, retain_graph=True,
grad_outputs=torch.ones(inset.size,1).to(device))
uxx = taux[:,0:1]*inset.L + 2*insetFx[:,0:1]*inset.Lx + inset.Lxx*insetF + inset.Gxx
uyy = tauy[:,1:2]*inset.L + 2*insetFx[:,1:2]*inset.Ly + inset.Lyy*insetF + inset.Gyy
inset.res = (uxx + uyy + inset.right)**2 + inset.de
out_in = (inset.res).mean()
return (out_in)
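# Lossf expands the PDE residual for the ansatz u = G + L*F by the chain rule,
#   u_xx = F_xx*L + 2*F_x*L_x + L_xx*F + G_xx   (and analogously for u_yy),
# using the values and derivatives of L and G precomputed in Train below, so that only F
# is differentiated by autograd during training; inset.de is a precomputed constant term.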
def Traing(netg,bdset,optimg,max_epoch):
print('train neural network g')
lossbest = Lossg(netg,bdset)
for sub_epoch in range(max_epoch):
def closure():
loss = Lossg(netg,bdset)
optimg.zero_grad()
loss.backward()
return loss
optimg.step(closure)
loss = Lossg(netg,bdset)
if loss < lossbest:
lossbest = loss
torch.save(netg.state_dict(),'best_netg.pkl')
print('epoch:%d,loss_u:%.3e'%(sub_epoch + 1,loss.item()))
# Train neural network f
def Trainf(netf,netg, inset, lenth,optimf, epochf):
print('train neural network f')
ERROR,BUZHOU = [],[]
lossf = Lossf(netf,inset)
lossoptimal = lossf
trainerror = error(pred(netf,netg,lenth,inset.X), inset.u_acc)
print('epoch: %d, loss: %.3e, trainerror: %.3e'
%(0, lossf.item(), trainerror.item()))
torch.save(netf.state_dict(),'best_netf.pkl')
cycle = 100
for i in range(epochf):
st = time.time()
def closure():
optimf.zero_grad()
lossf = Lossf(netf,inset)
lossf.backward()
return lossf
optimf.step(closure)
lossf = Lossf(netf,inset)
if lossf < lossoptimal:
lossoptimal = lossf
torch.save(netf.state_dict(),'best_netf.pkl')
ela = time.time() - st
trainerror = error(pred(netf,netg,lenth,inset.X), inset.u_acc)
ERROR.append(trainerror.item())
BUZHOU.append((i + 1)*cycle)
print('epoch:%d,lossf:%.3e,Linf error:%.3e,train error:%.3e,time:%.2f'%
((i + 1)*cycle,lossf.item(),max(abs(pred(netf,netg,lenth,inset.X) - inset.u_acc)),trainerror,ela))
return ERROR,BUZHOU
def Train(netf,netg, inset, bdset, lenth,optimf, optimg,epochf,max_epoch):
if inset.X.requires_grad is not True:
inset.X.requires_grad = True
#inset.L = lenth.forward(inset.X)
inset.L = lenth.forward([0,0],inset.X)
inset.dL, = torch.autograd.grad(inset.L, inset.X, create_graph=True, retain_graph=True,
grad_outputs=torch.ones(inset.size,1).to(device))
inset.Lx = inset.dL[:,0:1]
inset.Ly = inset.dL[:,1:2]
inset.dLxx, = torch.autograd.grad(inset.Lx, inset.X, create_graph=True, retain_graph=True,
grad_outputs=torch.ones(inset.size,1).to(device))
inset.dLyy, = torch.autograd.grad(inset.Ly, inset.X, create_graph=True, retain_graph=True,
grad_outputs=torch.ones(inset.size,1).to(device))
inset.Lxx = inset.dLxx[:,0:1];inset.Lyy = inset.dLyy[:,1:2]
inset.L = inset.L.data
inset.Lx = inset.Lx.data;inset.Ly = inset.Ly.data
inset.Lxx = inset.Lxx.data;inset.Lyy = inset.Lyy.data;
inset.dL = inset.dL.data;inset.dLxx = inset.dLxx.data;inset.dLyy = inset.dLyy.data
Traing(netg,bdset,optimg,max_epoch)
netg.load_state_dict(torch.load('best_netg.pkl'))
err = pred(netf,netg,lenth,bdset.X) - bdset.Dright
print(max(abs(err)))
err_g = netg.forward(bdset.X) - bdset.Dright
print(max(abs(err_g)))
inset.G = netg.forward(inset.X)
inset.Gx, = torch.autograd.grad(inset.G, inset.X, create_graph=True, retain_graph=True,
grad_outputs=torch.ones(inset.size,1).to(device))
inset.dGxx, = torch.autograd.grad(inset.Gx[:,0:1], inset.X, create_graph=True, retain_graph=True,
grad_outputs=torch.ones(inset.size,1).to(device))
inset.dGyy, = torch.autograd.grad(inset.Gx[:,1:2], inset.X, create_graph=True, retain_graph=True,
grad_outputs=torch.ones(inset.size,1).to(device))
inset.Gxx = inset.dGxx[:,0:1];inset.Gyy = inset.dGyy[:,1:2]
inset.Gxx = inset.Gxx.data;inset.Gyy = inset.Gyy.data
inset.dGxx = inset.dGxx.data;inset.dGyy = inset.dGyy.data
inset.de = (inset.dLxx[:,1:2] - inset.dLyy[:,0:1])**2 + (inset.dGxx[:,1:2] - inset.dGyy[:,0:1])**2
inset.de = inset.de.data
ERROR,BUZHOU = [],[]
ERROR,BUZHOU = Trainf(netf,netg, inset, lenth,optimf, epochf)
netf.load_state_dict(torch.load('best_netf.pkl'))
return ERROR,BUZHOU
prob = 3
radius = 2.0
nx_tr = 40
nx_te = 100
max_epoch = 20
epochf = 20
lr_g = 1e0
lr_f = 1e0
tests_num = 1
#dtype = torch.float32
dtype = torch.float64
# ------------------------------------------------------------------------------------------------
testerror = torch.zeros(tests_num)
inset = INSET(radius,nx_tr,prob)
bdset = BDSET(radius,nx_te,prob)
teset = TESET(radius,nx_te,prob)
loadtype(inset,bdset,teset,dtype)
layg = [2,20,20,1];netg = Net(layg,dtype)
layf = [2,20,20,1];netf = Net(layf,dtype)
loadcuda(inset,bdset,teset,netf,netg)
'''
n_b = 3;n_i = 4;mu = 1
lenth = LEN(bdset,n_b,n_i,mu)
'''
mu = 1
lenth = LEN(mu)
st = time.time()
for it in range(tests_num):
'''
optimg = torch.optim.LBFGS(netg.parameters(), lr=lr_g,max_iter = 100,history_size=2500,
line_search_fn = 'strong_wolfe')
optimf = torch.optim.LBFGS(netf.parameters(), lr=lr_f,max_iter = 100,history_size=2500,
line_search_fn = 'strong_wolfe')
'''
optimg = bfgs.BFGS(netg.parameters(), lr=lr_g,max_iter = 100,history_size=2500,
line_search_fn = 'strong_wolfe')
optimf = bfgs.BFGS(netf.parameters(), lr=lr_f,max_iter = 100,history_size=2500,
line_search_fn = 'strong_wolfe')
start_time = time.time()
ERROR,BUZHOU = Train(netf,netg, inset, bdset, lenth,optimf, optimg,epochf,max_epoch)
elapsed = time.time() - start_time
print('Train time: %.2f' %(elapsed))
te_U = pred(netf,netg,lenth,teset.X)
testerror[it] = error(te_U, teset.u_acc)
print('prob:%d,Linf error:%.4e,testerror = %.3e\n' %(prob,max(abs(te_U - teset.u_acc)),testerror[it].item()))
print(testerror.data)
testerror_mean = testerror.mean()
testerror_std = testerror.std()
print('testerror_mean = %.3e, testerror_std = %.3e'
%(testerror_mean.item(),testerror_std.item()))
loadcpu(inset,bdset,teset,netf,netg)
torch.cuda.empty_cache()
ela = time.time() - st
print('train time:%.2f'%(ela))
Solving the minimal surface equation with mixed boundary conditions
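This last example treats the minimal surface equation on the same hexagonal domain with mixed boundary conditions. Reconstructed from UU and RR_N below: the exact solution is a catenoid-type surface depending on the parameter lamda, Dirichlet data u = g_D are prescribed on half of the boundary (DX), and the natural (Neumann) condition

    ∇u·n / sqrt(1 + |∇u|²) = g_N

is prescribed on the other half (NX), with g_N given by RR_N. The solver again uses the ansatz u = G + L·F, where netg fits the Dirichlet data and the length factor L vanishes on the Dirichlet part of the boundary.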
import torch
import time
import numpy as np
import matplotlib.pyplot as plt
import torch.nn as nn
# ----------------------------------------------------------------------------------------------------
# Generate Data
# Solution
def UU(X, lamda, order):
temp = lamda * 0.5*(torch.exp(X[:,0]/lamda)+torch.exp(-X[:,0]/lamda))
if order[0]==0 and order[1]==0:
return temp * torch.sin(torch.acos(X[:,1]/temp))
if order[0]==1 and order[1]==0:
temp_x = 0.5*(torch.exp(X[:,0]/lamda)-torch.exp(-X[:,0]/lamda));
return temp_x * torch.sin(torch.acos(X[:,1]/temp)) + \
temp * torch.cos(torch.acos(X[:,1]/temp)) * \
(-(1-(X[:,1]/temp)**2)**(-0.5)) * \
(-X[:,1]/temp**2) * temp_x
if order[0]==0 and order[1]==1:
return temp * torch.cos(torch.acos(X[:,1]/temp)) * \
(-(1-(X[:,1]/temp)**2)**(-0.5)) * (1/temp)
# Right-hand side of the Neumann boundary condition
def RR_N(X, n, lamda):
u_x1 = UU(X,lamda,[1,0]); u_x2 = UU(X,lamda,[0,1])
return 1/(1+u_x1*u_x1+u_x2*u_x2)**0.5 * (u_x1*n[:,0]+u_x2*n[:,1])
# ----------------------------------------------------------------------------------------------------
# Inner Set
class INSET():
def __init__(self, radius, nx, lamda):
self.dim = 2
self.radius = radius
self.nx = nx
self.hx = self.radius/3**0.5/self.nx
self.area = 6 * self.radius/3**0.5 * self.radius/2
self.size = 6 * 2*self.nx**2
self.X = torch.zeros(self.size,self.dim)
m = 0
for i in range(self.nx):
for j in range(self.nx):
self.X[2*m,0] = (0.5+i+0.5*j) * self.hx
self.X[2*m,1] = (j+1/3) * 0.5*3**0.5*self.hx
self.X[2*m+1,0] = (1+i+0.5*j) * self.hx
self.X[2*m+1,1] = (j+2/3) * 0.5*3**0.5*self.hx
m = m+1
th = np.pi/3
RM = torch.Tensor([np.cos(th),np.sin(th),-np.sin(th),np.cos(th)]).reshape(2,2)
for k in range(5):
ind0 = k*2*self.nx**2; ind1 = (k+1)*2*self.nx**2; ind2 = (k+2)*2*self.nx**2
self.X[ind1:ind2,:] = self.X[ind0:ind1,:].matmul(RM)
self.u_acc = UU(self.X,lamda,[0,0]).reshape(self.size,1)
# Boundary set
class BDSET():
def __init__(self, radius, nx, lamda):
self.dim = 2
self.radius = radius
self.nx = nx
self.hx = self.radius/3**0.5/self.nx
self.lenth = 12*self.nx*self.hx
self.size = 12*self.nx
self.X = torch.zeros(self.size,self.dim)
self.n = torch.zeros(self.size,self.dim)
m = 0
for i in range(self.nx):
self.X[m,0] = -(i+0.5) * 0.5*self.hx
self.X[m,1] = self.radius - (i+0.5) * 0.5*3**0.5*self.hx
self.n[m,0] = -0.5*3**0.5
self.n[m,1] = 0.5
m = m+1
for i in range(self.nx):
self.X[m,0] = -0.5*self.radius/3**0.5 - (i+0.5) * self.hx
self.X[m,1] = 0.5*self.radius
self.n[m,0] = 0.0
self.n[m,1] = 1.0
m = m+1
th = np.pi/3
RM = torch.Tensor([np.cos(th),np.sin(th),-np.sin(th),np.cos(th)]).reshape(2,2)
for k in range(5):
ind0 = k*2*self.nx; ind1 = (k+1)*2*self.nx; ind2 = (k+2)*2*self.nx
self.X[ind1:ind2,:] = self.X[ind0:ind1,:].matmul(RM)
self.n[ind1:ind2,:] = self.n[ind0:ind1,:].matmul(RM)
self.DS = 6*nx
self.Dlenth = self.DS*self.hx
self.DX = self.X[:self.DS,:]
self.Dn = self.n[:self.DS,:]
self.NS = self.size - self.DS
self.Nlenth = self.NS*self.hx
self.NX = self.X[self.DS:,:]
self.Nn = self.n[self.DS:,:]
self.Dright = UU(self.DX,lamda,[0,0]).reshape(-1,1)
self.Nright = RR_N(self.NX,self.Nn,lamda).reshape(-1,1)
# Test set
class TESET():
def __init__(self, radius, nx, lamda):
self.dim = 2
self.radius = radius
self.nx = nx
self.hx = self.radius/3**0.5/self.nx
self.size = 6 * self.nx*(self.nx+1) + 1
self.X = torch.zeros(self.size,self.dim)
m = 0
for i in range(self.nx):
for j in range(self.nx+1):
self.X[m,0] = (1+i+0.5*j) * self.hx
self.X[m,1] = j * 0.5*3**0.5*self.hx
m = m+1
th = np.pi/3
RM = torch.Tensor([np.cos(th),np.sin(th),-np.sin(th),np.cos(th)]).reshape(2,2)
for k in range(5):
ind0 = k*self.nx*(self.nx+1)
ind1 = (k+1)*self.nx*(self.nx+1)
ind2 = (k+2)*self.nx*(self.nx+1)
self.X[ind1:ind2,:] = self.X[ind0:ind1,:].matmul(RM)
self.u_acc = UU(self.X,lamda,[0,0]).reshape(-1,1)
class LEN():
def __init__(self,bdset):
self.n = bdset.nx
self.radius = bdset.radius
self.dim = 2
self.mu = 3
self.num = 3
self.ee = 1#(1.25*self.radius)**2/(4*self.n)
self.inp = []
self.L_max = 1.0
for i in range(self.num):
node = torch.zeros(4*self.n,self.dim)
node[0:2*self.n,:] = bdset.DX[2*i*self.n:2*(i + 1)*self.n,:]
node[2*self.n:,:] = bdset.NX[2*i*self.n:2*(i + 1)*self.n,:]
self.inp.append(node)
self.L_max = max(self.forward(bdset.X))
def dist(self,X,Y):
d = ((X - Y)**2).sum(1)
return (self.ee + d)**(-0.5)
def coef(self,k):
node = self.inp[k]#[60,2]
size = node.shape[0] + node.shape[1] + 1
N = node.shape[0]
A = torch.zeros(size,size)
A[:N,N:size - 1] = node
A[N:size - 1,:N] = node.t()
A[:N,-1] = torch.ones(N)
A[-1,:N] = torch.ones(N)
for i in range(N):
A[0:N,i] = self.dist(node,node[i,:])
#print(A[0:N,0].shape,self.dist(node,node[0,:]).shape)
b = torch.zeros(size,1)
b[2*self.n:4*self.n,:] = torch.ones(2*self.n,1)
xishu,unknow = torch.solve(b,A)
#xishu = torch.linalg.solve(A,b)  # use this on newer PyTorch, where torch.solve has been removed
return xishu
def lk(self,k,X):
node = self.inp[k]#[60,2]
size = node.shape[0] + node.shape[1] + 1
N = node.shape[0]
value = torch.zeros(X.shape[0])
xishu = self.coef(k)
for i in range(N):
value += xishu[i]*self.dist(X,node[i,:])
for j in range(node.shape[1]):
value += xishu[N + j]*X[:,j]
return value + xishu[-1]
def forward(self,X):
L = 1.0
for i in range(self.num):
L = L*(1 - (1 - self.lk(i,X))**self.mu)
return (L/self.L_max).view(-1,1)
'''
nx = 4
lamda = 2.1
radius = 2
bdset = BDSET(radius, nx, lamda)
le = LEN(bdset)
i = 0
node = torch.zeros(4*nx,2)
node[0:2*nx,:] = bdset.DX[2*i*nx:2*(i + 1)*nx,:]
node[2*nx:,:] = bdset.NX[2*i*nx:2*(i + 1)*nx,:]
dx = bdset.DX[2*i*nx:2*(i + 1)*nx,:]
print(le.lk(i,node))
# test of the length factor function
'''
np.random.seed(1234)
torch.manual_seed(1234)
class Net(torch.nn.Module):
def __init__(self, layers):
super(Net, self).__init__()
self.layers = layers
self.hidden_layers_num = len(layers)-2
fc = []
for i in range(self.hidden_layers_num):
fc.append(torch.nn.Linear(self.layers[i],self.layers[i+1]))
fc.append(torch.nn.Linear(self.layers[i+1],self.layers[i+1]))
fc.append(torch.nn.Linear(self.layers[-2], self.layers[-1]))
self.fc = torch.nn.Sequential(*fc)
def forward(self, Input):
for i in range(self.hidden_layers_num):
Output = torch.sin(self.fc[2*i](Input))
Output = torch.sin(self.fc[2*i+1](Output))
Output[:,0:self.layers[i]] = Output[:,0:self.layers[i]] + Input
Input = Output
return self.fc[-1](Input)
def pred(netg,netf,lenth,X):
return netg.forward(X) + lenth.forward(X)*netf.forward(X)
def error(u_pred, u_acc):
fenzi = ((u_pred - u_acc)**2).sum()
fenmu = (u_acc**2).sum()
return (fenzi/fenmu)**(0.5)
def Lossg(netg,bdset):  # fit the Dirichlet boundary data
ub = netg.forward(bdset.DX)
return bdset.Dlenth * ((ub - bdset.Dright)**2).mean()
def Lossf(netf,inset,bdset):
inset.X.requires_grad = True
insetF = netf.forward(inset.X)
insetFx, = torch.autograd.grad(insetF, inset.X, create_graph=True, retain_graph=True,
grad_outputs=torch.ones(inset.size,1))
u_in = inset.G + inset.L * insetF  # inset.G is netg evaluated at inset.X, precomputed before training f to speed up the iterations
ux = inset.Gx + inset.Lx*insetF + inset.L*insetFx  # chain rule for u = G + L*F; the precomputed G, Gx, L, Lx make each iteration cheaper
ub = bdset.N_G + bdset.N_L * netf.forward(bdset.NX)
return (inset.area*(1 + (ux**2).sum(1))**0.5).mean()\
- bdset.Nlenth * (bdset.Nright*ub).mean()
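# Lossf above is a Ritz (energy) formulation of the minimal surface problem:
#   E(u) = integral over Omega of sqrt(1 + |grad u|^2) dx  -  integral over Gamma_N of g_N * u ds,
# approximated by the area-weighted mean over interior points and the length-weighted mean
# over Neumann boundary points; with u = G + L*F only the Neumann part of the boundary
# contributes a boundary term.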
def Traing(netg, bdset, optimg, epochg):
print('train neural network g')
lossg = Lossg(netg,bdset)
lossbest = lossg
print('epoch:%d,lossf:%.3e'%(0,lossg.item()))
torch.save(netg.state_dict(),'best_netg.pkl')
cycle = 100
for i in range(epochg):
st = time.time()
for j in range(cycle):
optimg.zero_grad()
lossg = Lossg(netg,bdset)
lossg.backward()
optimg.step()
if lossg < lossbest:
lossbest = lossg
torch.save(netg.state_dict(),'best_netg.pkl')
ela = time.time() - st
print('epoch:%d,lossg:%.3e,time:%.2f'%((i + 1)*cycle,lossg.item(),ela))
# Train neural network f
def Trainf(netf, inset, bdset, optimf, epochf):
print('train neural network f')
ERROR,BUZHOU = [],[]
lossf = Lossf(netf,inset,bdset)
lossoptimal = lossf
trainerror = error(inset.G + inset.L * netf.forward(inset.X), inset.u_acc)
print('epoch: %d, loss: %.3e, trainerror: %.3e'
%(0, lossf.item(), trainerror.item()))
torch.save(netf.state_dict(),'best_netf.pkl')
cycle = 100
for i in range(epochf):
st = time.time()
for j in range(cycle):
optimf.zero_grad()
lossf = Lossf(netf,inset,bdset)
lossf.backward()
optimf.step()
if lossf < lossoptimal:
lossoptimal = lossf
torch.save(netf.state_dict(),'best_netf.pkl')
ela = time.time() - st
trainerror = error(inset.G + inset.L * netf.forward(inset.X), inset.u_acc)
ERROR.append(trainerror.item())
BUZHOU.append((i + 1)*cycle)
print('epoch:%d,lossf:%.3e,train error:%.3e,time:%.2f'%
((i + 1)*cycle,lossf.item(),trainerror,ela))
return ERROR,BUZHOU
# Train neural network
def Train(netg, netf, lenth, inset, bdset, optimg, optimf, epochg, epochf):
# Calculate the length factor
inset.X.requires_grad = True
inset.L = lenth.forward(inset.X)
inset.Lx, = torch.autograd.grad(inset.L, inset.X,  # gradient of the length factor w.r.t. the interior points
create_graph=True, retain_graph=True,
grad_outputs=torch.ones(inset.size,1))
bdset.N_L = lenth.forward(bdset.NX)  # length factor values at the Neumann boundary sample points
inset.L = inset.L.data; inset.Lx = inset.Lx.data; bdset.N_L = bdset.N_L.data
# Train neural network g
Traing(netg, bdset, optimg, epochg)
netg.load_state_dict(torch.load('best_netg.pkl'))
inset.X.requires_grad = True
inset.G = netg.forward(inset.X)
inset.Gx, = torch.autograd.grad(inset.G, inset.X,
create_graph=True, retain_graph=True,
grad_outputs=torch.ones(inset.size,1))
bdset.N_G = netg.forward(bdset.NX)
inset.G = inset.G.data; inset.Gx = inset.Gx.data; bdset.N_G = bdset.N_G.data
# Train neural network f
ERROR,BUZHOU = Trainf(netf, inset, bdset, optimf, epochf)
return ERROR,BUZHOU
def main():
# Configurations
radius = 2.0
lamda = 2.1
nx_tr = 15
nx_te = 30
epochg = 4
epochf = 4
layer_g = [2,10,1]
layer_f = [2,10,10,10,1]
learning_rate = 0.01
tests_num = 1
# ------------------------------------------------------------------------------------------------
# Tests
testerror = torch.zeros(tests_num)
for it in range(tests_num):
# Prepare data set
inset = INSET(radius, nx_tr, lamda)
bdset = BDSET(radius, nx_tr, lamda)
teset = TESET(radius, nx_te, lamda)
# Construct length factor
#lenth = lenthfactor(bdset)
lenth = LEN(bdset)
# Construct neural network
netg = Net(layer_g)
netf = Net(layer_f)
optimg = torch.optim.Adam(netg.parameters(), lr=learning_rate)
optimf = torch.optim.Adam(netf.parameters(), lr=learning_rate)
# Train neural network
start_time = time.time()
ERROR,BUZHOU = Train(netg, netf, lenth, inset, bdset, optimg, optimf, epochg, epochf)
print(ERROR,BUZHOU)
elapsed = time.time() - start_time
print('Train time: %.2f' %(elapsed))
# Make prediction
netg.load_state_dict(torch.load('best_netg.pkl'))
netf.load_state_dict(torch.load('best_netf.pkl'))
te_U = pred(netg, netf, lenth, teset.X)
testerror[it] = error(te_U, teset.u_acc)
print('testerror = %.3e\n' %(testerror[it].item()))
# ------------------------------------------------------------------------------------------------
print(testerror.data)
errors_mean = testerror.mean()
errors_std = testerror.std()
print('test_error_mean = %.3e, test_error_std = %.3e'
%(errors_mean.item(),errors_std.item()))
if __name__ == '__main__':
main()