采样
def U(X, order,prob):
if prob==1:
temp = 10*(X[:,0]+X[:,1])**2 + (X[:,0]-X[:,1])**2 + 0.5
if order[0]==0 and order[1]==0:
return torch.log(temp)
if order[0]==1 and order[1]==0:
return temp**(-1) * (20*(X[:,0]+X[:,1]) + 2*(X[:,0]-X[:,1]))
if order[0]==0 and order[1]==1:
return temp**(-1) * (20*(X[:,0]+X[:,1]) - 2*(X[:,0]-X[:,1]))
if order[0]==2 and order[1]==0:
return - temp**(-2) * (20*(X[:,0]+X[:,1])+2*(X[:,0]-X[:,1])) ** 2 \
+ temp**(-1) * (22)
if order[0]==1 and order[1]==1:
return - temp**(-2) * (20*(X[:,0]+X[:,1])+2*(X[:,0]-X[:,1])) \
* (20*(X[:,0]+X[:,1])-2*(X[:,0]-X[:,1])) \
+ temp**(-1) * (18)
if order[0]==0 and order[1]==2:
return - temp**(-2) * (20*(X[:,0]+X[:,1])-2*(X[:,0]-X[:,1])) ** 2 \
+ temp**(-1) * (22)
if prob==2:
if order[0]==0 and order[1]==0:
return (X[:,0]*X[:,0]*X[:,0]-X[:,0]) * \
0.5*(torch.exp(2*X[:,1])+torch.exp(-2*X[:,1]))
if order[0]==1 and order[1]==0:
return (3*X[:,0]*X[:,0]-1) * \
0.5*(torch.exp(2*X[:,1])+torch.exp(-2*X[:,1]))
if order[0]==0 and order[1]==1:
return (X[:,0]*X[:,0]*X[:,0]-X[:,0]) * \
(torch.exp(2*X[:,1])-torch.exp(-2*X[:,1]))
if order[0]==2 and order[1]==0:
return (6*X[:,0]) * \
0.5*(torch.exp(2*X[:,1])+torch.exp(-2*X[:,1]))
if order[0]==1 and order[1]==1:
return (3*X[:,0]*X[:,0]-1) * \
(torch.exp(2*X[:,1])-torch.exp(-2*X[:,1]))
if order[0]==0 and order[1]==2:
return (X[:,0]*X[:,0]*X[:,0]-X[:,0]) * \
2*(torch.exp(2*X[:,1])+torch.exp(-2*X[:,1]))
if prob==3:
temp1 = X[:,0]*X[:,0] - X[:,1]*X[:,1]
temp2 = X[:,0]*X[:,0] + X[:,1]*X[:,1] + 0.1
if order[0]==0 and order[1]==0:
return temp1 * temp2**(-1)
if order[0]==1 and order[1]==0:
return (2*X[:,0]) * temp2**(-1) + \
temp1 * (-1)*temp2**(-2) * (2*X[:,0])
if order[0]==0 and order[1]==1:
return (-2*X[:,1]) * temp2**(-1) + \
temp1 * (-1)*temp2**(-2) * (2*X[:,1])
if order[0]==2 and order[1]==0:
return (2) * temp2**(-1) + \
2 * (2*X[:,0]) * (-1)*temp2**(-2) * (2*X[:,0]) + \
temp1 * (2)*temp2**(-3) * (2*X[:,0])**2 + \
temp1 * (-1)*temp2**(-2) * (2)
if order[0]==1 and order[1]==1:
return (2*X[:,0]) * (-1)*temp2**(-2) * (2*X[:,1]) + \
(-2*X[:,1]) * (-1)*temp2**(-2) * (2*X[:,0]) + \
temp1 * (2)*temp2**(-3) * (2*X[:,0]) * (2*X[:,1])
if order[0]==0 and order[1]==2:
return (-2) * temp2**(-1) + \
2 * (-2*X[:,1]) * (-1)*temp2**(-2) * (2*X[:,1]) + \
temp1 * (2)*temp2**(-3) * (2*X[:,1])**2 + \
temp1 * (-1)*temp2**(-2) * (2)
if prob==4:
temp = torch.exp(-4*X[:,1]*X[:,1])
if order[0]==0 and order[1]==0:
ind = (X[:,0]<=0).float()
return ind * ((X[:,0]+1)**4-1) * temp + \
(1-ind) * (-(-X[:,0]+1)**4+1) * temp
if order[0]==1 and order[1]==0:
ind = (X[:,0]<=0).float()
return ind * (4*(X[:,0]+1)**3) * temp + \
(1-ind) * (4*(-X[:,0]+1)**3) * temp
if order[0]==0 and order[1]==1:
ind = (X[:,0]<=0).float()
return ind * ((X[:,0]+1)**4-1) * (temp*(-8*X[:,1])) + \
(1-ind) * (-(-X[:,0]+1)**4+1) * (temp*(-8*X[:,1]))
if order[0]==2 and order[1]==0:
ind = (X[:,0]<=0).float()
return ind * (12*(X[:,0]+1)**2) * temp + \
(1-ind) * (-12*(-X[:,0]+1)**2) * temp
if order[0]==1 and order[1]==1:
ind = (X[:,0]<=0).float()
return ind * (4*(X[:,0]+1)**3) * (temp*(-8*X[:,1])) + \
(1-ind) * (4*(-X[:,0]+1)**3) * (temp*(-8*X[:,1]))
if order[0]==0 and order[1]==2:
ind = (X[:,0]<=0).float()
return ind * ((X[:,0]+1)**4-1) * (temp*(64*X[:,1]*X[:,1]-8)) + \
(1-ind) * (-(-X[:,0]+1)**4+1) * (temp*(64*X[:,1]*X[:,1]-8))
def A(X, order, ind):
    """Diffusion-matrix entries a_ij(x, y) and the derivatives used when
    expanding div(A grad u).

    order == 0 : the entry itself; ind in {[1,1],[1,2],[2,1],[2,2]}.
    order == 1 : d a11/dx, d a12/dx, d a21/dy, d a22/dy respectively.
    """
    x = X[:, 0]
    y = X[:, 1]
    if order == 0:
        if ind == [1, 1]:
            return (x + y) * (x + y) + 1        # a11 = (x+y)^2 + 1
        if ind == [1, 2]:
            return -(x + y) * (x - y)           # a12 = -(x+y)(x-y)
        if ind == [2, 1]:
            return -(x + y) * (x - y)           # a21 = a12 (symmetric)
        if ind == [2, 2]:
            return (x - y) * (x - y) + 1        # a22 = (x-y)^2 + 1
    if order == 1:
        if ind == [1, 1]:
            return 2 * (x + y)                  # d a11 / dx
        if ind == [1, 2]:
            return -2 * x                       # d a12 / dx
        if ind == [2, 1]:
            return 2 * y                        # d a21 / dy
        if ind == [2, 2]:
            return -2 * (x - y)                 # d a22 / dy
def C(X, prob):
    """Expanded divergence div(A grad u) at the points X, i.e.
    sum_ij d/dx_i (a_ij * du/dx_j), used to build the source term."""
    ux = U(X, [1, 0], prob)
    uy = U(X, [0, 1], prob)
    uxx = U(X, [2, 0], prob)
    uxy = U(X, [1, 1], prob)
    uyy = U(X, [0, 2], prob)
    # product rule, row by row of A (same term order as the direct expansion)
    return A(X, 1, [1, 1]) * ux + A(X, 0, [1, 1]) * uxx + \
        A(X, 1, [1, 2]) * uy + A(X, 0, [1, 2]) * uxy + \
        A(X, 1, [2, 1]) * ux + A(X, 0, [2, 1]) * uxy + \
        A(X, 1, [2, 2]) * uy + A(X, 0, [2, 2]) * uyy
def NEU(X, n, prob):
    """Neumann boundary data (A grad u) . n at boundary points X with
    outward unit normals n (one normal per row)."""
    ux = U(X, [1, 0], prob)
    uy = U(X, [0, 1], prob)
    flux_x = A(X, 0, [1, 1]) * ux + A(X, 0, [1, 2]) * uy
    flux_y = A(X, 0, [2, 1]) * ux + A(X, 0, [2, 2]) * uy
    return flux_x * n[:, 0] + flux_y * n[:, 1]
class INSET():
    """Interior collocation points (cell midpoints) on the L-shaped domain:
    a tall left rectangle [b00,b01]x[b10,b12] plus a lower right rectangle
    [b01,b02]x[b10,b11], where bounds = [[b00,b01,b02],[b10,b11,b12]].

    Precomputes the exact solution, the Ritz source term and the diffusion
    matrix A at every interior point.
    """
    def __init__(self, bounds, hx, prob):
        self.bounds = bounds
        self.dim = 2
        self.hx = hx
        # cell counts: nx[0] in x for the two sub-rectangles, nx[1] in y
        nx = [[int((bounds[0, 1] - bounds[0, 0]) / hx[0]),
               int((bounds[0, 2] - bounds[0, 1]) / hx[0])],
              [int((bounds[1, 2] - bounds[1, 0]) / hx[1]),
               int((bounds[1, 1] - bounds[1, 0]) / hx[1])]]
        self.nx = torch.tensor(nx)
        self.size = self.nx[0, 0] * self.nx[1, 0] + self.nx[0, 1] * self.nx[1, 1]
        # BUG FIX: the second rectangle's width was written as
        # (bounds[0,2] - bounds[0,2]) == 0, so its area was silently dropped
        # from the Ritz-loss scaling.  The correct width is b02 - b01.
        self.area = (bounds[0, 1] - bounds[0, 0]) * (bounds[1, 2] - bounds[1, 0]) \
            + (bounds[0, 2] - bounds[0, 1]) * (bounds[1, 1] - bounds[1, 0])
        self.X = torch.zeros(self.size, self.dim)
        m = 0
        # left tall rectangle
        for i in range(self.nx[0, 0]):
            for j in range(self.nx[1, 0]):
                self.X[m, 0] = bounds[0, 0] + (i + 0.5) * hx[0]
                self.X[m, 1] = bounds[1, 0] + (j + 0.5) * hx[1]
                m = m + 1
        # lower right rectangle
        for i in range(self.nx[0, 1]):
            for j in range(self.nx[1, 1]):
                self.X[m, 0] = bounds[0, 1] + (i + 0.5) * hx[0]
                self.X[m, 1] = bounds[1, 0] + (j + 0.5) * hx[1]
                m = m + 1
        self.u_acc = U(self.X, [0, 0], prob).view(-1, 1)   # exact solution at interior points
        self.right = - C(self.X, prob).view(-1, 1)         # source term: -div(A grad u) = -c
        # Cache A at every interior point so the loss can form (A grad u) . grad u
        self.AM = torch.zeros(self.size, 2, 2)
        self.AM[:, 0, 0] = A(self.X, 0, [1, 1]); self.AM[:, 0, 1] = A(self.X, 0, [1, 2])
        self.AM[:, 1, 0] = A(self.X, 0, [2, 1]); self.AM[:, 1, 1] = A(self.X, 0, [2, 2])
class BDSET():
    # Boundary collocation points (midpoints of boundary cells) on the
    # L-shaped domain.  The horizontal edges carry Dirichlet data, the
    # vertical edges Neumann data.  Dnum/Nnum record segment offsets into
    # DX/NX and are consumed by `lenthfactor`, so the traversal order of
    # the loops below must not change.
    def __init__(self, bounds, hx, prob):
        self.bounds = bounds
        self.dim = 2
        self.hx = hx
        # cell counts: nx[0] in x for the two sub-rectangles, nx[1] in y
        nx = [[int((bounds[0, 1] - bounds[0, 0]) / hx[0]),
               int((bounds[0, 2] - bounds[0, 1]) / hx[0])],
              [int((bounds[1, 2] - bounds[1, 0]) / hx[1]),
               int((bounds[1, 1] - bounds[1, 0]) / hx[1])]]
        self.nx = torch.tensor(nx)
        # Top/bottom boundaries are Dirichlet; left/right boundaries are Neumann.
        self.DS = 2 * (self.nx[0, 0] + self.nx[0, 1])     # number of Dirichlet points
        self.Dlenth = 2 * (bounds[0, 2] - bounds[0, 0])   # total Dirichlet boundary length
        self.DX = torch.zeros(self.DS, self.dim)
        self.Dnum = []   # Dirichlet segment offsets into DX
        m = 0
        self.Dnum.append(m)
        for i in range(self.nx[0, 0]):   # top edge: y = bounds[1,2]
            self.DX[m, 0] = bounds[0, 0] + (i + 0.5) * hx[0]
            self.DX[m, 1] = bounds[1, 2]
            m = m + 1
        self.Dnum.append(m)
        for i in range(self.nx[0, 1]):   # middle horizontal edge: y = bounds[1,1]
            self.DX[m, 0] = bounds[0, 1] + (i + 0.5) * hx[0]
            self.DX[m, 1] = bounds[1, 1]
            m = m + 1
        self.Dnum.append(m)
        for i in range(self.nx[0, 0] + self.nx[0, 1]):   # bottom edge: y = bounds[1,0]
            self.DX[m, 0] = bounds[0, 0] + (i + 0.5) * hx[0]
            self.DX[m, 1] = bounds[1, 0]
            m = m + 1
        self.Dnum.append(m)
        # -----------------
        self.NS = 2 * self.nx[1, 0]                       # number of Neumann points
        self.Nlenth = 2 * (bounds[1, 2] - bounds[1, 0])   # total Neumann boundary length
        self.NX = torch.zeros(self.NS, self.dim)
        self.Nn = torch.zeros(self.NS, self.dim)          # outward unit normals at NX
        self.Nnum = []   # Neumann segment offsets into NX
        n = 0
        self.Nnum.append(n)
        for j in range(self.nx[1, 1]):   # right edge, lower part: x = bounds[0,2]
            self.NX[n, 0] = bounds[0, 2]
            self.NX[n, 1] = bounds[1, 0] + (j + 0.5) * hx[1]
            self.Nn[n, 0] = 1; self.Nn[n, 1] = 0
            n = n + 1
        self.Nnum.append(n)
        for j in range(self.nx[1, 0]):   # left vertical edge: x = bounds[0,0]
            self.NX[n, 0] = bounds[0, 0]
            self.NX[n, 1] = bounds[1, 0] + (j + 0.5) * hx[1]
            self.Nn[n, 0] = -1; self.Nn[n, 1] = 0
            n = n + 1
        self.Nnum.append(n)
        for j in range(self.nx[1, 0] - self.nx[1, 1]):   # re-entrant vertical edge, upper part: x = bounds[0,1]
            self.NX[n, 0] = bounds[0, 1]
            self.NX[n, 1] = bounds[1, 1] + (j + 0.5) * hx[1]
            self.Nn[n, 0] = 1; self.Nn[n, 1] = 0
            n = n + 1
        self.Nnum.append(n)
        self.Dright = U(self.DX, [0, 0], prob).view(-1, 1)        # exact solution on Dirichlet points
        self.Nright = NEU(self.NX, self.Nn, prob).view(-1, 1)     # Neumann data on NX
# Quick visual check of the boundary sampling for problem 4:
# Dirichlet points and Neumann points are drawn in different colours.
bounds = torch.tensor([[-2, 0, 2], [-1, 0, 2]]).float()
hx = [0.4, 0.1]
prob = 4
bdset = BDSET(bounds, hx, prob)
X = bdset.DX
Y = bdset.NX
plt.scatter(X[:, 0], X[:, 1])
plt.scatter(Y[:, 0], Y[:, 1])
这里重点关注边界集合的采样,如图所示,我们取橙色边界为Neumann边界,其他为Dirichlet边界。
class TESET():
    """Node-based test grid over the L-shaped domain.

    The left block gets the full (nx00+1) x (nx10+1) lattice of nodes; the
    right block adds nx01 x (nx11+1) nodes strictly to the right of the
    re-entrant line x = bounds[0,1], so the shared column is not duplicated.
    """
    def __init__(self, bounds, hx, prob):
        self.bounds = bounds
        self.dim = 2
        self.hx = hx
        # cell counts: nx[0] in x for the two sub-rectangles, nx[1] in y
        nx = [[int((bounds[0, 1] - bounds[0, 0]) / hx[0]),
               int((bounds[0, 2] - bounds[0, 1]) / hx[0])],
              [int((bounds[1, 2] - bounds[1, 0]) / hx[1]),
               int((bounds[1, 1] - bounds[1, 0]) / hx[1])]]
        self.nx = torch.tensor(nx)
        self.size = (self.nx[0, 0] + 1) * (self.nx[1, 0] + 1) + self.nx[0, 1] * (self.nx[1, 1] + 1)
        self.X = torch.zeros(self.size, self.dim)
        row = 0
        # left tall block: full node lattice
        for i in range(self.nx[0, 0] + 1):
            for j in range(self.nx[1, 0] + 1):
                self.X[row, 0] = self.bounds[0, 0] + i * self.hx[0]
                self.X[row, 1] = self.bounds[1, 0] + j * self.hx[1]
                row = row + 1
        # right lower block: start one step right of the shared column
        for i in range(self.nx[0, 1]):
            for j in range(self.nx[1, 1] + 1):
                self.X[row, 0] = self.bounds[0, 1] + (i + 1) * self.hx[0]
                self.X[row, 1] = self.bounds[1, 0] + j * self.hx[1]
                row = row + 1
        self.u_acc = U(self.X, [0, 0], prob).view(-1, 1)   # exact solution at test nodes
class lenthfactor():
def __init__(self,bdset,mu):
self.mu = mu
self.nx = bdset.nx
self.dim = 2
self.mu = 3
self.ee = torch.zeros(3)
self.inp = []#分为三部分,每一部分储存D边界和N边界
self.you = []#分为三部分,每一部分储存D边界取值(0)和N边界取值(1)
for i in range(3):
D_size = bdset.Dnum[i + 1] - bdset.Dnum[i];N_size = bdset.Nnum[i + 1] - bdset.Nnum[i]
size = D_size + N_size
print(size,D_size,N_size)
node = torch.zeros(size,self.dim)
you = torch.zeros(size,1)
node[:D_size,:] = bdset.DX[bdset.Dnum[i]:bdset.Dnum[i + 1],:]
node[D_size:,:] = bdset.NX[bdset.Nnum[i]:bdset.Nnum[i + 1],:]
you[:D_size,:] = torch.zeros(D_size,1)
you[D_size:,:] = torch.ones(N_size,1)
self.ee[i] = 1.25*(D_size*bdset.hx[0] + N_size*bdset.hx[1])**2/self.nx[0,0].float()
self.inp.append(node)
self.you.append(you)
self.L_max = 1.0
self.L_max = max(self.forward(bdset.NX))
def dist(self,X,Y,ee):
d = ((X - Y)**2).sum(1)
return (ee + d)**(-0.5)
def coef(self,k,ee):
node = self.inp[k];you = self.you[k]
size = node.shape[0] + node.shape[1] + 1;N = node.shape[0]
matrix = torch.zeros(size,size)
matrix[:N,N:size - 1] = node
matrix[N:size - 1,:N] = node.t()
matrix[:N,-1] = torch.ones(N)
matrix[-1,:N] = torch.ones(N)
i = 0
for i in range(N):
matrix[0:N,i] = self.dist(node,node[i:i + 1,:],ee)
b = torch.zeros(size,1)
b[:N,:] = you
xishu,lu = torch.solve(b,matrix)
#print(matrix@xishu - b)
return xishu
def test(self,k,X,ee):
xishu = self.coef(k,ee);node = self.inp[k]
size = node.shape[0] + node.shape[1] + 1;N = node.shape[0]
out = 0
for i in range(N):
out += xishu[i,0]*self.dist(X,node[i:i + 1],ee)
for i in range(X.shape[1]):
out += xishu[N + i,0]*X[:,i]
return out + xishu[-1,0]
def forward(self,X):
L = 1.0
for i in range(3):
L = L*(1 - (1 - self.test(i,X,self.ee[i]))**self.mu)
return (L/self.L_max).view(-1,1)
# Visualise the length factor on boundary segment k = 1 (problem 4):
# the interpolant should be ~0 on Dirichlet points and ~1 on Neumann points.
bounds = torch.tensor([[-2, 0, 2], [-1, 0, 2]]).float()
hx = [0.1, 0.1]
prob = 4
bdset = BDSET(bounds, hx, prob)
lenth = lenthfactor(bdset, 3)
k = 1
inp = lenth.inp[k]
plt.scatter(inp[:, 0], inp[:, 1])
X = inp
print(lenth.test(k, X, lenth.ee[k]))
print(lenth.forward(X))
重点介绍长度因子。我们要明确一点:在设定长度因子时,长度因子应当在 Dirichlet 边界上取 0,而在其余边界及区域内部取正值(代码中正是以 Dirichlet 点取值 0、Neumann 点取值 1 来拟合插值的)。
但是我做的长度因子效果不好,在 Dirichlet 边界上无法达到合适的精度,这一点暂时不知道从何入手。
np.random.seed(1234)
torch.manual_seed(1234)
class Net(torch.nn.Module):
    # Fully-connected network with sin activations and a partial residual
    # connection.  layers = [in, h1, ..., hk, out]; each hidden stage applies
    # two Linear layers (both followed by sin) and then adds the stage input
    # onto the first layers[i] channels of the stage output.
    def __init__(self, layers):
        super(Net, self).__init__()
        self.layers = layers
        self.hidden_layers_num = len(layers) - 2
        fc = []
        for i in range(self.hidden_layers_num):
            # two linear layers per hidden stage; forward() indexes them
            # as fc[2*i] and fc[2*i+1]
            fc.append(torch.nn.Linear(self.layers[i], self.layers[i + 1]))
            fc.append(torch.nn.Linear(self.layers[i + 1], self.layers[i + 1]))
        fc.append(torch.nn.Linear(self.layers[-2], self.layers[-1]))
        self.fc = torch.nn.Sequential(*fc)
    def forward(self, Input):
        for i in range(self.hidden_layers_num):
            Output = torch.sin(self.fc[2 * i](Input))
            Output = torch.sin(self.fc[2 * i + 1](Output))
            # residual: add the stage input onto the leading layers[i] channels
            # (in-place slice assignment; assumes layers[i] <= layers[i+1] --
            # TODO confirm this holds for every layer configuration used)
            Output[:, 0:self.layers[i]] = Output[:, 0:self.layers[i]] + Input
            Input = Output
        return self.fc[-1](Input)
def pred(netg, netf, lenth, X):
    """Composite solution u = g + L * f: g fits the Dirichlet data, L is the
    length factor (zero on the Dirichlet boundary) and f is the interior
    network, so the Dirichlet condition holds by construction."""
    g_val = netg.forward(X)
    correction = lenth.forward(X) * netf.forward(X)
    return g_val + correction
def error(u_pred, u_acc):
    """Relative L2 error ||u_pred - u_acc|| / ||u_acc||."""
    num = ((u_pred - u_acc) ** 2).sum()
    den = (u_acc ** 2).sum()
    return (num / den) ** (0.5)
def Lossg(netg, bdset):
    """Dirichlet-boundary fitting loss for network g: the mean squared
    mismatch against the exact boundary values, scaled by the total
    Dirichlet boundary length."""
    residual = netg.forward(bdset.DX) - bdset.Dright
    return bdset.Dlenth * (residual ** 2).mean()
def Lossf(netf, inset, bdset):
    # Deep-Ritz loss for network f with u = G + L*f:
    #   0.5 * ∫ (A grad u) . grad u  -  ∫ right * u  -  ∮_N Nright * u.
    # G, Gx (netg and its gradient), L, Lx (length factor and its gradient)
    # and the Neumann-boundary counterparts N_G, N_L are precomputed in
    # Train() and treated as constants here to speed up each iteration.
    inset.X.requires_grad = True
    insetF = netf.forward(inset.X)
    insetFx, = torch.autograd.grad(insetF, inset.X, create_graph=True, retain_graph=True,
                                   grad_outputs=torch.ones(inset.size, 1))
    u_in = inset.G + inset.L * insetF   # inset.G is netg evaluated on inset.X, cached before training for speed
    ux = inset.Gx + inset.Lx * insetF + inset.L * insetFx   # chain rule for grad u using the cached pieces
    temp = (inset.AM @ ux.view(-1, inset.dim, 1)).view(-1, inset.dim)   # A grad u at each interior point
    ub = bdset.N_G + bdset.N_L * netf.forward(bdset.NX)   # u on the Neumann boundary
    return 0.5 * inset.area * ((temp * ux).sum(1)).mean() \
        - inset.area * (inset.right * u_in).mean() \
        - bdset.Nlenth * (bdset.Nright * ub).mean()
def Traing(netg, bdset, optimg, epochg):
    """Train netg to fit the Dirichlet boundary data.

    Runs epochg outer epochs of `cycle` optimiser steps each, checkpointing
    the best state dict seen so far to 'best_netg.pkl'.
    """
    print('train neural network g')
    lossg = Lossg(netg, bdset)
    best_loss = lossg
    print('epoch:%d,lossf:%.3e' % (0, lossg.item()))
    torch.save(netg.state_dict(), 'best_netg.pkl')
    cycle = 100
    for epoch in range(epochg):
        t0 = time.time()
        for _ in range(cycle):
            optimg.zero_grad()
            lossg = Lossg(netg, bdset)
            lossg.backward()
            optimg.step()
            # checkpoint whenever a new best loss appears
            if lossg < best_loss:
                best_loss = lossg
                torch.save(netg.state_dict(), 'best_netg.pkl')
        elapsed = time.time() - t0
        print('epoch:%d,lossg:%.3e,time:%.2f' % ((epoch + 1) * cycle, lossg.item(), elapsed))
# Train neural network f
def Trainf(netf, inset, bdset, optimf, epochf):
    """Train netf on the Ritz loss, checkpointing the best state dict to
    'best_netf.pkl'.

    Returns (ERROR, BUZHOU): the relative training error after each outer
    epoch and the corresponding cumulative step counts.
    """
    print('train neural network f')
    ERROR, BUZHOU = [], []
    lossf = Lossf(netf, inset, bdset)
    best_loss = lossf
    trainerror = error(inset.G + inset.L * netf.forward(inset.X), inset.u_acc)
    print('epoch: %d, loss: %.3e, trainerror: %.3e'
          % (0, lossf.item(), trainerror.item()))
    torch.save(netf.state_dict(), 'best_netf.pkl')
    cycle = 100
    for epoch in range(epochf):
        t0 = time.time()
        for _ in range(cycle):
            optimf.zero_grad()
            lossf = Lossf(netf, inset, bdset)
            lossf.backward()
            optimf.step()
            # checkpoint whenever a new best loss appears
            if lossf < best_loss:
                best_loss = lossf
                torch.save(netf.state_dict(), 'best_netf.pkl')
        elapsed = time.time() - t0
        trainerror = error(inset.G + inset.L * netf.forward(inset.X), inset.u_acc)
        ERROR.append(trainerror.item())
        BUZHOU.append((epoch + 1) * cycle)
        print('epoch:%d,lossf:%.3e,train error:%.3e,time:%.2f' %
              ((epoch + 1) * cycle, lossf.item(), trainerror, elapsed))
    return ERROR, BUZHOU
# Train neural network
def Train(netg, netf, lenth, inset, bdset, optimg, optimf, epochg, epochf):
    # Full training pipeline:
    # 1) evaluate the length factor L and its spatial gradient on the
    #    interior points (and L on the Neumann points), detach them;
    # 2) train netg on the Dirichlet data and cache G = netg(X) and its
    #    gradient the same way;
    # 3) train netf on the Ritz loss using the cached pieces.
    # Calculate the length factor
    inset.X.requires_grad = True
    inset.L = lenth.forward(inset.X)
    inset.Lx, = torch.autograd.grad(inset.L, inset.X,   # gradient of the length factor w.r.t. the interior points
                                    create_graph=True, retain_graph=True,
                                    grad_outputs=torch.ones(inset.size, 1))
    bdset.N_L = lenth.forward(bdset.NX)   # length factor on the Neumann sample points
    inset.L = inset.L.data; inset.Lx = inset.Lx.data; bdset.N_L = bdset.N_L.data   # detach: constants during training
    # Train neural network g
    Traing(netg, bdset, optimg, epochg)
    netg.load_state_dict(torch.load('best_netg.pkl'))
    inset.X.requires_grad = True
    inset.G = netg.forward(inset.X)
    inset.Gx, = torch.autograd.grad(inset.G, inset.X,
                                    create_graph=True, retain_graph=True,
                                    grad_outputs=torch.ones(inset.size, 1))
    bdset.N_G = netg.forward(bdset.NX)
    inset.G = inset.G.data; inset.Gx = inset.Gx.data; bdset.N_G = bdset.N_G.data   # detach cached g pieces
    # Train neural network f
    ERROR, BUZHOU = Trainf(netf, inset, bdset, optimf, epochf)
    return ERROR, BUZHOU
# Driver: build the data sets, networks and optimisers, run `tests_num`
# training runs and report the relative L2 error on the test grid.
bounds = torch.tensor([[-2, 0, 2], [-1, 0, 2]]).float()
hx_tr = [0.1, 0.1]    # training grid spacing
hx_te = [0.05, 0.1]   # test grid spacing
prob = 2
inset = INSET(bounds, hx_tr, prob)
bdset = BDSET(bounds, hx_tr, prob)
teset = TESET(bounds, hx_te, prob)
lenth = lenthfactor(bdset, 3)
lay_g = [2, 10, 10, 1]; lay_f = [2, 10, 10, 1]
netg = Net(lay_g); netf = Net(lay_f)
lr = 1e-2
optimg = torch.optim.Adam(netg.parameters(), lr=lr)
optimf = torch.optim.Adam(netf.parameters(), lr=lr)
epochg = 10
epochf = 10
tests_num = 1
# ------------------------------------------------------------------------------------------------
testerror = torch.zeros(tests_num)
for it in range(tests_num):
    start_time = time.time()
    ERROR, BUZHOU = Train(netg, netf, lenth, inset, bdset, optimg, optimf, epochg, epochf)
    print(ERROR, BUZHOU)
    elapsed = time.time() - start_time
    print('Train time: %.2f' % (elapsed))
    # evaluate the checkpointed best networks on the test grid
    netg.load_state_dict(torch.load('best_netg.pkl'))
    netf.load_state_dict(torch.load('best_netf.pkl'))
    te_U = pred(netg, netf, lenth, teset.X)
    testerror[it] = error(te_U, teset.u_acc)
    print('testerror = %.3e\n' % (testerror[it].item()))
print(testerror.data)
testerror_mean = testerror.mean()
testerror_std = testerror.std()
print('testerror_mean = %.3e, testerror_std = %.3e'
      % (testerror_mean.item(), testerror_std.item()))
这里说明一下,这个代码针对prob=2,3,4训练效果奇差,而且针对prob=1训练精度也不够。希望有人能进行修改。