文献复现-PINN-边界识别-SciANN代码

参考文献:

[1] 唐明健, 唐和生. 基于物理信息的深度学习求解矩形薄板力学正反问题[J]. Chinese Journal of Computational Mechanics/Jisuan Lixue Xuebao, 2022, 39(1).

[2] Haghighat E, Juanes R. Sciann: A keras/tensorflow wrapper for scientific computations and physics-informed deep learning using artificial neural networks[J]. Computer Methods in Applied Mechanics and Engineering, 2021, 373: 113552.

相关介绍:
https://zhuanlan.zhihu.com/p/666839448

1. 导入第三方库

import numpy as np 
import sciann as sn 
import matplotlib.pyplot as plt 
import time
from numpy import pi
from sciann.utils.math import diff,log,sign,abs

2. 矩形薄板的莱维解(徐芝纶 弹性力学下 第五版)构建数据集

(1)物性参数

# Material and geometric constants of the rectangular thin plate
# (Levy solution, Xu Zhilun, "Elasticity" vol. II, 5th ed.).
E = 2.1 * 10**5   # Young's modulus
v = 0.3           # Poisson's ratio
q = 50            # uniformly distributed transverse load
a = 1             # plate span in x
b = 1             # plate span in y
d = 1             # plate thickness
D = E * d**3 / (12.0 * (1 - v**2))  # flexural rigidity of the plate
z = d / 2.0       # fibre distance from the mid-plane (top surface)

(2)划分采样点(20×20)

# 20 x 20 Cartesian sampling grid over the plate:
# x in [0, 1], y in [-0.5, 0.5]; flattened row-major (y outer, x inner).
xs = np.linspace(0, 1, 20)
ys = np.linspace(-0.5, 0.5, 20)
x_data, y_data = np.meshgrid(xs, ys)

# Column vectors (400, 1) plus the combined (400, 2) coordinate table.
x_data = x_data.reshape(-1, 1)
y_data = y_data.reshape(-1, 1)
xy = np.hstack((x_data, y_data))

(3)计算理论解,构建数据集

def invstd(X, mu, sig):
    """Invert z-score normalisation: scale X by sig and shift back by mu."""
    return X * sig + mu


def sh(t):
    """Shorthand for the hyperbolic sine used by the Levy series."""
    return np.sinh(t)


def ch(t):
    """Shorthand for the hyperbolic cosine used by the Levy series."""
    return np.cosh(t)


def _levy_edge_coeffs(alpha):
    """Return the (H, G) Levy-series coefficients for half-wave argument alpha.

    Both coefficients share the same denominator, so compute it once.
    """
    den = alpha * sh(alpha) ** 2 - sh(alpha) * ch(alpha) - alpha * ch(alpha) ** 2
    return (sh(alpha) + alpha * ch(alpha)) / den, sh(alpha) / den


# Half-wave arguments of the m = 1 and m = 3 modes (alpha_m = m*pi*b/(2*a)).
a1 = pi / 2
a2 = 3 * pi / 2
H1, G1 = _levy_edge_coeffs(a1)
H2, G2 = _levy_edge_coeffs(a2)


# Containers for the analytical reference fields sampled on the grid
# (outer loop over y, inner over x — matches the meshgrid flattening above).
w1 = []     # deflection w
fx1 = []    # bending stress sigma_x at the surface z = d/2
fy1 = []    # bending stress sigma_y at the surface z = d/2
fxy1 = []   # twisting stress tau_xy at the surface z = d/2
wx1 = []    # dw/dx
wy1 = []    # dw/dy
wx21 = []   # d2w/dx2
x_real = np.linspace(0, 1, 20)
y_real = np.linspace(-0.5, 0.5, 20)

# Common factor of the two-term (m = 1, 3) Levy series.
c1 = 4*q/pi**5/D
for y in y_real:
    for x in x_real:
        # Deflection w(x, y): the m = 3 mode carries the extra 1/3**5 factor.
        w2_1 = c1*(H1*ch(pi*y)-G1*pi*y*sh(pi*y)+1)*np.sin(pi*x)
        w2_2 = c1*(H2*ch(3*pi*y)-G2*3*pi*y*sh(3*pi*y)+1)*np.sin(3*pi*x)/3**5
        w2 = round(w2_1+w2_2, 8)

        # First derivative with respect to x.
        wx1_1_1 = c1*(H1*ch(pi*y)-G1*pi*y*sh(pi*y)+1)*np.cos(pi*x)*pi
        wx1_1_2 = c1*(H2*ch(3*pi*y)-G2*3*pi*y*sh(3*pi*y)+1)*np.cos(3*pi*x)/3**5*3*pi
        wx1_1 = round(wx1_1_1+wx1_1_2, 8)

        # First derivative with respect to y.
        wy1_1_1 = c1*(H1*pi*sh(pi*y)-G1*pi*sh(pi*y)-G1*pi**2*y*ch(pi*y))*np.sin(pi*x)
        wy1_1_2 = c1/3**5*(H2*3*pi*sh(3*pi*y)-G2*3*pi*sh(3*pi*y)-G2*(3*pi)**2*y*ch(3*pi*y))*np.sin(3*pi*x)
        wy1_1 = round(wy1_1_1+wy1_1_2, 8)

        # Second derivative with respect to x.
        # BUGFIX: the m = 3 term previously used ch(2*pi*y)/sh(2*pi*y); the
        # hyperbolic argument must be 3*pi*y, consistent with w2_2 above,
        # since d2/dx2 only multiplies the mode by -(3*pi)**2.
        wx2_1 = -c1*(H1*ch(pi*y)-G1*pi*y*sh(pi*y)+1)*np.sin(pi*x)*pi**2
        wx2_2 = -c1*(H2*ch(3*pi*y)-G2*3*pi*y*sh(3*pi*y)+1)*np.sin(3*pi*x)/3**5*(3*pi)**2
        wx2 = round(wx2_1+wx2_2, 8)

        # Mixed second derivative with respect to x and y.
        wx1y1_1 = c1*(H1*pi*sh(pi*y)-G1*pi*sh(pi*y)-G1*pi**2*y*ch(pi*y))*np.cos(pi*x)*pi
        wx1y1_2 = c1/3**5*(H2*3*pi*sh(3*pi*y)-G2*3*pi*sh(3*pi*y)-G2*(3*pi)**2*y*ch(3*pi*y))*np.cos(3*pi*x)*3*pi
        wx1y1 = round(wx1y1_1+wx1y1_2, 8)

        # Second derivative with respect to y.
        wy2_1 = c1*(H1*pi**2*ch(pi*y)-G1*pi**2*ch(pi*y)-G1*pi**2*ch(pi*y)-G1*pi**3*y*sh(pi*y))*np.sin(pi*x)
        wy2_2 = c1/3**5*(H2*(3*pi)**2*ch(3*pi*y)-G2*(3*pi)**2*ch(3*pi*y)-G2*(3*pi)**2*ch(3*pi*y)-G2*(3*pi)**3*y*sh(3*pi*y))*np.sin(3*pi*x)
        wy2 = round(wy2_1+wy2_2, 8)

        # Bending/twisting moments per unit width (thin-plate theory).
        mx = -D*(wx2+v*wy2)
        my = -D*(wy2+v*wx2)
        mxy = -D*(1-v)*wx1y1

        # Surface stresses: sigma = M*z/I with I = d**3/12 and d = 1.
        fx2 = 12*mx*z
        fy2 = 12*my*z
        fxy2 = 12*mxy*z

        w1.append(w2)
        fx1.append(fx2)
        fy1.append(fy2)
        fxy1.append(fxy2)
        wx1.append(wx1_1)
        wy1.append(wy1_1)
        wx21.append(wx2)

# Column vectors (400, 1), ordered like the flattened sampling grid.
w1 = np.array(w1).reshape(-1, 1)
fx1 = np.array(fx1).reshape(-1, 1)
fy1 = np.array(fy1).reshape(-1, 1)
fxy1 = np.array(fxy1).reshape(-1, 1)
wx1 = np.array(wx1).reshape(-1, 1)
wy1 = np.array(wy1).reshape(-1, 1)
wx21 = np.array(wx21).reshape(-1, 1)

# Z-score normalisation of the reference fields; the raw statistics are
# reused later to map network outputs back to physical units.
def _zscore(field):
    """Standardise a column vector to zero mean and unit variance."""
    return (field - field.mean()) / field.std()


w3 = _zscore(w1)
fx3 = _zscore(fx1)
fy3 = _zscore(fy1)
fxy3 = _zscore(fxy1)

# Training table, one row per sample: [x, y, w, sigma_x, sigma_y, tau_xy].
train_1 = np.concatenate([w3, fx3], axis=1)
train_2 = np.concatenate([train_1, fy3], axis=1)
train_3 = np.concatenate([train_2, fxy3], axis=1)
train_data = np.concatenate([xy, train_3], axis=1)

3. 构建神经网络

(1) 输入

# Spatial coordinates as the two network inputs.
x, y = (sn.Variable(label, dtype='float32') for label in ('x', 'y'))

(2) 输出

# Four independent networks (6 hidden layers x 20 tanh units each): one for
# the deflection field and one per stress component.
w_1, sigam_x1, sigam_y1, sigam_xy1 = (
    sn.Functional(label, [x, y], 6 * [20], 'tanh')
    for label in ('w', 'sigam_x', 'sigam_y', 'sigam_xy')
)

(3) 反演参数,初始值设置为0.5

# Trainable boundary-identification weights, all initialised at 0.5.
lambda1, lambda2, lambda3, lambda4 = (
    sn.Parameter(0.5, inputs=[x, y], name="lambda{}".format(i))
    for i in (1, 2, 3, 4)
)

4. 损失函数

(1) 归一化还原

# Map the normalised network outputs back to physical units using the
# statistics of the corresponding analytical data columns.
w, sigam_x, sigam_y, sigam_xy = (
    invstd(net, ref.mean(), ref.std())
    for net, ref in ((w_1, w1), (sigam_x1, fx1), (sigam_y1, fy1), (sigam_xy1, fxy1))
)

(2) 求导设置

# First derivatives of the deflection surface (mid-plane slopes).
w_x1 = diff(w,x)
w_y1 = diff(w,y)

# Curvatures and twist of the mid-surface.
w_x2 = diff(w,x,order=2)
w_y2 = diff(w,y,order=2)
w_xy = diff(w_x1,y)

# Kirchhoff strains at fibre distance z from the mid-plane.
u_x=- z *w_x2
u_y=- z *w_y2
u_xy = -2.0*z*w_xy

# Fourth derivatives for the biharmonic plate equation D*nabla^4(w) = q.
w_x4=diff(w, x, order=4)
w_y4=diff(w, y, order=4)
w_x2y2 = diff(w_x2, y ,order=2)

(3) 构建边界识别项

# Characteristic scales for non-dimensionalising the individual loss terms.
SBAR = w1.std()
WBAR = np.min([fx1.std(), fy1.std(), fxy1.std()])
WXBAR = wx1.std()
WX2BAR = wx21.std()


# BUGFIX: the original guard `10**(-10000)` underflows to exactly 0.0 in IEEE
# double precision, so it gave no protection against log(0) -> -inf/NaN.
# EPS is a representable guard that is still negligible relative to the
# normalised quantities.  The same guard is now also applied to the y-edge
# terms below, which previously had none.
EPS = 1e-30
TOL = 0.0000

# Edge indicator: (1.0 - sign(x - TOL)) is nonzero only on the edge x = 0
# (sign(0) = 0 there; interior points with x > 0 give 0), and analogously
# for the other three edges.  Each lambda_i weights a slope-based condition
# (w') against a moment-like condition (w'' + v*w'') — presumably selecting
# the boundary type as in Ref. [1]; confirm against the paper.
l9 = lambda1*(1.0-sign(x-TOL))*log((w_x1/WXBAR)**2 + EPS)*0.5
l10 = (1-lambda1)*(1.0-sign(x-TOL))*log((w_x2/WX2BAR+v*w_y2/WX2BAR)**2 + EPS)*0.5

l11 = lambda2*(1.0+sign(x+TOL-1))*log((w_x1/WXBAR)**2 + EPS)*0.5
l12 = (1-lambda2)*(1.0+sign(x+TOL-1))*log((w_x2/WX2BAR+v*w_y2/WX2BAR)**2 + EPS)*0.5

l13 = lambda3*(1.0-sign(y-TOL+0.5))*log(abs(w_y1/WXBAR) + EPS)
l14 = (1-lambda3)*(1.0-sign(y-TOL+0.5))*log(abs(w_y2/WX2BAR+v*w_x2/WX2BAR) + EPS)

l15 = lambda4*(1.0+sign(y+TOL-0.5))*log(abs(w_y1/WXBAR) + EPS)
l16 = (1-lambda4)*(1.0+sign(y+TOL-0.5))*log(abs(w_y2/WX2BAR+v*w_x2/WX2BAR) + EPS)

(4) 损失函数

# Data-fitting targets: the networks must reproduce the observed
# (normalised) deflection and stress fields at the sample points.
L1 = sn.Data(w_1)
L2 = sn.Data(sigam_x1)
L3 = sn.Data(sigam_y1)
L4 = sn.Data(sigam_xy1)
# Physics residuals: the biharmonic plate equation (L5) and the
# plane-stress constitutive relations (L6-L8), each scaled by WBAR.
L5 = sn.Tie(-w_x4/WBAR,w_y4/WBAR+2.0*w_x2y2/WBAR-q/D/WBAR)
L6 = sn.Tie(sigam_x/WBAR,E/(1.0-v**2)*(u_x+v*u_y)/WBAR)
L7 = sn.Tie(sigam_y/WBAR,E/(1.0-v**2)*(u_y+v*u_x)/WBAR)
L8 = sn.Tie(sigam_xy/WBAR,E/(2.0*(1+v))*u_xy/WBAR)

# Boundary-identification targets built from the log-terms l9-l16.
# NOTE(review): the x-edges use a Tie + Data pair per edge while the y-edges
# use two Ties followed by two Data terms — the asymmetry looks intentional
# but should be confirmed against Ref. [1].
L9 = sn.Tie(-l9,l10)
L10 = sn.Data(-l10)
L11 = sn.Tie(-l11,l12)
L12 = sn.Data(-l12)
L13 = sn.Tie(-l13,l14)
L14 = sn.Tie(-l15,l16)
L15 = sn.Data(-l13)
L16 = sn.Data(-l15)

5. 优化模型

# Assemble the SciANN model: 16 targets in the exact order expected by
# target_data below, with an MSE loss applied to every target.
model = sn.SciModel(
    inputs=[x, y],
    targets=[L1,L2,L3,L4,L5,L6,L7,L8,L9,L10,L11,L12,L13,L14,L15,L16],
    loss_func="mse",  
)

6. 训练

# Supervised targets: the four normalised fields stored in columns 2-5
# of the training table (w, sigma_x, sigma_y, tau_xy).
data_L1, data_L2, data_L3, data_L4 = (train_data[:, c] for c in (2, 3, 4, 5))

# All physics, constitutive and boundary residuals are driven to zero.
data_L5 = data_L6 = data_L7 = data_L8 = 'zeros'
data_L9 = data_L10 = data_L11 = data_L12 = 'zeros'
data_L13 = data_L14 = data_L15 = data_L16 = 'zeros'

# Order must match the `targets` list given to the SciModel.
target_data = [
    data_L1, data_L2, data_L3, data_L4,
    data_L5, data_L6, data_L7, data_L8,
    data_L9, data_L10, data_L11, data_L12,
    data_L13, data_L14, data_L15, data_L16,
]

# Network inputs: the x and y sample coordinates (columns 0 and 1).
input_data = [train_data[:, 0], train_data[:, 1]]

# Wall-clock timing of the training run.
t=time.time()

# Train for 2500 epochs: the four lambda parameters are logged every epoch
# and the loss weights are re-balanced every 100 epochs with the NTK scheme.
history = model.train(
      input_data, target_data ,  
      batch_size=32,
      epochs=2500, 
      log_parameters={'parameters':[lambda1, lambda2,lambda3,lambda4], 'freq':1},
      adaptive_weights={'method':'NTK', 'freq': 100}, 
      verbose=0
      )

t = time.time() - t

7. 结果

# Report the identified boundary-type parameters after training.
print("lambda1: {},  lambda2: {},  lambda3: {},  lambda4: {}".format(lambda1.value, lambda2.value,lambda3.value, lambda4.value))

# One semilog trace per identified parameter across the training epochs;
# ax.flat iterates the 2x2 grid row-major, matching the lambda order.
fig, ax = plt.subplots(2, 2, figsize=(12, 10))

for axis, label in zip(ax.flat, ('lambda1', 'lambda2', 'lambda3', 'lambda4')):
    axis.semilogy(history.history[label])
    axis.set_xlabel('epochs')
    axis.set_ylabel('')
    axis.set_title(label, fontsize=12)

plt.show()

  • 30
    点赞
  • 22
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值