Solving PDEs with PINNs in TensorFlow 2.0
This article is based on two posts by CSDN blogger **刘文凯**, rewriting their PyTorch code in TensorFlow 2.0 for reference:
PINN: Learning and Experiments (Part 1)
PINN: Learning and Experiments (Part 2)
1. Solving a simple PDE with a PINN
Given:
$$
\begin{cases}
f'(x) = f(x) \\
f(0) = 1
\end{cases}
$$
Find $f(x)$.
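For reference, this initial-value problem has the closed-form solution

$$f(x) = e^x,$$

since then $f'(x) = e^x = f(x)$ and $f(0) = e^0 = 1$; the training loop below uses `tf.exp` as this ground truth when plotting.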
The TensorFlow 2.0 code is as follows:
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# magic command for an interactive backend in a Jupyter notebook
%matplotlib auto

# Model definition
class Net(tf.keras.Model):
    def __init__(self, NN):
        super(Net, self).__init__()
        self.input_layer = tf.keras.layers.Dense(NN)   # input size (1) is inferred on first call
        self.hidden_layer = tf.keras.layers.Dense(NN)
        self.output_layer = tf.keras.layers.Dense(1)

    def call(self, x):
        out = tf.tanh(self.input_layer(x))
        out = tf.tanh(self.hidden_layer(out))
        out = self.output_layer(out)
        return out

net = Net(20)  # 3 layers: 20/20/1
# Loss function
mse = tf.keras.losses.MeanSquaredError()
# Optimizer
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-4)
# Compile (optional here, since training uses the custom loop below)
net.compile(optimizer, loss=mse)

plt.ion()  # interactive, live-updating plot
fig = plt.figure(figsize=(6, 5))
iterations = 20000
for epoch in range(iterations):
    with tf.GradientTape() as tape:
        # Boundary loss: f(0) = 1, enforced on 2000 copies of the point x = 0
        x_0 = tf.zeros((2000, 1))
        y_0 = net(x_0)
        mse_i = mse(y_0, tf.ones((2000, 1)))
        # ODE loss: f'(x) = f(x) at random collocation points in (0, 2)
        x_in = tf.Variable(tf.random.uniform((2000, 1), dtype=tf.float32, minval=0.0, maxval=2.0))
        with tf.GradientTape() as t:
            y_hat = net(x_in)
        dy_dx = t.gradient(y_hat, x_in)
        mse_f = mse(y_hat, dy_dx)
        loss = mse_i + mse_f
    gradients = tape.gradient(loss, net.trainable_variables)
    optimizer.apply_gradients(zip(gradients, net.trainable_variables))
    if (epoch + 1) % 100 == 0:
        fig.clf()  # clear the current Figure
        fig.suptitle("epoch: %d" % (epoch + 1))
        ax = fig.add_subplot(111)
        y_real = tf.exp(x_in)  # ground truth
        y_pred = net(x_in)     # network prediction
        ax.scatter(x_in.numpy(), y_real.numpy(), label="true")
        ax.scatter(x_in.numpy(), y_pred.numpy(), c='red', label="pred")
        ax.legend()
        plt.pause(0.1)
plt.show()
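As a quick sanity check after (or during) training, the network can be compared against the exact solution on an evenly spaced test grid rather than the random training points. A minimal sketch, assuming the trained `net` from above is in scope:

```python
import numpy as np
import tensorflow as tf

# Evenly spaced test grid on the training interval (0, 2)
x_test = tf.constant(np.linspace(0.0, 2.0, 201).reshape(-1, 1), dtype=tf.float32)
y_pred = net(x_test)     # PINN prediction
y_true = tf.exp(x_test)  # exact solution e^x

# Worst-case and relative L2 error over the grid
max_err = tf.reduce_max(tf.abs(y_pred - y_true))
rel_l2 = tf.norm(y_pred - y_true) / tf.norm(y_true)
print("max abs error: %.3e, relative L2 error: %.3e" % (max_err.numpy(), rel_l2.numpy()))
```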
The result after 3,000 iterations is shown below:
The result after 10,000 iterations is shown below; it is already close to the true solution.
Note: the PINN does eventually recover the true solution; the final figure is omitted here because the two curves coincide.
2. Solving a more complex PDE with a PINN
Given:
$$
\begin{cases}
u_t + u\,u_x - w\,u_{xx} = 0, & (1) \\
u(0,x) = -\sin(\pi x), & (2) \\
u(t,1) = 0, & (3) \\
u(t,-1) = 0, & (4) \\
w = \frac{0.01}{\pi},\quad x \in (-1,1),\quad t \in (0,1)
\end{cases}
$$
Find $u = u(t,x)$.
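Equation (1) is the viscous Burgers' equation, a standard PINN benchmark. The PINN treats (1) as a residual to be driven to zero at randomly sampled collocation points $(t_i, x_i)$,

$$f(t,x) := u_t + u\,u_x - w\,u_{xx}, \qquad \text{mse}_1 = \frac{1}{N}\sum_{i=1}^{N} f(t_i, x_i)^2,$$

while the initial and boundary conditions (2), (3), (4) contribute analogous mean-squared penalty terms; these correspond exactly to the four loss terms `mse_1` through `mse_4` built in the code below.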
The TensorFlow 2.0 code is as follows:
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
%matplotlib auto

# Model definition
class Net(tf.keras.Model):
    def __init__(self, NN):
        super(Net, self).__init__()
        self.input_layer = tf.keras.layers.Dense(NN)   # input size (2: t and x) is inferred on first call
        self.hidden_layer = tf.keras.layers.Dense(NN)
        self.hidden_layer_2 = tf.keras.layers.Dense(NN)
        self.output_layer = tf.keras.layers.Dense(1)

    def call(self, x):
        out = tf.tanh(self.input_layer(x))
        out = tf.tanh(self.hidden_layer(out))
        out = tf.tanh(self.hidden_layer_2(out))
        out = self.output_layer(out)
        return out

net = Net(256)  # 4 layers: 256/256/256/1
# Loss function
mse = tf.keras.losses.MeanSquaredError()
# Optimizer
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-4)
# Compile (optional here, since training uses the custom loop below)
net.compile(optimizer, loss=mse)

plt.ion()  # interactive, live-updating plot
fig = plt.figure(figsize=(6, 5))
iterations = 20000
# Constant tensors for the initial and boundary conditions
w = 0.01 / np.pi
t_bc_zeros = tf.constant(np.zeros((2000, 1)), dtype=tf.float32)
x_in_pos_one = tf.constant(np.ones((2000, 1)), dtype=tf.float32)
x_in_neg_one = tf.constant(-np.ones((2000, 1)), dtype=tf.float32)
u_in_zeros = tf.constant(np.zeros((2000, 1)), dtype=tf.float32)
for epoch in range(iterations):
    with tf.GradientTape() as tape:
        # Sample fresh collocation points each iteration
        t_var = tf.Variable(tf.random.uniform((2000, 1), dtype=tf.float32, minval=0.0, maxval=1.0))
        x_var = tf.Variable(tf.random.uniform((2000, 1), dtype=tf.float32, minval=-1.0, maxval=1.0))
        # First- and second-order partial derivatives via nested tapes
        with tf.GradientTape(persistent=True) as tape_xx:
            with tf.GradientTape(persistent=True) as tape_x:
                u_hat = net(tf.concat([t_var, x_var], axis=1))
            du_dt = tape_x.gradient(u_hat, t_var)
            du_dx = tape_x.gradient(u_hat, x_var)
        du_dxx = tape_xx.gradient(du_dx, x_var)
        # Eq. (1): PDE residual
        eq1_1 = du_dt + u_hat * du_dx - w * du_dxx
        mse_1 = mse(eq1_1, u_in_zeros)
        # Eq. (2): initial condition u(0, x) = -sin(pi * x)
        eq2_1 = net(tf.concat([t_bc_zeros, x_var], axis=1))
        eq2_2 = -tf.sin(np.pi * x_var)
        mse_2 = mse(eq2_1, eq2_2)
        # Eq. (3): boundary condition u(t, 1) = 0
        eq3_1 = net(tf.concat([t_var, x_in_pos_one], axis=1))
        mse_3 = mse(eq3_1, u_in_zeros)
        # Eq. (4): boundary condition u(t, -1) = 0
        eq4_1 = net(tf.concat([t_var, x_in_neg_one], axis=1))
        mse_4 = mse(eq4_1, u_in_zeros)
        loss = mse_1 + mse_2 + mse_3 + mse_4
    gradients = tape.gradient(loss, net.trainable_variables)
    optimizer.apply_gradients(zip(gradients, net.trainable_variables))
    if (epoch + 1) % 100 == 0:
        t = np.linspace(0, 1, 100)
        x = np.linspace(-1, 1, 256)
        ms_t, ms_x = np.meshgrid(t, x)
        # Flatten to column vectors and cast to float32 to match the network's dtype
        x = np.ravel(ms_x).reshape(-1, 1).astype(np.float32)
        t = np.ravel(ms_t).reshape(-1, 1).astype(np.float32)
        pt_u = net(tf.concat([t, x], axis=1))
        u = pt_u.numpy().reshape(ms_t.shape)
        fig.clf()  # clear the current Figure
        ax = fig.add_subplot(111, projection='3d')
        ax.set_zlim([-1, 1])
        # Annotate the current epoch on the plot
        ax.text(0, 0, 1, "epoch:%d" % (epoch + 1), color='black')
        ax.plot_surface(ms_t, ms_x, u, cmap=cm.RdYlBu_r, edgecolor='blue', linewidth=0.0003, antialiased=True)
        ax.set_xlabel('t')
        ax.set_ylabel('x')
        ax.set_zlabel('u')
        plt.pause(0.1)
plt.show()
The result after 20,000 iterations is shown below (in practice, nowhere near this many iterations are needed):
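One practical note: the eager-mode loop above is easy to read but slow. A common speed-up is to wrap a single training step in `tf.function`. Below is a minimal sketch, assuming `net`, `mse`, `optimizer`, `w`, `t_bc_zeros`, `x_in_pos_one`, `x_in_neg_one`, and `u_in_zeros` from above are in scope; the collocation points become plain tensors watched explicitly with `tape.watch`, since new variables should not be created inside a `tf.function`:

```python
@tf.function
def train_step():
    # Fresh collocation points each call; plain tensors, watched explicitly below
    t_in = tf.random.uniform((2000, 1), minval=0.0, maxval=1.0)
    x_in = tf.random.uniform((2000, 1), minval=-1.0, maxval=1.0)
    with tf.GradientTape() as tape:
        with tf.GradientTape(persistent=True) as tape_xx:
            tape_xx.watch(x_in)
            with tf.GradientTape(persistent=True) as tape_x:
                tape_x.watch([t_in, x_in])
                u_hat = net(tf.concat([t_in, x_in], axis=1))
            du_dt = tape_x.gradient(u_hat, t_in)
            du_dx = tape_x.gradient(u_hat, x_in)
        du_dxx = tape_xx.gradient(du_dx, x_in)
        residual = du_dt + u_hat * du_dx - w * du_dxx  # eq. (1)
        loss = (mse(residual, u_in_zeros)
                + mse(net(tf.concat([t_bc_zeros, x_in], axis=1)),
                      -tf.sin(np.pi * x_in))                                      # eq. (2)
                + mse(net(tf.concat([t_in, x_in_pos_one], axis=1)), u_in_zeros)   # eq. (3)
                + mse(net(tf.concat([t_in, x_in_neg_one], axis=1)), u_in_zeros))  # eq. (4)
    gradients = tape.gradient(loss, net.trainable_variables)
    optimizer.apply_gradients(zip(gradients, net.trainable_variables))
    return loss
```

Calling `train_step()` inside the epoch loop then replaces the tape section of the original loop, with the plotting code unchanged.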