Solving Differential Equations with a Neural Network in Python (PyTorch Framework)

This article shows how to build a neural network with Python's PyTorch framework to approximate the solutions of first- and second-order ordinary differential equations. It sets the initial conditions, defines the network architecture, trains the model while scheduling the learning rate, obtains an approximate solution, and plots the loss curve over the iterations.


First-Order Ordinary Differential Equation

To be added later.

Second-Order Ordinary Differential Equation

The differential equation and its analytical solution are given below; we now approximate the solution with a neural network:

$$y''(x) + \frac{1}{5}\,y'(x) + y(x) = -\frac{1}{5}\,e^{-x/5}\cos x, \qquad y(0) = 0,\quad y'(0) = 1, \qquad x \in [0, 1]$$

The analytical solution is $y(x) = e^{-x/5}\sin x$.
The programming language is Python and the framework is PyTorch.
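To enforce the initial conditions exactly, the raw network output $N(x)$ is wrapped in a trial solution; this is the construction used in the predict function in the code below:

$$g(x) = x^2 N(x) + y'(0)\,x + y(0)$$

so that $g(0) = y(0)$, and since $g'(x) = 2x\,N(x) + x^2 N'(x) + y'(0)$, also $g'(0) = y'(0)$, for any network $N$. The network is then trained to minimize the mean squared residual of the ODE, i.e. $\mathrm{MSE}\big(g'' + g'/5 + g,\; -\tfrac{1}{5}e^{-x/5}\cos x\big)$, with the derivatives of $g$ approximated by finite differences.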

[Figure: approximation curve, predicted solution vs. analytical solution]

[Figure: loss over training iterations]

Run output
500/500: 100%|████████████████████████████| 500/500 [00:16<00:00, 30.02it/s, lr=0.001, loss=0.00252]

Process finished with exit code 0

Code

import numpy as np
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
from tqdm import tqdm
import math

# same_seed: fix all random sources so results are reproducible
def same_seed(seed):
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True



# Initial conditions: y(0) = 0, y'(0) = 1
g0 = 0
g0_diff = 1
# Collocation points on [0, 1] and the analytical solution (used for plotting)
x_real = torch.linspace(0, 1, 500).view(-1, 1)
y_real = torch.exp(-x_real / 5) * torch.sin(x_real)

x = x_real.numpy()
y = y_real.numpy()
device = "cuda:0" if torch.cuda.is_available() else "cpu"

# Step size for the finite-difference derivative approximations
inf_s = 1e-3

# Trial solution: g(x) = x^2 * N(x) + g0_diff * x + g0,
# which satisfies g(0) = g0 and g'(0) = g0_diff by construction
def predict(model, x_real, g0_diff, g0):
    train_x = x_real.to(device)
    u_diff_pred = model(train_x) * train_x ** 2 + g0_diff * train_x + g0
    return u_diff_pred


class Basic_Unit(nn.Module):
    def __init__(self, input_dim, hidden_dim):
        super(Basic_Unit, self).__init__()
        self.layers = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU()
            #nn.LeakyReLU(),
            #nn.BatchNorm1d(hidden_dim),
            #nn.Dropout(0.5)
        )

    def forward(self, x):
        return self.layers(x)


class Neuron_fun(nn.Module):
    def __init__(self, input_dim, hidden_dim, output_dim, hidden_layers):
        super(Neuron_fun, self).__init__()

        self.layers = nn.Sequential(
            Basic_Unit(input_dim, hidden_dim),

            *[Basic_Unit(hidden_dim, hidden_dim) for _ in range(hidden_layers)],

            nn.Linear(hidden_dim, output_dim)
        )
    def forward(self, x):
        return self.layers(x)
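
# With the hyperparameters used below, Neuron_fun(1, 256, 1, 2) stacks:
#   Linear(1, 256) + ReLU  ->  2 x [Linear(256, 256) + ReLU]  ->  Linear(256, 1)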



# MSE between the ODE residual g'' + g'/5 + g and the right-hand side f(x)
criterion = nn.MSELoss()



def train(model, lr, iterations, weight_decay):
    optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
    # The scheduler must be assigned and stepped each iteration to take effect
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=50, eta_min=0)
    #torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=10)
    model.train()
    tqbar = tqdm(range(iterations), ncols=100)
    costs = []
    best_loss = math.inf

    for epoch in tqbar:
        tqbar.set_description(f"{epoch+1}/{iterations}")
        train_x = x_real.to(device)
        # Right-hand side of the ODE: f(x) = -(1/5) * exp(-x/5) * cos(x)
        train_y = -torch.exp(-train_x / 5) / 5 * torch.cos(train_x)


        # Forward differences with step h = inf_s:
        # g' ~ (g(x+h) - g(x)) / h,  g'' ~ (g(x) - 2g(x+h) + g(x+2h)) / h^2
        y_nn = predict(model, train_x, g0_diff, g0)
        y_nn_h = predict(model, train_x + inf_s, g0_diff, g0)
        y_nn_2h = predict(model, train_x + 2 * inf_s, g0_diff, g0)
        y_nn_diff = (y_nn_h - y_nn) / inf_s
        y_nn_diff2 = (y_nn - 2 * y_nn_h + y_nn_2h) / inf_s ** 2
        # ODE residual terms: g'' + g'/5 + g should match f(x)
        y_pred = y_nn_diff2 + y_nn_diff / 5 + y_nn


        loss = criterion(y_pred, train_y)
        costs.append(loss.item())

        tqbar.set_postfix({"lr":optimizer.param_groups[0]["lr"],"loss":loss.item()})

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        scheduler.step()

        # Checkpoint the model whenever the loss improves
        if best_loss > loss.item():
            best_loss = loss.item()
            torch.save(model.state_dict(), "./model.ckpt")


    return model, costs

same_seed(1024)

model = Neuron_fun(input_dim=1, hidden_dim=256, output_dim=1, hidden_layers=2).to(device)
#print(model)
lr = 1e-3
iterations = 500
weight_decay = 0

model, costs = train(model, lr, iterations, weight_decay)

# Reload the best checkpoint saved during training
model.load_state_dict(torch.load("./model.ckpt"))


model.eval()
with torch.no_grad():
    pred_y = predict(model, x_real, g0_diff=1, g0 = 0).cpu().numpy()
    plt.figure(0)
    plt.plot(x, y, "g-")
    plt.plot(x, pred_y, "r-")
    plt.legend(["real", "pred"])

    plt.figure(1)
    plt.plot(range(iterations), costs, "g-")
    plt.xlabel("iterations")
    plt.ylabel("loss")
    plt.title("loss vs. iterations")
    plt.show()
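
As an aside, the finite-difference derivatives above can also be replaced by exact derivatives from torch.autograd.grad. The following is a minimal sketch of that variant, not part of the original script; it assumes model, predict, g0, g0_diff, and device are defined as above:

# Collocation points must require gradients for autograd differentiation
train_x = torch.linspace(0, 1, 500, device=device).view(-1, 1).requires_grad_(True)

g = predict(model, train_x, g0_diff, g0)
# First derivative g'(x); create_graph=True keeps the graph for the second derivative
g_x = torch.autograd.grad(g, train_x, grad_outputs=torch.ones_like(g), create_graph=True)[0]
# Second derivative g''(x); create_graph=True again so loss.backward() can reach the weights
g_xx = torch.autograd.grad(g_x, train_x, grad_outputs=torch.ones_like(g_x), create_graph=True)[0]

# ODE residual: g'' + g'/5 + g - f(x), with f(x) = -(1/5) * exp(-x/5) * cos(x)
residual = g_xx + g_x / 5 + g + torch.exp(-train_x / 5) / 5 * torch.cos(train_x)
loss = (residual ** 2).mean()

One caveat: a ReLU network is piecewise linear, so its own second derivative vanishes almost everywhere. Here the x^2 factor in the trial solution still yields a nonzero g'', but a smooth activation such as nn.Tanh() is the more common choice when differentiating through the network with autograd.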