Update tutorials (#463)

---------

Co-authored-by: Dario Coscia <93731561+dario-coscia@users.noreply.github.com>
Authored by Matteo Bertocchi on 2025-02-26 16:21:12 +01:00, committed by FilippoOlivo
parent 8b797d589a
commit bd9b49530a
30 changed files with 3057 additions and 1453 deletions


@@ -25,7 +25,7 @@
# Let's start with useful imports.
# In[ ]:
# In[1]:
## routine needed to run the notebook on Google Colab
@@ -42,25 +42,27 @@ if IN_COLAB:
get_ipython().system('wget "https://github.com/mathLab/PINA/raw/refs/heads/master/tutorials/tutorial7/data/pts_0.5_0.5" -O "data/pts_0.5_0.5"')
import matplotlib.pyplot as plt
plt.style.use('tableau-colorblind10')
import torch
from pytorch_lightning.callbacks import Callback
import warnings
from pina.problem import SpatialProblem, InverseProblem
from pina.operators import laplacian
from pina.operator import laplacian
from pina.model import FeedForward
from pina.equation import Equation, FixedValue
from pina import Condition, Trainer
from pina.solvers import PINN
from pina.solver import PINN
from pina.domain import CartesianDomain
warnings.filterwarnings('ignore')
# Then, we import the pre-saved data, for ($\mu_1$, $\mu_2$)=($0.5$, $0.5$). These two values are the optimal parameters that we want to find through the neural network training. In particular, we import the `input_points`(the spatial coordinates), and the `output_points` (the corresponding $u$ values evaluated at the `input_points`).
# Then, we import the pre-saved data for ($\mu_1$, $\mu_2$) = ($0.5$, $0.5$). These two values are the optimal parameters that we want to find through the neural network training. In particular, we import the `input` points (the spatial coordinates) and the `target` points (the corresponding $u$ values evaluated at the `input` points).
# In[2]:
data_output = torch.load('data/pinn_solution_0.5_0.5').detach()
data_input = torch.load('data/pts_0.5_0.5')
data_output = torch.load('data/pinn_solution_0.5_0.5', weights_only=False).detach()
data_input = torch.load('data/pts_0.5_0.5', weights_only=False)
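# As a quick sanity check (a sketch, not part of the tutorial; it assumes both objects are PINA `LabelTensor`s), one can inspect the loaded data:
print(type(data_input), data_input.shape)    # spatial coordinates
print(type(data_output), data_output.shape)  # corresponding u values
print(data_input.labels)                     # expected to contain 'x' and 'y'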
# Moreover, let's also plot the data points and the reference solution: this is the expected output of the neural network.
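# The plotting cell itself is unchanged and not shown in this hunk; a minimal sketch of such a plot (assuming `data_input` carries 'x' and 'y' labels) could be:
plt.scatter(data_input.extract(['x']).detach().flatten(),
            data_input.extract(['y']).detach().flatten(),
            c=data_output.detach().flatten(), s=8)
plt.colorbar(label='u')
plt.title('Reference solution at the imported points')
plt.show()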
@@ -81,7 +83,7 @@ plt.show()
# Then, we initialize the Poisson problem, which inherits from both the `SpatialProblem` and the `InverseProblem` classes. Here we have to define all the variables, together with the domain in which the unknown parameters ($\mu_1$, $\mu_2$) live. Notice that the Laplace equation also takes the unknown variables as inputs; these are treated as parameters that the neural network optimizes during training.
# In[ ]:
# In[4]:
### Define ranges of variables
@@ -127,7 +129,7 @@ class Poisson(SpatialProblem, InverseProblem):
'phys_cond': Condition(domain=CartesianDomain({'x': [x_min, x_max], 'y': [y_min, y_max]
}),
equation=Equation(laplace_equation)),
'data': Condition(input_points=data_input.extract(['x', 'y']), output_points=data_output)
'data': Condition(input=data_input.extract(['x', 'y']), target=data_output)
}
problem = Poisson()
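# For context, the unchanged parts of this cell roughly follow the structure sketched below; the domain extrema, the parameter range, and the Gaussian forcing centered at ($\mu_1$, $\mu_2$) are assumptions, not the tutorial's exact code.
x_min, x_max, y_min, y_max = -2.0, 2.0, -2.0, 2.0  # assumed extrema

class PoissonSketch(SpatialProblem, InverseProblem):
    output_variables = ['u']
    spatial_domain = CartesianDomain({'x': [x_min, x_max], 'y': [y_min, y_max]})
    # admissible range of the unknown parameters (bounds assumed)
    unknown_parameter_domain = CartesianDomain({'mu1': [-1, 1], 'mu2': [-1, 1]})

    def laplace_equation(input_, output_, params_):
        # assumed Gaussian source centered at the unknown (mu1, mu2)
        force_term = torch.exp(
            -2.0 * (input_.extract(['x']) - params_['mu1']) ** 2
            - 2.0 * (input_.extract(['y']) - params_['mu2']) ** 2)
        return laplacian(output_, input_, components=['u'], d=['x', 'y']) - force_term

    conditions = {
        'bound_cond1': Condition(domain=CartesianDomain({'x': [x_min, x_max], 'y': y_min}),
                                 equation=FixedValue(0.0)),
        'bound_cond2': Condition(domain=CartesianDomain({'x': [x_min, x_max], 'y': y_max}),
                                 equation=FixedValue(0.0)),
        'bound_cond3': Condition(domain=CartesianDomain({'x': x_min, 'y': [y_min, y_max]}),
                                 equation=FixedValue(0.0)),
        'bound_cond4': Condition(domain=CartesianDomain({'x': x_max, 'y': [y_min, y_max]}),
                                 equation=FixedValue(0.0)),
        'phys_cond': Condition(domain=CartesianDomain({'x': [x_min, x_max], 'y': [y_min, y_max]}),
                               equation=Equation(laplace_equation)),
        'data': Condition(input=data_input.extract(['x', 'y']), target=data_output),
    }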
@@ -148,12 +150,12 @@ model = FeedForward(
# After that, we discretize the spatial domain.
# In[ ]:
# In[6]:
problem.discretise_domain(20, 'grid', locations=['phys_cond'], variables=['x', 'y'])
problem.discretise_domain(1000, 'random', locations=['bound_cond1', 'bound_cond2',
'bound_cond3', 'bound_cond4'], variables=['x', 'y'])
problem.discretise_domain(20, 'grid', domains=['phys_cond'])
problem.discretise_domain(1000, 'random', domains=['bound_cond1', 'bound_cond2',
'bound_cond3', 'bound_cond4'])
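# The model cell referenced by the earlier hunk header (`model = FeedForward(`) is not shown in full; a minimal sketch, with assumed hidden sizes and activation, could be:
model = FeedForward(
    input_dimensions=len(problem.input_variables),
    output_dimensions=len(problem.output_variables),
    layers=[20, 20, 20],      # assumed hidden layer sizes
    func=torch.nn.Softplus,   # assumed activation
)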
# Here, we define a simple callback for the trainer, which we use to save the parameters predicted by the neural network during training. The parameters are saved every 100 epochs as `torch` tensors in a specified directory (`tmp_dir` in our case).
@@ -162,6 +164,7 @@ problem.discretise_domain(1000, 'random', locations=['bound_cond1', 'bound_cond2
# In[7]:
from lightning.pytorch.callbacks import Callback
# temporary directory for saving logs of training
tmp_dir = "tmp_poisson_inverse"
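# The body of `SaveParameters` is unchanged and therefore elided from this hunk; a minimal sketch of such a callback (assuming the learned parameters are exposed as `problem.unknown_parameters`, keyed by variable name) could be:
import os

class SaveParametersSketch(Callback):
    # save the current values of the unknown parameters every 100 epochs
    def on_train_epoch_end(self, trainer, pl_module):
        epoch = trainer.current_epoch
        if epoch % 100 == 99:  # epochs 99, 199, ...
            os.makedirs(tmp_dir, exist_ok=True)
            params = {name: p.detach().clone()
                      for name, p in pl_module.problem.unknown_parameters.items()}
            torch.save(params, '{}/parameters_epoch{}'.format(tmp_dir, epoch))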
@@ -176,15 +179,19 @@ class SaveParameters(Callback):
# Then, we define the `PINN` object and train the solver using the `Trainer`.
# In[ ]:
# In[8]:
### train the problem with PINN
from pina.optim import TorchOptimizer
max_epochs = 5000
pinn = PINN(problem, model, optimizer_kwargs={'lr':0.005})
pinn = PINN(problem, model, optimizer=TorchOptimizer(torch.optim.Adam, lr=0.005))
# define the trainer for the solver
trainer = Trainer(solver=pinn, accelerator='cpu', max_epochs=max_epochs,
default_root_dir=tmp_dir, callbacks=[SaveParameters()])
default_root_dir=tmp_dir, enable_model_summary=False, callbacks=[SaveParameters()],
train_size=1.0,
val_size=0.0,
test_size=0.0)
trainer.train()
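# Once training is over, the learned values can also be read off directly (again a sketch, assuming the `unknown_parameters` attribute):
for name, value in pinn.problem.unknown_parameters.items():
    print(name, value.data)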
@@ -196,7 +203,7 @@ trainer.train()
epochs_saved = range(99, max_epochs, 100)
parameters = torch.empty((int(max_epochs/100), 2))
for i, epoch in enumerate(epochs_saved):
params_torch = torch.load('{}/parameters_epoch{}'.format(tmp_dir, epoch))
params_torch = torch.load('{}/parameters_epoch{}'.format(tmp_dir, epoch), weights_only=False)
for e, var in enumerate(pinn.problem.unknown_variables):
parameters[i, e] = params_torch[var].data
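# A simple way to visualise the result (a sketch, not part of the diff) is to plot the two trajectories against the true value 0.5:
plt.plot(list(epochs_saved), parameters[:, 0].tolist(), label=r'$\mu_1$')
plt.plot(list(epochs_saved), parameters[:, 1].tolist(), label=r'$\mu_2$')
plt.axhline(0.5, color='k', linestyle='--', label='true value')
plt.xlabel('epoch')
plt.ylabel('parameter value')
plt.legend()
plt.show()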