Update tutorials 1 through 12 to current version 0.2

This commit is contained in:
Matteo Bertocchi
2025-02-26 16:21:12 +01:00
committed by Nicola Demo
parent 8b797d589a
commit d83ca3af6e
82 changed files with 1074 additions and 1224 deletions

@@ -25,15 +25,14 @@ import torch
from torch.nn import Softplus
from pina.problem import SpatialProblem
from pina.operators import laplacian
from pina.operator import laplacian
from pina.model import FeedForward
from pina.solvers import PINN
from pina.solver import PINN
from pina.trainer import Trainer
from pina.plotter import Plotter
from pina.domain import CartesianDomain
from pina.equation import Equation, FixedValue
from pina import Condition, LabelTensor
from pina.callbacks import MetricTracker
from pina import Condition, LabelTensor#,Plotter
from pina.callback import MetricTracker
# ## The problem definition
@@ -83,17 +82,17 @@ class Poisson(SpatialProblem):
problem = Poisson()
# let's discretise the domain
problem.discretise_domain(25, 'grid', locations=['phys_cond'])
problem.discretise_domain(25, 'grid', locations=['bound_cond1', 'bound_cond2', 'bound_cond3', 'bound_cond4'])
problem.discretise_domain(25, 'grid', domains=['phys_cond'])
problem.discretise_domain(25, 'grid', domains=['bound_cond1', 'bound_cond2', 'bound_cond3', 'bound_cond4'])
# ## Solving the problem with standard PINNs
# After the problem, the feed-forward neural network is defined, through the class `FeedForward`. This neural network takes as input the coordinates (in this case $x$ and $y$) and provides the unkwown field of the Poisson problem. The residual of the equations are evaluated at several sampling points (which the user can manipulate using the method `CartesianDomain_pts`) and the loss minimized by the neural network is the sum of the residuals.
#
# In this tutorial, the neural network is composed of two hidden layers of 10 neurons each, and it is trained for 1000 epochs with a learning rate of 0.006 and $l_2$ weight regularization set to $10^{-8}$. These parameters can be modified as desired. We use the `MetricTracker` class to track the metrics during training.
# In this tutorial, the neural network is composed of two hidden layers of 10 neurons each, and it is trained for 1000 epochs. We use the `MetricTracker` class to track the metrics during training.
# In[3]:
# In[4]:
# make model + solver + trainer
@@ -103,7 +102,7 @@ model = FeedForward(
output_dimensions=len(problem.output_variables),
input_dimensions=len(problem.input_variables)
)
pinn = PINN(problem, model, optimizer_kwargs={'lr':0.006, 'weight_decay':1e-8})
pinn = PINN(problem, model)
trainer = Trainer(pinn, max_epochs=1000, callbacks=[MetricTracker()], accelerator='cpu', enable_model_summary=False) # we train on CPU and avoid model summary at beginning of training (optional)
# train
@@ -113,11 +112,11 @@ trainer.train()
# Now the `Plotter` class is used to plot the results.
# The solution predicted by the neural network is plotted on the left, the exact one is shown in the center, and the error between the exact and predicted solutions is shown on the right.
# In[4]:
# In[5]:
plotter = Plotter()
plotter.plot(solver=pinn)
#plotter = Plotter()
#plotter.plot(solver=pinn)
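# Since the `Plotter` calls above are commented out in 0.2, a minimal matplotlib sketch of how one might visualise the predicted field is given below. This is an illustration, not part of the tutorial: it assumes the domain is the unit square and that the trained solver can be evaluated directly on a `LabelTensor` of coordinates; check the PINA 0.2 API if the call signature differs.
import matplotlib.pyplot as plt
# build a regular grid on the (assumed) [0, 1] x [0, 1] domain and wrap it as a LabelTensor
res = 100
xs = torch.linspace(0, 1, res)
gx, gy = torch.meshgrid(xs, xs, indexing='ij')
pts = LabelTensor(torch.stack([gx.flatten(), gy.flatten()], dim=1), ['x', 'y'])
# evaluate the trained solver on the grid (assumed callable on a LabelTensor)
with torch.no_grad():
    u_pred = pinn(pts)
plt.pcolormesh(gx.numpy(), gy.numpy(), u_pred.detach().cpu().numpy().reshape(res, res))
plt.colorbar()
plt.title('PINN prediction')
plt.show()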
# ## Solving the problem with extra-features PINNs
@@ -132,12 +131,11 @@ plotter.plot(solver=pinn)
#
# where $x$ and $y$ are the spatial coordinates and $k(x, y)$ is the added feature.
#
# This feature is initialized in the class `SinSin`, which needs to inherit from `torch.nn.Module` and to implement the `forward` method. After declaring such a feature, we can just incorporate it in the `FeedForward` class thanks to the `extra_features` argument.
# **NB**: `extra_features` always needs a `list` as input; if you have a single feature, just wrap it in a list, as in the next cell.
# This feature is initialized in the class `SinSin`, which needs to inherit from `torch.nn.Module` and to implement the `forward` method. After declaring such a feature, we can just adjust the `FeedForward` class by creating a subclass `FeedForwardWithExtraFeatures` with an adjusted `forward` method and the additional attribute `extra_features`.
#
# Finally, we perform the same training as before: the problem is `Poisson`, the network is composed of the same number of neurons, and the optimizer parameters are equal to the previous test; the only change is the new extra feature.
# In[5]:
# In[6]:
class SinSin(torch.nn.Module):
@@ -150,28 +148,41 @@ class SinSin(torch.nn.Module):
torch.sin(x.extract(['y'])*torch.pi))
return LabelTensor(t, ['sin(x)sin(y)'])
class FeedForwardWithExtraFeatures(FeedForward):
def __init__(self, input_dimensions, output_dimensions, func, layers, extra_features):
# make model + solver + trainer
model_feat = FeedForward(
layers=[10, 10],
func=Softplus,
super().__init__(input_dimensions=input_dimensions,
output_dimensions=output_dimensions,
func=func,
layers=layers)
self.extra_features = extra_features
def forward(self, x):
extra_feature = self.extra_features[0](x)
x = x.append(extra_feature)
return super().forward(x)
model_feat = FeedForwardWithExtraFeatures(
input_dimensions=len(problem.input_variables) + 1, # we add one, since we also consider the extra feature dimension
output_dimensions=len(problem.output_variables),
input_dimensions=len(problem.input_variables)+1
)
pinn_feat = PINN(problem, model_feat, extra_features=[SinSin()], optimizer_kwargs={'lr':0.006, 'weight_decay':1e-8})
func=Softplus,
layers=[10, 10],
extra_features=[SinSin()])
pinn_feat = PINN(problem, model_feat)
trainer_feat = Trainer(pinn_feat, max_epochs=1000, callbacks=[MetricTracker()], accelerator='cpu', enable_model_summary=False) # we train on CPU and avoid model summary at beginning of training (optional)
# train
trainer_feat.train()
# The predicted and exact solutions and the error between them are represented below.
# We can easily note that our network, under almost the same conditions as before, is now able to gain additional orders of magnitude in accuracy.
# In[6]:
# In[7]:
plotter.plot(solver=pinn_feat)
#plotter.plot(solver=pinn_feat)
# ## Solving the problem with learnable extra-features PINNs
@@ -188,7 +199,7 @@ plotter.plot(solver=pinn_feat)
# where $\alpha$ and $\beta$ are the above-mentioned parameters.
# Their implementation is quite trivial: by using the class `torch.nn.Parameter` we can define all the learnable parameters we need, and they are managed by the `autograd` module!
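# As a purely illustrative sketch (the actual `SinSinAB` defined in the next cell may differ; the parameter names `alpha` and `beta` and the exact form of the feature are assumptions), a learnable feature could look like this:
class SinSinLearnable(torch.nn.Module):
    """Hypothetical learnable feature: beta * sin(alpha*pi*x) * sin(alpha*pi*y)."""
    def __init__(self):
        super().__init__()
        # torch.nn.Parameter registers the scalars as trainable weights,
        # so autograd optimises them together with the network parameters
        self.alpha = torch.nn.Parameter(torch.tensor([1.0]))
        self.beta = torch.nn.Parameter(torch.tensor([1.0]))
    def forward(self, x):
        t = (self.beta
             * torch.sin(self.alpha * x.extract(['x']) * torch.pi)
             * torch.sin(self.alpha * x.extract(['y']) * torch.pi))
        return LabelTensor(t, ['b*sin(a*x)sin(a*y)'])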
# In[7]:
# In[8]:
class SinSinAB(torch.nn.Module):
@@ -208,13 +219,14 @@ class SinSinAB(torch.nn.Module):
# make model + solver + trainer
model_lean = FeedForward(
layers=[10, 10],
func=Softplus,
model_lean = FeedForwardWithExtraFeatures(
input_dimensions=len(problem.input_variables) + 1, # we add one, since we also consider the extra feature dimension
output_dimensions=len(problem.output_variables),
input_dimensions=len(problem.input_variables)+1
)
pinn_lean = PINN(problem, model_lean, extra_features=[SinSinAB()], optimizer_kwargs={'lr':0.006, 'weight_decay':1e-8})
func=Softplus,
layers=[10, 10],
extra_features=[SinSinAB()])
pinn_lean = PINN(problem, model_lean)
trainer_learn = Trainer(pinn_lean, max_epochs=1000, accelerator='cpu', enable_model_summary=False) # we train on CPU and avoid model summary at beginning of training (optional)
# train
@@ -223,17 +235,17 @@ trainer_learn.train()
# Umh, the final loss is not appreciably better than the previous model (with static extra features), despite the use of learnable parameters. This is mainly due to the over-parametrization of the network: there are many parameters to optimize during training, and the model is unable to understand automatically that only the parameters of the extra feature (and not the weights/biases of the FFN) should be tuned in order to fit our problem. A longer training can be helpful, but in this case the faster way to reach machine precision for solving the Poisson problem is to remove all the hidden layers in the `FeedForward`, keeping only the $\alpha$ and $\beta$ parameters of the extra feature.
# In[8]:
# In[9]:
# make model + solver + trainer
model_lean = FeedForward(
model_lean = FeedForwardWithExtraFeatures(
layers=[],
func=Softplus,
output_dimensions=len(problem.output_variables),
input_dimensions=len(problem.input_variables)+1
)
pinn_learn = PINN(problem, model_lean, extra_features=[SinSinAB()], optimizer_kwargs={'lr':0.01, 'weight_decay':1e-8})
input_dimensions=len(problem.input_variables)+1,
extra_features=[SinSinAB()])
pinn_learn = PINN(problem, model_lean)
trainer_learn = Trainer(pinn_learn, max_epochs=1000, callbacks=[MetricTracker()], accelerator='cpu', enable_model_summary=False) # we train on CPU and avoid model summary at beginning of training (optional)
# train
@@ -245,20 +257,20 @@ trainer_learn.train()
#
# We conclude by showing the graphical comparison of the unknown field and of the loss trend for all the test cases presented: the standard PINN, the PINN with extra features, and the PINN with learnable extra features.
# In[9]:
# In[10]:
plotter.plot(solver=pinn_learn)
#plotter.plot(solver=pinn_learn)
# Let us compare the training losses for the various types of training
# In[10]:
# In[11]:
plotter.plot_loss(trainer, logy=True, label='Standard')
plotter.plot_loss(trainer_feat, logy=True,label='Static Features')
plotter.plot_loss(trainer_learn, logy=True, label='Learnable Features')
#plotter.plot_loss(trainer, logy=True, label='Standard')
#plotter.plot_loss(trainer_feat, logy=True,label='Static Features')
#plotter.plot_loss(trainer_learn, logy=True, label='Learnable Features')
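# With the `Plotter` loss comparison above commented out in 0.2, a minimal matplotlib sketch is given below. It assumes `MetricTracker` exposes the collected metrics through a `metrics` dictionary and that the loss history is stored under the key 'train_loss'; inspect the tracker object to confirm the actual attribute and key names in PINA 0.2.
import matplotlib.pyplot as plt
for trn, label in [(trainer, 'Standard'),
                   (trainer_feat, 'Static Features'),
                   (trainer_learn, 'Learnable Features')]:
    # pick the MetricTracker instance among the trainer callbacks
    tracker = next(c for c in trn.callbacks if isinstance(c, MetricTracker))
    loss = tracker.metrics['train_loss']  # assumed key
    plt.semilogy(loss.detach().cpu().numpy(), label=label)
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.show()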
# ## What's next?