* solvers -> solver
* adaptive_functions -> adaptive_function
* callbacks -> callback
* operators -> operator
* pinns -> physics_informed_solver
* layers -> block
Dario Coscia
2025-02-19 11:35:43 +01:00
committed by Nicola Demo
parent 810d215ca0
commit df673cad4e
90 changed files with 155 additions and 151 deletions


@@ -1,17 +0,0 @@
__all__ = [
"PINNInterface",
"PINN",
"GradientPINN",
"CausalPINN",
"CompetitivePINN",
"SelfAdaptivePINN",
"RBAPINN",
]
from .pinn_interface import PINNInterface
from .pinn import PINN
from .rba_pinn import RBAPINN
from .causal_pinn import CausalPINN
from .gradient_pinn import GradientPINN
from .competitive_pinn import CompetitivePINN
from .self_adaptive_pinn import SelfAdaptivePINN
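Downstream imports move with the renames listed at the top of this commit. A minimal before/after sketch (the new package nesting is inferred from the rename table above, so treat the exact paths as an assumption):

# before this commit
from pina.solvers.pinns import PINN, CausalPINN

# after this commit (solvers -> solver, pinns -> physics_informed_solver)
from pina.solver.physics_informed_solver import PINN, CausalPINN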


@@ -1,207 +0,0 @@
""" Module for Causal PINN. """
import torch
from pina.problem import TimeDependentProblem
from .pinn import PINN
from pina.utils import check_consistency
class CausalPINN(PINN):
r"""
Causal Physics Informed Neural Network (CausalPINN) solver class.
This class implements Causal Physics Informed Neural
Network solvers, using a user specified ``model`` to solve a specific
``problem``. It can be used for solving both forward and inverse problems.
The Causal Physics Informed Network aims to find
the solution :math:`\mathbf{u}:\Omega\rightarrow\mathbb{R}^m`
of the differential problem:
.. math::
\begin{cases}
\mathcal{A}[\mathbf{u}](\mathbf{x})=0\quad,\mathbf{x}\in\Omega\\
\mathcal{B}[\mathbf{u}](\mathbf{x})=0\quad,
\mathbf{x}\in\partial\Omega
\end{cases}
minimizing the loss function
.. math::
\mathcal{L}_{\rm{problem}} = \frac{1}{N_t}\sum_{i=1}^{N_t}
\omega_{i}\mathcal{L}_r(t_i),
where:
.. math::
\mathcal{L}_r(t) = \frac{1}{N}\sum_{i=1}^N
\mathcal{L}(\mathcal{A}[\mathbf{u}](\mathbf{x}_i, t)) +
\frac{1}{N}\sum_{i=1}^N
\mathcal{L}(\mathcal{B}[\mathbf{u}](\mathbf{x}_i, t))
and,
.. math::
\omega_i = \exp\left(-\epsilon \sum_{k=1}^{i-1}\mathcal{L}_r(t_k)\right).
:math:`\epsilon` is a hyperparameter, set by default to :math:`100`, while
:math:`\mathcal{L}` is a specific loss function,
default Mean Square Error:
.. math::
\mathcal{L}(v) = \| v \|^2_2.
.. seealso::
**Original reference**: Wang, Sifan, Shyam Sankaran, and Paris
Perdikaris. "Respecting causality for training physics-informed
neural networks." Computer Methods in Applied Mechanics
and Engineering 421 (2024): 116813.
DOI: `10.1016/j.cma.2024.116813 <https://doi.org/10.1016/j.cma.2024.116813>`_.
.. note::
This class can only work for problems inheriting
from at least
:class:`~pina.problem.timedep_problem.TimeDependentProblem` class.
"""
def __init__(self,
problem,
model,
optimizer=None,
scheduler=None,
weighting=None,
loss=None,
eps=100):
"""
:param torch.nn.Module model: The neural network model to use.
:param AbstractProblem problem: The formulation of the problem.
:param torch.optim.Optimizer optimizer: The neural network optimizer to
use; default `None`.
:param torch.optim.LRScheduler scheduler: Learning rate scheduler;
default `None`.
:param WeightingInterface weighting: The weighting schema to use;
default `None`.
:param torch.nn.Module loss: The loss function to be minimized;
default `None`.
:param float eps: The exponential decay parameter; default `100`.
"""
super().__init__(model=model,
problem=problem,
optimizer=optimizer,
scheduler=scheduler,
weighting=weighting,
loss=loss)
# checking consistency
check_consistency(eps, (int, float))
self._eps = eps
if not isinstance(self.problem, TimeDependentProblem):
raise ValueError(
"Casual PINN works only for problems"
"inheriting from TimeDependentProblem."
)
def loss_phys(self, samples, equation):
"""
Computes the physics loss for the Causal PINN solver based on given
samples and equation.
:param LabelTensor samples: The samples to evaluate the physics loss.
:param EquationInterface equation: The governing equation
representing the physics.
:return: The physics loss calculated based on given
samples and equation.
:rtype: LabelTensor
"""
# split sequentially ordered time tensors into chunks
chunks, labels = self._split_tensor_into_chunks(samples)
# compute residuals - these correspond to the ordered loss function
# values for each time step; stacking them below yields a tensor
# of shape (#chunks,)
time_loss = []
for chunk in chunks:
chunk.labels = labels
# classical PINN loss
residual = self.compute_residual(samples=chunk, equation=equation)
loss_val = self.loss(
torch.zeros_like(residual, requires_grad=True), residual
)
time_loss.append(loss_val)
# stack the per-time-step losses
time_loss = torch.stack(time_loss)
# compute weights without storing the gradient
with torch.no_grad():
weights = self._compute_weights(time_loss)
return (weights * time_loss).mean()
@property
def eps(self):
"""
The exponential decay parameter.
"""
return self._eps
@eps.setter
def eps(self, value):
"""
Setter method for the eps parameter.
:param float | int value: The exponential decay parameter.
"""
check_consistency(value, (int, float))
self._eps = value
def _sort_label_tensor(self, tensor):
"""
Sorts the label tensor based on time variables.
:param LabelTensor tensor: The label tensor to be sorted.
:return: The sorted label tensor based on time variables.
:rtype: LabelTensor
"""
# labels input tensors
labels = tensor.labels
# extract time tensor
time_tensor = tensor.extract(self.problem.temporal_domain.variables)
# sort the time tensors (note: sorting is inefficient on GPU)
_, idx = torch.sort(time_tensor.tensor.flatten())
tensor = tensor[idx]
tensor.labels = labels
return tensor
def _split_tensor_into_chunks(self, tensor):
"""
Splits the label tensor into chunks based on time.
:param LabelTensor tensor: The label tensor to be split.
:return: Tuple containing the chunks and the original labels.
:rtype: Tuple[List[LabelTensor], List]
"""
# extract labels
labels = tensor.labels
# sort input tensor based on time
tensor = self._sort_label_tensor(tensor)
# extract time tensor
time_tensor = tensor.extract(self.problem.temporal_domain.variables)
# count unique tensors in time
_, idx_split = time_tensor.unique(return_counts=True)
# split the tensor based on time
chunks = torch.split(tensor, tuple(idx_split))
return chunks, labels
def _compute_weights(self, loss):
"""
Computes the weights for the physics loss based on the cumulative loss.
:param LabelTensor loss: The physics loss values.
:return: The computed weights for the physics loss.
:rtype: LabelTensor
"""
# compute the cumulative loss and multiply by epsilon
cumulative_loss = self._eps * torch.cumsum(loss, dim=0)
# return the exponential of the negative weighted cumulative sum
return torch.exp(-cumulative_loss)
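To make the causal weighting concrete, here is a standalone sketch of what `_compute_weights` does, using plain torch and hypothetical per-time-step losses (note that, as in the code above, the cumulative sum includes the current step):

import torch

eps = 100  # exponential decay parameter, the CausalPINN default
# hypothetical ordered residual losses L_r(t_1), ..., L_r(t_4)
time_loss = torch.tensor([0.9, 0.5, 0.2, 0.1])
# w_i = exp(-eps * cumsum(L_r)): early times keep the largest weights,
# later times stay suppressed until the earlier residuals shrink
weights = torch.exp(-eps * torch.cumsum(time_loss, dim=0))
loss = (weights * time_loss).mean()  # the value returned by loss_phys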


@@ -1,336 +0,0 @@
""" Module for Competitive PINN. """
import torch
import copy
from pina.problem import InverseProblem
from .pinn_interface import PINNInterface
from ..solver import MultiSolverInterface
class CompetitivePINN(PINNInterface, MultiSolverInterface):
r"""
Competitive Physics Informed Neural Network (PINN) solver class.
This class implements Competitive Physics Informed Neural
Network solvers, using a user specified ``model`` to solve a specific
``problem``. It can be used for solving both forward and inverse problems.
The Competitive Physics Informed Network aims to find
the solution :math:`\mathbf{u}:\Omega\rightarrow\mathbb{R}^m`
of the differential problem:
.. math::
\begin{cases}
\mathcal{A}[\mathbf{u}](\mathbf{x})=0\quad,\mathbf{x}\in\Omega\\
\mathcal{B}[\mathbf{u}](\mathbf{x})=0\quad,
\mathbf{x}\in\partial\Omega
\end{cases}
with a minimization (over the ``model`` parameters) and a maximization
(over the ``discriminator`` parameters) of the loss function
.. math::
\mathcal{L}_{\rm{problem}} = \frac{1}{N}\sum_{i=1}^N
\mathcal{L}(D(\mathbf{x}_i)\mathcal{A}[\mathbf{u}](\mathbf{x}_i))+
\frac{1}{N}\sum_{i=1}^N
\mathcal{L}(D(\mathbf{x}_i)\mathcal{B}[\mathbf{u}](\mathbf{x}_i))
where :math:`D` is the discriminator network, which tries to find the points
where the network performs worst, and :math:`\mathcal{L}` is a specific loss
function, default Mean Square Error:
.. math::
\mathcal{L}(v) = \| v \|^2_2.
.. seealso::
**Original reference**: Zeng, Qi, et al.
"Competitive physics informed networks." International Conference on
Learning Representations, ICLR 2022
`OpenReview Preprint <https://openreview.net/forum?id=z9SIj-IM7tn>`_.
.. warning::
This solver does not currently support the possibility to pass
``extra_feature``.
"""
def __init__(self,
problem,
model,
discriminator=None,
optimizer_model=None,
optimizer_discriminator=None,
scheduler_model=None,
scheduler_discriminator=None,
weighting=None,
loss=None):
"""
:param AbstractProblem problem: The formulation of the problem.
:param torch.nn.Module model: The neural network model to use.
:param torch.nn.Module discriminator: The neural network model to use
for the discriminator. If ``None``, the discriminator network will
have the same architecture as the model network.
:param torch.optim.Optimizer optimizer_model: The neural network
optimizer to use for the model network; default `None`.
:param torch.optim.Optimizer optimizer_discriminator: The neural network
optimizer to use for the discriminator network; default `None`.
:param torch.optim.LRScheduler scheduler_model: Learning rate scheduler
for the model; default `None`.
:param torch.optim.LRScheduler scheduler_discriminator: Learning rate
scheduler for the discriminator; default `None`.
:param WeightingInterface weighting: The weighting schema to use;
default `None`.
:param torch.nn.Module loss: The loss function to be minimized;
default `None`.
"""
if discriminator is None:
discriminator = copy.deepcopy(model)
super().__init__(models=[model, discriminator],
problem=problem,
optimizers=[optimizer_model, optimizer_discriminator],
schedulers=[scheduler_model, scheduler_discriminator],
weighting=weighting,
loss=loss)
# Set automatic optimization to False
self.automatic_optimization = False
def forward(self, x):
r"""
Forward pass implementation for the PINN solver. It returns the function
evaluation :math:`\mathbf{u}(\mathbf{x})` at the control points
:math:`\mathbf{x}`.
:param LabelTensor x: Input tensor for the PINN solver. It expects
a tensor :math:`N \times D`, where :math:`N` is the number of points
in the mesh and :math:`D` is the dimension of the problem.
:return: PINN solution evaluated at the control points.
:rtype: LabelTensor
"""
return self.neural_net(x)
def training_step(self, batch):
"""
Solver training step, overridden to perform manual optimization.
:param batch: The batch element in the dataloader.
:type batch: tuple
:return: The sum of the loss functions.
:rtype: LabelTensor
"""
self.optimizer_model.instance.zero_grad()
self.optimizer_discriminator.instance.zero_grad()
loss = super().training_step(batch)
self.optimizer_model.instance.step()
self.optimizer_discriminator.instance.step()
return loss
def loss_phys(self, samples, equation):
"""
Computes the physics loss for the Competitive PINN solver based on given
samples and equation.
:param LabelTensor samples: The samples to evaluate the physics loss.
:param EquationInterface equation: The governing equation
representing the physics.
:return: The physics loss calculated based on given
samples and equation.
:rtype: LabelTensor
"""
# Train the model for one step
with torch.no_grad():
discriminator_bets = self.discriminator(samples)
loss_val = self._train_model(samples, equation, discriminator_bets)
# Detach samples from the existing computational graph and
# create a new one by setting requires_grad to True.
# Alternatively, set `retain_graph=True`.
samples = samples.detach()
samples.requires_grad_()
# Train the discriminator for one step
discriminator_bets = self.discriminator(samples)
self._train_discriminator(samples, equation, discriminator_bets)
return loss_val
def loss_data(self, input_pts, output_pts):
"""
The data loss for the CompetitivePINN solver. It computes the loss
between the network output and the true solution.
:param LabelTensor input_pts: The input to the neural networks.
:param LabelTensor output_pts: The true solution to compare with the
network solution.
:return: The computed data loss.
:rtype: torch.Tensor
"""
loss_val = super().loss_data(input_pts, output_pts)
# prepare for optimizer step called in training step
loss_val.backward()
return loss_val
def configure_optimizers(self):
"""
Optimizer configuration for the Competitive PINN solver.
:return: The optimizers and the schedulers
:rtype: tuple(list, list)
"""
# If the problem is an InverseProblem, add the unknown parameters
# to the parameters to be optimized
self.optimizer_model.hook(self.neural_net.parameters())
self.optimizer_discriminator.hook(self.discriminator.parameters())
if isinstance(self.problem, InverseProblem):
self.optimizer_model.instance.add_param_group(
{
"params": [
self._params[var]
for var in self.problem.unknown_variables
]
}
)
self.scheduler_model.hook(self.optimizer_model)
self.scheduler_discriminator.hook(self.optimizer_discriminator)
return (
[self.optimizer_model.instance,
self.optimizer_discriminator.instance],
[self.scheduler_model.instance,
self.scheduler_discriminator.instance]
)
def on_train_batch_end(self, outputs, batch, batch_idx):
"""
This method is called at the end of each training batch, and overrides
the PyTorch Lightning implementation for logging the checkpoints.
:param torch.Tensor outputs: The output from the model for the
current batch.
:param tuple batch: The current batch of data.
:param int batch_idx: The index of the current batch.
:return: Whatever is returned by the parent
method ``on_train_batch_end``.
:rtype: Any
"""
# increase by one the counter of optimization to save loggers
(
self.trainer.fit_loop.epoch_loop.manual_optimization
.optim_step_progress.total.completed
) += 1
return super().on_train_batch_end(outputs, batch, batch_idx)
def _train_discriminator(self, samples, equation, discriminator_bets):
"""
Trains the discriminator network of the Competitive PINN.
:param LabelTensor samples: Input samples to evaluate the physics loss.
:param EquationInterface equation: The governing equation representing
the physics.
:param Tensor discriminator_bets: Predictions made by the discriminator
network.
"""
# Compute residual. Detach so no gradient flows to the model,
# whose weights are fixed during the discriminator step
residual = self.compute_residual(
samples=samples, equation=equation
).detach()
# Compute competitive residual, then maximise the loss
competitive_residual = residual * discriminator_bets
loss_val = -self.loss(
torch.zeros_like(competitive_residual, requires_grad=True),
competitive_residual,
)
# prepare for optimizer step called in training step
self.manual_backward(loss_val)
return
def _train_model(self, samples, equation, discriminator_bets):
"""
Trains the model network of the Competitive PINN.
:param LabelTensor samples: Input samples to evaluate the physics loss.
:param EquationInterface equation: The governing equation representing
the physics.
:param Tensor discriminator_bets: Predictions made by the discriminator
network.
:return: The computed residual loss.
:rtype: torch.Tensor
"""
# Compute residual
residual = self.compute_residual(samples=samples, equation=equation)
with torch.no_grad():
loss_residual = self.loss(torch.zeros_like(residual), residual)
# Compute competitive residual. Detach discriminator_bets
# to optimize only the generator model
competitive_residual = residual * discriminator_bets.detach()
loss_val = self.loss(
torch.zeros_like(competitive_residual, requires_grad=True),
competitive_residual,
)
# prepare for optimizer step called in training step
self.manual_backward(loss_val)
return loss_residual
@property
def neural_net(self):
"""
Returns the neural network model.
:return: The neural network model.
:rtype: torch.nn.Module
"""
return self.models[0]
@property
def discriminator(self):
"""
Returns the discriminator model (if applicable).
:return: The discriminator model.
:rtype: torch.nn.Module
"""
return self.models[1]
@property
def optimizer_model(self):
"""
Returns the optimizer associated with the neural network model.
:return: The optimizer for the neural network model.
:rtype: torch.optim.Optimizer
"""
return self.optimizers[0]
@property
def optimizer_discriminator(self):
"""
Returns the optimizer associated with the discriminator (if applicable).
:return: The optimizer for the discriminator.
:rtype: torch.optim.Optimizer
"""
return self.optimizers[1]
@property
def scheduler_model(self):
"""
Returns the scheduler associated with the neural network model.
:return: The scheduler for the neural network model.
:rtype: torch.optim.lr_scheduler._LRScheduler
"""
return self.schedulers[0]
@property
def scheduler_discriminator(self):
"""
Returns the scheduler associated with the discriminator (if applicable).
:return: The scheduler for the discriminator.
:rtype: torch.optim.lr_scheduler._LRScheduler
"""
return self.schedulers[1]
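Stripped of the Lightning machinery, the min-max step in ``loss_phys`` above alternates two detached passes. A framework-free sketch of that alternation (the function and argument names here are illustrative, not PINA API):

import torch

def competitive_step(residual_fn, samples, model_opt, disc, disc_opt,
                     loss=torch.nn.MSELoss()):
    # model step: discriminator bets are frozen (detached)
    bets = disc(samples).detach()
    res = residual_fn(samples)
    model_loss = loss(torch.zeros_like(res), bets * res)
    model_opt.zero_grad()
    model_loss.backward()
    model_opt.step()
    # discriminator step: residual is frozen, the weighted loss is maximized
    res = residual_fn(samples).detach()
    disc_loss = -loss(torch.zeros_like(res), disc(samples) * res)
    disc_opt.zero_grad()
    disc_loss.backward()
    disc_opt.step()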


@@ -1,124 +0,0 @@
""" Module for Gradient PINN. """
import torch
from .pinn import PINN
from pina.operators import grad
from pina.problem import SpatialProblem
class GradientPINN(PINN):
r"""
Gradient Physics Informed Neural Network (GradientPINN) solver class.
This class implements Gradient Physics Informed Neural
Network solvers, using a user specified ``model`` to solve a specific
``problem``. It can be used for solving both forward and inverse problems.
The Gradient Physics Informed Network aims to find
the solution :math:`\mathbf{u}:\Omega\rightarrow\mathbb{R}^m`
of the differential problem:
.. math::
\begin{cases}
\mathcal{A}[\mathbf{u}](\mathbf{x})=0\quad,\mathbf{x}\in\Omega\\
\mathcal{B}[\mathbf{u}](\mathbf{x})=0\quad,
\mathbf{x}\in\partial\Omega
\end{cases}
minimizing the loss function
.. math::
\mathcal{L}_{\rm{problem}} =& \frac{1}{N}\sum_{i=1}^N
\mathcal{L}(\mathcal{A}[\mathbf{u}](\mathbf{x}_i)) +
\frac{1}{N}\sum_{i=1}^N
\mathcal{L}(\mathcal{B}[\mathbf{u}](\mathbf{x}_i)) + \\
&\frac{1}{N}\sum_{i=1}^N
\nabla_{\mathbf{x}}\mathcal{L}(\mathcal{A}[\mathbf{u}](\mathbf{x}_i)) +
\frac{1}{N}\sum_{i=1}^N
\nabla_{\mathbf{x}}\mathcal{L}(\mathcal{B}[\mathbf{u}](\mathbf{x}_i))
where :math:`\mathcal{L}` is a specific loss function,
default Mean Square Error:
.. math::
\mathcal{L}(v) = \| v \|^2_2.
.. seealso::
**Original reference**: Yu, Jeremy, et al. "Gradient-enhanced
physics-informed neural networks for forward and inverse
PDE problems." Computer Methods in Applied Mechanics
and Engineering 393 (2022): 114823.
DOI: `10.1016/j.cma.2022.114823 <https://doi.org/10.1016/j.cma.2022.114823>`_.
.. note::
This class can only work for problems inheriting
from at least :class:`~pina.problem.spatial_problem.SpatialProblem`
class.
"""
def __init__(self,
problem,
model,
optimizer=None,
scheduler=None,
weighting=None,
loss=None):
"""
:param torch.nn.Module model: The neural network model to use.
:param AbstractProblem problem: The formulation of the problem. It must
inherit from at least
:class:`~pina.problem.spatial_problem.SpatialProblem` to compute
the gradient of the loss.
:param torch.optim.Optimizer optimizer: The neural network optimizer to
use; default `None`.
:param torch.optim.LRScheduler scheduler: Learning rate scheduler;
default `None`.
:param WeightingInterface weighting: The weighting schema to use;
default `None`.
:param torch.nn.Module loss: The loss function to be minimized;
default `None`.
"""
super().__init__(model=model,
problem=problem,
optimizer=optimizer,
scheduler=scheduler,
weighting=weighting,
loss=loss)
if not isinstance(self.problem, SpatialProblem):
raise ValueError(
"Gradient PINN computes the gradient of the "
"PINN loss with respect to the spatial "
"coordinates, thus the PINA problem must be "
"a SpatialProblem."
)
def loss_phys(self, samples, equation):
"""
Computes the physics loss for the GradientPINN solver based on given
samples and equation.
:param LabelTensor samples: The samples to evaluate the physics loss.
:param EquationInterface equation: The governing equation
representing the physics.
:return: The physics loss calculated based on given
samples and equation.
:rtype: LabelTensor
"""
# classical PINN loss
residual = self.compute_residual(samples=samples, equation=equation)
loss_value = self.loss(
torch.zeros_like(residual, requires_grad=True), residual
)
# gradient PINN loss
loss_value = loss_value.reshape(-1, 1)
loss_value.labels = ["__loss"]
loss_grad = grad(loss_value, samples, d=self.problem.spatial_variables)
g_loss_phys = self.loss(
torch.zeros_like(loss_grad, requires_grad=True), loss_grad
)
return loss_value + g_loss_phys
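The gradient enhancement can be reproduced with plain torch.autograd, outside PINA. A toy sketch for the 1D equation u'(x) = 2x with a stand-in for the network output (illustrative only; the class above applies ``pina.operators.grad`` to the reduced loss instead):

import torch

x = torch.linspace(0, 1, 50, requires_grad=True).reshape(-1, 1)
u = x ** 3  # stand-in for the network output u(x)
du_dx = torch.autograd.grad(u.sum(), x, create_graph=True)[0]
residual = du_dx - 2 * x  # residual of the toy equation u' = 2x
loss = (residual ** 2).mean()  # classical PINN term
# gradient enhancement: also penalize the spatial derivative of the residual
dres_dx = torch.autograd.grad(residual.sum(), x, create_graph=True)[0]
total = loss + (dres_dx ** 2).mean()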


@@ -1,118 +0,0 @@
""" Module for Physics Informed Neural Network. """
import torch
from .pinn_interface import PINNInterface
from ..solver import SingleSolverInterface
from ...problem import InverseProblem
class PINN(PINNInterface, SingleSolverInterface):
r"""
Physics Informed Neural Network (PINN) solver class.
This class implements Physics Informed Neural
Network solvers, using a user specified ``model`` to solve a specific
``problem``. It can be used for solving both forward and inverse problems.
The Physics Informed Network aims to find
the solution :math:`\mathbf{u}:\Omega\rightarrow\mathbb{R}^m`
of the differential problem:
.. math::
\begin{cases}
\mathcal{A}[\mathbf{u}](\mathbf{x})=0\quad,\mathbf{x}\in\Omega\\
\mathcal{B}[\mathbf{u}](\mathbf{x})=0\quad,
\mathbf{x}\in\partial\Omega
\end{cases}
minimizing the loss function
.. math::
\mathcal{L}_{\rm{problem}} = \frac{1}{N}\sum_{i=1}^N
\mathcal{L}(\mathcal{A}[\mathbf{u}](\mathbf{x}_i)) +
\frac{1}{N}\sum_{i=1}^N
\mathcal{L}(\mathcal{B}[\mathbf{u}](\mathbf{x}_i))
where :math:`\mathcal{L}` is a specific loss function,
default Mean Square Error:
.. math::
\mathcal{L}(v) = \| v \|^2_2.
.. seealso::
**Original reference**: Karniadakis, G. E., Kevrekidis, I. G., Lu, L.,
Perdikaris, P., Wang, S., & Yang, L. (2021).
Physics-informed machine learning. Nature Reviews Physics, 3, 422-440.
DOI: `10.1038/s42254-021-00314-5 <https://doi.org/10.1038/s42254-021-00314-5>`_.
"""
def __init__(self,
problem,
model,
optimizer=None,
scheduler=None,
weighting=None,
loss=None):
"""
:param torch.nn.Module model: The neural network model to use.
:param AbstractProblem problem: The formulation of the problem.
:param torch.optim.Optimizer optimizer: The neural network optimizer to
use; default `None`.
:param torch.optim.LRScheduler scheduler: Learning rate scheduler;
default `None`.
:param WeightingInterface weighting: The weighting schema to use;
default `None`.
:param torch.nn.Module loss: The loss function to be minimized;
default `None`.
"""
super().__init__(model=model,
problem=problem,
optimizer=optimizer,
scheduler=scheduler,
weighting=weighting,
loss=loss)
def loss_phys(self, samples, equation):
"""
Computes the physics loss for the PINN solver based on given
samples and equation.
:param LabelTensor samples: The samples to evaluate the physics loss.
:param EquationInterface equation: The governing equation
representing the physics.
:return: The physics loss calculated based on given
samples and equation.
:rtype: LabelTensor
"""
residual = self.compute_residual(samples=samples, equation=equation)
loss_value = self.loss(
torch.zeros_like(residual, requires_grad=True), residual
)
return loss_value
def configure_optimizers(self):
"""
Optimizer configuration for the PINN solver.
:return: The optimizers and the schedulers
:rtype: tuple(list, list)
"""
# If the problem is an InverseProblem, add the unknown parameters
# to the parameters to be optimized.
self.optimizer.hook(self.model.parameters())
if isinstance(self.problem, InverseProblem):
self.optimizer.instance.add_param_group(
{
"params": [
self._params[var]
for var in self.problem.unknown_variables
]
}
)
self.scheduler.hook(self.optimizer)
return (
[self.optimizer.instance],
[self.scheduler.instance]
)
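A hedged end-to-end usage sketch of the solver above. ``FeedForward`` and ``Trainer`` come from PINA's standard workflow, but constructor signatures may differ between versions, the import path assumes the new layout from the rename table, and ``problem`` stands for any user-defined problem with sampled points:

from pina import Trainer
from pina.model import FeedForward
from pina.solver.physics_informed_solver import PINN

# `problem` is assumed to be an AbstractProblem instance defined elsewhere
model = FeedForward(input_dimensions=2, output_dimensions=1)  # e.g. (x, t) -> u
solver = PINN(problem=problem, model=model)  # optimizer/scheduler/loss use defaults
trainer = Trainer(solver=solver, max_epochs=1000)
trainer.train()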


@@ -1,191 +0,0 @@
""" Module for Physics Informed Neural Network Interface."""
from abc import ABCMeta, abstractmethod
import torch
from torch.nn.modules.loss import _Loss
from ..solver import SolverInterface
from ...utils import check_consistency
from ...loss.loss_interface import LossInterface
from ...problem import InverseProblem
from ...condition import (
InputOutputPointsCondition,
InputPointsEquationCondition,
DomainEquationCondition
)
class PINNInterface(SolverInterface, metaclass=ABCMeta):
"""
Base PINN solver class. This class implements the Solver Interface
for Physics Informed Neural Network solvers.
This class can be used to define PINNs with multiple ``optimizers``,
and/or ``models``.
By default it takes :class:`~pina.problem.abstract_problem.AbstractProblem`,
so that the user can choose the type of problem the solver
inheriting from this class is designed to solve.
"""
accepted_conditions_types = (
InputOutputPointsCondition,
InputPointsEquationCondition,
DomainEquationCondition
)
def __init__(self,
problem,
loss=None,
**kwargs):
"""
:param AbstractProblem problem: A problem definition instance.
:param torch.nn.Module loss: The loss function to be minimized,
default `None`.
"""
if loss is None:
loss = torch.nn.MSELoss()
super().__init__(problem=problem,
use_lt=True,
**kwargs)
# check consistency
check_consistency(loss, (LossInterface, _Loss), subclass=False)
# assign variables
self._loss = loss
# inverse problem handling
if isinstance(self.problem, InverseProblem):
self._params = self.problem.unknown_parameters
self._clamp_params = self._clamp_inverse_problem_params
else:
self._params = None
self._clamp_params = lambda: None
self.__metric = None
def optimization_cycle(self, batch):
return self._run_optimization_cycle(batch, self.loss_phys)
@torch.set_grad_enabled(True)
def validation_step(self, batch):
losses = self._run_optimization_cycle(batch, self._residual_loss)
loss = self.weighting.aggregate(losses).as_subclass(torch.Tensor)
self.store_log('val_loss', loss, self.get_batch_size(batch))
return loss
@torch.set_grad_enabled(True)
def test_step(self, batch):
losses = self._run_optimization_cycle(batch, self._residual_loss)
loss = self.weighting.aggregate(losses).as_subclass(torch.Tensor)
self.store_log('test_loss', loss, self.get_batch_size(batch))
return loss
def loss_data(self, input_pts, output_pts):
"""
The data loss for the PINN solver. It computes the loss between
the network output and the true solution. This function
should not be overridden unless intentionally.
:param LabelTensor input_pts: The input to the neural networks.
:param LabelTensor output_pts: The true solution to compare with the
network solution.
:return: The residual loss averaged over the input coordinates.
:rtype: torch.Tensor
"""
return self._loss(self.forward(input_pts), output_pts)
@abstractmethod
def loss_phys(self, samples, equation):
"""
Computes the physics loss for the physics informed solver based on given
samples and equation. This method must be overridden by all inherited
classes; it is the core of defining a new physics informed solver.
:param LabelTensor samples: The samples to evaluate the physics loss.
:param EquationInterface equation: The governing equation
representing the physics.
:return: The physics loss calculated based on given
samples and equation.
:rtype: LabelTensor
"""
pass
def compute_residual(self, samples, equation):
"""
Compute the residual for Physics Informed learning. This function
returns the :obj:`~pina.equation.equation.Equation` specified in the
:obj:`~pina.condition.Condition` evaluated at the ``samples`` points.
:param LabelTensor samples: The samples to evaluate the physics loss.
:param EquationInterface equation: The governing equation
representing the physics.
:return: The residual of the neural network solution.
:rtype: LabelTensor
"""
try:
residual = equation.residual(samples, self.forward(samples))
except TypeError:
# this occurs when the function has three inputs (inverse problem)
residual = equation.residual(
samples,
self.forward(samples),
self._params
)
return residual
def _residual_loss(self, samples, equation):
residuals = self.compute_residual(samples, equation)
return self.loss(residuals, torch.zeros_like(residuals))
def _run_optimization_cycle(self, batch, loss_residuals):
condition_loss = {}
for condition_name, points in batch:
self.__metric = condition_name
# if equations are passed
if 'output_points' not in points:
input_pts = points['input_points']
condition = self.problem.conditions[condition_name]
loss = loss_residuals(
input_pts.requires_grad_(),
condition.equation
)
# if data are passed
else:
input_pts = points['input_points']
output_pts = points['output_points']
loss = self.loss_data(
input_pts=input_pts.requires_grad_(),
output_pts=output_pts
)
# append loss
condition_loss[condition_name] = loss
# clamp unknown parameters in InverseProblem (if needed)
self._clamp_params()
return condition_loss
def _clamp_inverse_problem_params(self):
"""
Clamps the parameters of the inverse problem
solver to the specified ranges.
"""
for v in self._params:
self._params[v].data.clamp_(
self.problem.unknown_parameter_domain.range_[v][0],
self.problem.unknown_parameter_domain.range_[v][1],
)
@property
def loss(self):
"""
Loss used for training.
"""
return self._loss
@property
def current_condition_name(self):
"""
The current condition name.
"""
return self.__metric
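Since ``loss_phys`` is the only abstract method, defining a new physics informed solver reduces to one override on top of an existing concrete class. A minimal sketch (the log-cosh metric is a hypothetical variant used for illustration, not a PINA solver):

import torch
from pina.solver.physics_informed_solver import PINN

class LogCoshPINN(PINN):
    """Hypothetical PINN variant with a log-cosh pointwise metric."""

    def loss_phys(self, samples, equation):
        # same residual machinery as PINN, different reduction
        residual = self.compute_residual(samples=samples, equation=equation)
        return torch.log(torch.cosh(residual)).mean()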


@@ -1,172 +0,0 @@
""" Module for Residual-Based Attention PINN. """
from copy import deepcopy
import torch
from .pinn import PINN
from ...utils import check_consistency
class RBAPINN(PINN):
r"""
Residual-based Attention PINN (RBAPINN) solver class.
This class implements Residual-based Attention Physics Informed Neural
Network solvers, using a user specified ``model`` to solve a specific
``problem``. It can be used for solving both forward and inverse problems.
The Residual-based Attention Physics Informed Neural Network aims to find
the solution :math:`\mathbf{u}:\Omega\rightarrow\mathbb{R}^m`
of the differential problem:
.. math::
\begin{cases}
\mathcal{A}[\mathbf{u}](\mathbf{x})=0\quad,\mathbf{x}\in\Omega\\
\mathcal{B}[\mathbf{u}](\mathbf{x})=0\quad,
\mathbf{x}\in\partial\Omega
\end{cases}
minimizing the loss function
.. math::
\mathcal{L}_{\rm{problem}} = \frac{1}{N_\Omega} \sum_{i=1}^{N_\Omega}
\lambda_{\Omega}^{i} \mathcal{L} \left( \mathcal{A}
[\mathbf{u}](\mathbf{x}_i) \right) + \frac{1}{N_{\partial\Omega}}
\sum_{i=1}^{N_{\partial\Omega}}
\lambda_{\partial\Omega}^{i} \mathcal{L}
\left( \mathcal{B}[\mathbf{u}](\mathbf{x}_i)
\right),
denoting the weights as
:math:`\lambda_{\Omega}^1, \dots, \lambda_{\Omega}^{N_\Omega}` and
:math:`\lambda_{\partial \Omega}^1, \dots,
\lambda_{\partial \Omega}^{N_{\partial \Omega}}`
for :math:`\Omega` and :math:`\partial \Omega`, respectively.
Residual-based Attention Physics Informed Neural Network computes
the weights by updating them at every epoch as follows
.. math::
\lambda_i^{k+1} \leftarrow \gamma\lambda_i^{k} +
\eta\frac{\lvert r_i\rvert}{\max_j \lvert r_j\rvert},
where :math:`r_i` denotes the residual at point :math:`i`,
:math:`\gamma` denotes the decay rate, and :math:`\eta` is
the learning rate for the weights' update.
.. seealso::
**Original reference**: Sokratis J. Anagnostopoulos, Juan D. Toscano,
Nikolaos Stergiopulos, and George E. Karniadakis.
"Residual-based attention and connection to information
bottleneck theory in PINNs".
Computer Methods in Applied Mechanics and Engineering 421 (2024): 116805
DOI: `10.1016/j.cma.2024.116805 <https://doi.org/10.1016/j.cma.2024.116805>`_.
"""
def __init__(self,
problem,
model,
optimizer=None,
scheduler=None,
weighting=None,
loss=None,
eta=0.001,
gamma=0.999):
"""
:param torch.nn.Module model: The neural network model to use.
:param AbstractProblem problem: The formulation of the problem.
:param torch.optim.Optimizer optimizer: The neural network optimizer to
use; default `None`.
:param torch.optim.LRScheduler scheduler: Learning rate scheduler;
default `None`.
:param WeightingInterface weighting: The weighting schema to use;
default `None`.
:param torch.nn.Module loss: The loss function to be minimized;
default `None`.
:param float | int eta: The learning rate for the weights of the
residual; default 0.001.
:param float gamma: The decay parameter in the update of the weights
of the residual. Must be between 0 and 1; default 0.999.
"""
super().__init__(model=model,
problem=problem,
optimizer=optimizer,
scheduler=scheduler,
weighting=weighting,
loss=loss)
# check consistency
check_consistency(eta, (float, int))
check_consistency(gamma, float)
assert (
0 < gamma < 1
), f"Invalid range: expected 0 < gamma < 1, got {gamma=}"
self.eta = eta
self.gamma = gamma
# initialize weights
self.weights = {}
for condition_name in problem.conditions:
self.weights[condition_name] = 0
# define vectorial loss
self._vectorial_loss = deepcopy(self.loss)
self._vectorial_loss.reduction = "none"
# for now RBAPINN is implemented only for batch_size = None
def on_train_start(self):
if self.trainer.batch_size is not None:
raise NotImplementedError("RBAPINN only works with full batch "
"size, set batch_size=None inside the "
"Trainer to use the solver.")
return super().on_train_start()
def _vect_to_scalar(self, loss_value):
"""
Aggregation of the pointwise loss into a scalar value.
:param LabelTensor loss_value: the matrix of pointwise loss.
:return: the scalar loss.
:rtype: LabelTensor
"""
if self.loss.reduction == "mean":
ret = torch.mean(loss_value)
elif self.loss.reduction == "sum":
ret = torch.sum(loss_value)
else:
raise RuntimeError(
f"Invalid reduction, got {self.loss.reduction} "
"but expected mean or sum."
)
return ret
def loss_phys(self, samples, equation):
"""
Computes the physics loss for the residual-based attention PINN
solver based on given samples and equation.
:param LabelTensor samples: The samples to evaluate the physics loss.
:param EquationInterface equation: The governing equation
representing the physics.
:return: The physics loss calculated based on given
samples and equation.
:rtype: LabelTensor
"""
residual = self.compute_residual(samples=samples, equation=equation)
cond = self.current_condition_name
r_norm = (
self.eta * torch.abs(residual)
/ (torch.max(torch.abs(residual)) + 1e-12)
)
self.weights[cond] = (self.gamma*self.weights[cond] + r_norm).detach()
loss_value = self._vectorial_loss(
torch.zeros_like(residual, requires_grad=True), residual
)
return self._vect_to_scalar(self.weights[cond] ** 2 * loss_value)
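The update rule lambda <- gamma * lambda + eta * |r| / max_j |r_j| in isolation, mirroring ``loss_phys`` above with plain torch and made-up residuals:

import torch

gamma, eta = 0.999, 0.001  # RBAPINN defaults
weights = torch.zeros(5)   # weights start at 0 for every condition
residual = torch.tensor([0.1, -0.4, 0.05, 0.8, -0.2])
r_norm = eta * residual.abs() / (residual.abs().max() + 1e-12)
weights = (gamma * weights + r_norm).detach()
# squared weights multiply the pointwise loss before reduction
pointwise = torch.nn.MSELoss(reduction="none")(torch.zeros_like(residual), residual)
loss = (weights ** 2 * pointwise).mean()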


@@ -1,430 +0,0 @@
""" Module for Self-Adaptive PINN. """
import torch
from copy import deepcopy
from pina.utils import check_consistency
from pina.problem import InverseProblem
from ..solver import MultiSolverInterface
from .pinn_interface import PINNInterface
class Weights(torch.nn.Module):
"""
This class implements the mask model for the
self-adaptive weights of the Self-Adaptive PINN solver.
"""
def __init__(self, func):
"""
:param torch.nn.Module func: the mask module of SAPINN.
"""
super().__init__()
check_consistency(func, torch.nn.Module)
self.sa_weights = torch.nn.Parameter(torch.Tensor())
self.func = func
def forward(self):
"""
Forward pass implementation for the mask module.
It returns the mask function evaluated on the self adaptive weights.
:return: evaluation of self adaptive weights through the mask.
:rtype: torch.Tensor
"""
return self.func(self.sa_weights)
class SelfAdaptivePINN(PINNInterface, MultiSolverInterface):
r"""
Self Adaptive Physics Informed Neural Network (SelfAdaptivePINN)
solver class. This class implements Self-Adaptive Physics Informed Neural
Network solvers, using a user specified ``model`` to solve a specific
``problem``. It can be used for solving both forward and inverse problems.
The Self Adaptive Physics Informed Neural Network aims to find
the solution :math:`\mathbf{u}:\Omega\rightarrow\mathbb{R}^m`
of the differential problem:
.. math::
\begin{cases}
\mathcal{A}[\mathbf{u}](\mathbf{x})=0\quad,\mathbf{x}\in\Omega\\
\mathcal{B}[\mathbf{u}](\mathbf{x})=0\quad,
\mathbf{x}\in\partial\Omega
\end{cases}
integrating the pointwise loss evaluation through a mask :math:`m` and
self adaptive weights that permit focusing the loss function on
specific training samples.
The loss function to solve the problem is
.. math::
\mathcal{L}_{\rm{problem}} = \frac{1}{N_\Omega} \sum_{i=1}^{N_\Omega} m
\left( \lambda_{\Omega}^{i} \right) \mathcal{L} \left( \mathcal{A}
[\mathbf{u}](\mathbf{x}_i) \right) + \frac{1}{N_{\partial\Omega}}
\sum_{i=1}^{N_{\partial\Omega}}
m \left( \lambda_{\partial\Omega}^{i} \right) \mathcal{L}
\left( \mathcal{B}[\mathbf{u}](\mathbf{x}_i)
\right),
denoting the self adaptive weights as
:math:`\lambda_{\Omega}^1, \dots, \lambda_{\Omega}^{N_\Omega}` and
:math:`\lambda_{\partial \Omega}^1, \dots,
\lambda_{\partial \Omega}^{N_{\partial \Omega}}`
for :math:`\Omega` and :math:`\partial \Omega`, respectively.
Self Adaptive Physics Informed Neural Network identifies the solution
and appropriate self adaptive weights by solving the following problem
.. math::
\min_{w} \max_{\lambda_{\Omega}^k, \lambda_{\partial \Omega}^s}
\mathcal{L} ,
where :math:`w` denotes the network parameters, and
:math:`\mathcal{L}` is a specific loss
function, default Mean Square Error:
.. math::
\mathcal{L}(v) = \| v \|^2_2.
.. seealso::
**Original reference**: McClenny, Levi D., and Ulisses M. Braga-Neto.
"Self-adaptive physics-informed neural networks."
Journal of Computational Physics 474 (2023): 111722.
DOI: `10.1016/j.jcp.2022.111722 <https://doi.org/10.1016/j.jcp.2022.111722>`_.
"""
def __init__(self,
problem,
model,
weight_function=torch.nn.Sigmoid(),
optimizer_model=None,
optimizer_weights=None,
scheduler_model=None,
scheduler_weights=None,
weighting=None,
loss=None):
"""
:param AbstractProblem problem: The formulation of the problem.
:param torch.nn.Module model: The neural network model to use for
the model.
:param torch.nn.Module weight_function: The neural network model
related to the Self-Adaptive PINN mask; default `torch.nn.Sigmoid()`.
:param torch.optim.Optimizer optimizer_model: The neural network
optimizer to use for the model network; default `None`.
:param torch.optim.Optimizer optimizer_weights: The neural network
optimizer to use for mask model; default `None`.
:param torch.optim.LRScheduler scheduler_model: Learning rate scheduler
for the model; default `None`.
:param torch.optim.LRScheduler scheduler_weights: Learning rate
scheduler for the mask model; default `None`.
:param WeightingInterface weighting: The weighting schema to use;
default `None`.
:param torch.nn.Module loss: The loss function to be minimized;
default `None`.
"""
# check consistency of the weight_function
check_consistency(weight_function, torch.nn.Module)
# create models for weights
weights_dict = {}
for condition_name in problem.conditions:
weights_dict[condition_name] = Weights(weight_function)
weights_dict = torch.nn.ModuleDict(weights_dict)
super().__init__(models=[model, weights_dict],
problem=problem,
optimizers=[optimizer_model, optimizer_weights],
schedulers=[scheduler_model, scheduler_weights],
weighting=weighting,
loss=loss)
# Set automatic optimization to False
self.automatic_optimization = False
self._vectorial_loss = deepcopy(self.loss)
self._vectorial_loss.reduction = "none"
def forward(self, x):
"""
Forward pass implementation for the PINN
solver. It returns the function
evaluation :math:`\mathbf{u}(\mathbf{x})` at the control points
:math:`\mathbf{x}`.
:param LabelTensor x: Input tensor for the SAPINN solver. It expects
a tensor :math:`N \\times D`, where :math:`N` the number of points
in the mesh, :math:`D` the dimension of the problem,
:return: PINN solution.
:rtype: LabelTensor
"""
return self.model(x)
def training_step(self, batch):
"""
Solver training step, overridden to perform manual optimization.
:param batch: The batch element in the dataloader.
:type batch: tuple
:return: The sum of the loss functions.
:rtype: LabelTensor
"""
self.optimizer_model.instance.zero_grad()
self.optimizer_weights.instance.zero_grad()
loss = super().training_step(batch)
self.optimizer_model.instance.step()
self.optimizer_weights.instance.step()
return loss
def loss_phys(self, samples, equation):
"""
Computes the physics loss for the SAPINN solver based on given
samples and equation.
:param LabelTensor samples: The samples to evaluate the physics loss.
:param EquationInterface equation: The governing equation
representing the physics.
:return: The physics loss calculated based on given
samples and equation.
:rtype: torch.Tensor
"""
# Train the weights
weighted_loss = self._loss_phys(samples, equation)
loss_value = -weighted_loss.as_subclass(torch.Tensor)
self.manual_backward(loss_value)
# Detach samples from the existing computational graph and
# create a new one by setting requires_grad to True.
# Alternatively, set `retain_graph=True`.
samples = samples.detach()
samples.requires_grad_()
# Train the model
weighted_loss = self._loss_phys(samples, equation)
loss_value = weighted_loss.as_subclass(torch.Tensor)
self.manual_backward(loss_value)
return loss_value
def loss_data(self, input_pts, output_pts):
"""
Computes the data loss for the SAPINN solver based on input and
output. It computes the loss between the
network output and the true solution.
:param LabelTensor input_pts: The input to the neural networks.
:param LabelTensor output_pts: The true solution to compare with the
network solution.
:return: The computed data loss.
:rtype: torch.Tensor
"""
residual = self.forward(input_pts) - output_pts
loss = self._vectorial_loss(
torch.zeros_like(residual, requires_grad=True), residual
)
loss_value = self._vect_to_scalar(loss).as_subclass(torch.Tensor)
self.manual_backward(loss_value)
return loss_value
def configure_optimizers(self):
"""
Optimizer configuration for the SelfAdaptive PINN solver.
:return: The optimizers and the schedulers
:rtype: tuple(list, list)
"""
# If the problem is an InverseProblem, add the unknown parameters
# to the parameters to be optimized
self.optimizer_model.hook(self.model.parameters())
self.optimizer_weights.hook(self.weights_dict.parameters())
if isinstance(self.problem, InverseProblem):
self.optimizer_model.instance.add_param_group(
{
"params": [
self._params[var]
for var in self.problem.unknown_variables
]
}
)
self.scheduler_model.hook(self.optimizer_model)
self.scheduler_weights.hook(self.optimizer_weights)
return (
[self.optimizer_model.instance,
self.optimizer_weights.instance],
[self.scheduler_model.instance,
self.scheduler_weights.instance]
)
def on_train_batch_end(self, outputs, batch, batch_idx):
"""
This method is called at the end of each training batch, and overrides
the PyTorch Lightning implementation for logging the checkpoints.
:param torch.Tensor outputs: The output from the model for the
current batch.
:param tuple batch: The current batch of data.
:param int batch_idx: The index of the current batch.
:return: Whatever is returned by the parent
method ``on_train_batch_end``.
:rtype: Any
"""
# increase by one the counter of optimization to save loggers
(
self.trainer.fit_loop.epoch_loop.manual_optimization
.optim_step_progress.total.completed
) += 1
return super().on_train_batch_end(outputs, batch, batch_idx)
def on_train_start(self):
"""
This method is called at the start of the training for setting
the self adaptive weights as parameters of the mask model.
:return: Whatever is returned by the parent
method ``on_train_start``.
:rtype: Any
"""
if self.trainer.batch_size is not None:
raise NotImplementedError("SelfAdaptivePINN only works with full "
"batch size, set batch_size=None inside "
"the Trainer to use the solver.")
device = torch.device(
self.trainer._accelerator_connector._accelerator_flag
)
# Initialize the self adaptive weights only for training points
for condition_name, tensor in (
self.trainer.data_module.train_dataset.input_points.items()
):
self.weights_dict[condition_name].sa_weights.data = (
torch.rand((tensor.shape[0], 1), device=device)
)
return super().on_train_start()
def on_load_checkpoint(self, checkpoint):
"""
Override the PyTorch Lightning ``on_load_checkpoint`` to handle
checkpoints for Self-Adaptive Weights. This method should not be
overridden unless intentionally.
:param dict checkpoint: Pytorch Lightning checkpoint dict.
"""
# First initialize self-adaptive weights with correct shape,
# then load the values from the checkpoint.
for condition_name, _ in self.problem.input_pts.items():
shape = checkpoint['state_dict'][
f"_pina_models.1.{condition_name}.sa_weights"
].shape
self.weights_dict[condition_name].sa_weights.data = (
torch.rand(shape)
)
return super().on_load_checkpoint(checkpoint)
def _loss_phys(self, samples, equation):
"""
Computation of the physics loss for the SelfAdaptive PINN solver.
:param LabelTensor samples: Input samples to evaluate the physics loss.
:param EquationInterface equation: the governing equation representing
the physics.
:return: The weighted scalar loss.
:rtype: LabelTensor
"""
residual = self.compute_residual(samples, equation)
weights = self.weights_dict[self.current_condition_name].forward()
loss_value = self._vectorial_loss(
torch.zeros_like(residual, requires_grad=True), residual
)
return self._vect_to_scalar(weights * loss_value)
def _vect_to_scalar(self, loss_value):
"""
Aggregation of the pointwise loss into a scalar value.
:param LabelTensor loss_value: the matrix of pointwise loss.
:return: the scalar loss.
:rtype: LabelTensor
"""
if self.loss.reduction == "mean":
ret = torch.mean(loss_value)
elif self.loss.reduction == "sum":
ret = torch.sum(loss_value)
else:
raise RuntimeError(
f"Invalid reduction, got {self.loss.reduction} "
"but expected mean or sum."
)
return ret
@property
def model(self):
"""
Returns the neural network model.
:return: The neural network model.
:rtype: torch.nn.Module
"""
return self.models[0]
@property
def weights_dict(self):
"""
Returns the mask models associated with the application of
the mask to the self adaptive weights for each loss that
composes the global loss of the problem.
:return: The ModuleDict for mask models.
:rtype: torch.nn.ModuleDict
"""
return self.models[1]
@property
def scheduler_model(self):
"""
Returns the scheduler associated with the neural network model.
:return: The scheduler for the neural network model.
:rtype: torch.optim.lr_scheduler._LRScheduler
"""
return self.schedulers[0]
@property
def scheduler_weights(self):
"""
Returns the scheduler associated with the mask model (if applicable).
:return: The scheduler for the mask model.
:rtype: torch.optim.lr_scheduler._LRScheduler
"""
return self.schedulers[1]
@property
def optimizer_model(self):
"""
Returns the optimizer associated with the neural network model.
:return: The optimizer for the neural network model.
:rtype: torch.optim.Optimizer
"""
return self.optimizers[0]
@property
def optimizer_weights(self):
"""
Returns the optimizer associated with the mask model (if applicable).
:return: The optimizer for the mask model.
:rtype: torch.optim.Optimizer
"""
return self.optimizers[1]
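Finally, the ``Weights`` mask in isolation: per-point trainable multipliers passed through the mask function and trained by ascent (the sign flip in ``loss_phys`` above). A minimal sketch with made-up residuals:

import torch

mask = torch.nn.Sigmoid()  # the default weight_function
sa_weights = torch.nn.Parameter(torch.rand(4, 1))  # one weight per training point
residual = torch.randn(4, 1)
pointwise = residual ** 2  # pointwise MSE against a zero target
weighted = (mask(sa_weights) * pointwise).mean()
# the weights optimizer steps on -weighted (maximization),
# while the model optimizer steps on +weighted (minimization)
(-weighted).backward()
print(sa_weights.grad)  # ascent direction for the self-adaptive weights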