This commit is contained in:
Dario Coscia
2025-03-19 15:34:11 +01:00
committed by Nicola Demo
parent 716d43f146
commit 0c4ab3e571
18 changed files with 17 additions and 17 deletions

View File

@@ -0,0 +1,19 @@
"""Module for the Physics-Informed solvers."""
__all__ = [
"PINNInterface",
"PINN",
"GradientPINN",
"CausalPINN",
"CompetitivePINN",
"SelfAdaptivePINN",
"RBAPINN",
]
from .pinn_interface import PINNInterface
from .pinn import PINN
from .rba_pinn import RBAPINN
from .causal_pinn import CausalPINN
from .gradient_pinn import GradientPINN
from .competitive_pinn import CompetitivePINN
from .self_adaptive_pinn import SelfAdaptivePINN

View File

@@ -0,0 +1,219 @@
"""Module for the Causal PINN solver."""
import torch
from ...problem import TimeDependentProblem
from .pinn import PINN
from ...utils import check_consistency
class CausalPINN(PINN):
r"""
Causal Physics-Informed Neural Network (CausalPINN) solver class.
This class implements the Causal Physics-Informed Neural Network solver,
using a user specified ``model`` to solve a specific ``problem``.
It can be used to solve both forward and inverse problems.
The Causal Physics-Informed Neural Network solver aims to find the solution
:math:`\mathbf{u}:\Omega\rightarrow\mathbb{R}^m` of a differential problem:
.. math::
\begin{cases}
\mathcal{A}[\mathbf{u}](\mathbf{x})=0\quad,\mathbf{x}\in\Omega\\
\mathcal{B}[\mathbf{u}](\mathbf{x})=0\quad,
\mathbf{x}\in\partial\Omega
\end{cases}
minimizing the loss function:
.. math::
\mathcal{L}_{\rm{problem}} = \frac{1}{N_t}\sum_{i=1}^{N_t}
\omega_{i}\mathcal{L}_r(t_i),
where:
.. math::
\mathcal{L}_r(t) = \frac{1}{N}\sum_{i=1}^N
\mathcal{L}(\mathcal{A}[\mathbf{u}](\mathbf{x}_i, t)) +
\frac{1}{N}\sum_{i=1}^N
\mathcal{L}(\mathcal{B}[\mathbf{u}](\mathbf{x}_i, t))
and,
.. math::
\omega_i = \exp\left(\epsilon \sum_{k=1}^{i-1}\mathcal{L}_r(t_k)\right).
:math:`\epsilon` is a hyperparameter, set by default to :math:`100`, while
:math:`\mathcal{L}` is a specific loss function, typically the MSE:
.. math::
\mathcal{L}(v) = \| v \|^2_2.
.. seealso::
**Original reference**: Wang, Sifan, Shyam Sankaran, and Paris
Perdikaris.
*Respecting causality for training physics-informed
neural networks.*
Computer Methods in Applied Mechanics and Engineering 421 (2024): 116813.
DOI: `10.1016/j.cma.2024.116813 <https://doi.org/10.1016/j.cma.2024.116813>`_.
.. note::
This class is only compatible with problems that inherit from the
:class:`~pina.problem.time_dependent_problem.TimeDependentProblem`
class.
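:Example:
A minimal usage sketch, assuming ``problem`` is an already instantiated
:class:`~pina.problem.time_dependent_problem.TimeDependentProblem` subclass
and ``model`` is a ``torch.nn.Module``, both defined elsewhere:
>>> solver = CausalPINN(problem=problem, model=model, eps=100)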
"""
def __init__(
self,
problem,
model,
optimizer=None,
scheduler=None,
weighting=None,
loss=None,
eps=100,
):
"""
Initialization of the :class:`CausalPINN` class.
:param AbstractProblem problem: The problem to be solved. It must
inherit from at least
:class:`~pina.problem.time_dependent_problem.TimeDependentProblem`.
:param torch.nn.Module model: The neural network model to be used.
:param Optimizer optimizer: The optimizer to be used.
If `None`, the :class:`torch.optim.Adam` optimizer is used.
Default is ``None``.
:param torch.optim.LRScheduler scheduler: Learning rate scheduler.
If `None`, the :class:`torch.optim.lr_scheduler.ConstantLR`
scheduler is used. Default is ``None``.
:param WeightingInterface weighting: The weighting schema to be used.
If `None`, no weighting schema is used. Default is ``None``.
:param torch.nn.Module loss: The loss function to be minimized.
If `None`, the :class:`torch.nn.MSELoss` loss is used.
Default is `None`.
:param float eps: The exponential decay parameter. Default is ``100``.
:raises ValueError: If the problem is not a TimeDependentProblem.
"""
super().__init__(
model=model,
problem=problem,
optimizer=optimizer,
scheduler=scheduler,
weighting=weighting,
loss=loss,
)
# checking consistency
check_consistency(eps, (int, float))
self._eps = eps
if not isinstance(self.problem, TimeDependentProblem):
raise ValueError(
"Casual PINN works only for problems"
"inheriting from TimeDependentProblem."
)
def loss_phys(self, samples, equation):
"""
Computes the physics loss for the physics-informed solver based on the
provided samples and equation.
:param LabelTensor samples: The samples to evaluate the physics loss.
:param EquationInterface equation: The governing equation.
:return: The computed physics loss.
:rtype: LabelTensor
"""
# split sequentially ordered time tensors into chunks
chunks, labels = self._split_tensor_into_chunks(samples)
# compute residuals - these correspond to the ordered loss function
# values for each time step. Apply `flatten` to ensure obtaining
# a tensor of shape #chunks after concatenating the residuals
time_loss = []
for chunk in chunks:
chunk.labels = labels
# classical PINN loss
residual = self.compute_residual(samples=chunk, equation=equation)
loss_val = self.loss(
torch.zeros_like(residual, requires_grad=True), residual
)
time_loss.append(loss_val)
# concatenate residuals
time_loss = torch.stack(time_loss)
# compute weights without storing the gradient
with torch.no_grad():
weights = self._compute_weights(time_loss)
return (weights * time_loss).mean()
@property
def eps(self):
"""
The exponential decay parameter.
:return: The exponential decay parameter.
:rtype: float
"""
return self._eps
@eps.setter
def eps(self, value):
"""
Set the exponential decay parameter.
:param float | int value: The exponential decay parameter.
"""
check_consistency(value, (int, float))
self._eps = value
def _sort_label_tensor(self, tensor):
"""
Sort the tensor with respect to the temporal variables.
:param LabelTensor tensor: The tensor to be sorted.
:return: The tensor sorted with respect to the temporal variables.
:rtype: LabelTensor
"""
# labels input tensors
labels = tensor.labels
# extract time tensor
time_tensor = tensor.extract(self.problem.temporal_domain.variables)
# sort the time tensors (note: sorting is expensive on GPU)
_, idx = torch.sort(time_tensor.tensor.flatten())
tensor = tensor[idx]
tensor.labels = labels
return tensor
def _split_tensor_into_chunks(self, tensor):
"""
Split the tensor into chunks based on time.
:param LabelTensor tensor: The tensor to be split.
:return: A tuple containing the list of tensor chunks and the
corresponding labels.
:rtype: tuple[list[LabelTensor], list[str]]
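A plain ``torch`` sketch of the sort / count / split pattern used in the
implementation below (illustrative values, no ``LabelTensor`` involved):
>>> import torch
>>> t = torch.tensor([2., 1., 2., 1.])        # unsorted time coordinates
>>> t, _ = torch.sort(t)
>>> _, counts = t.unique(return_counts=True)
>>> chunks = torch.split(t, counts.tolist())  # (tensor([1., 1.]), tensor([2., 2.]))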
"""
# extract labels
labels = tensor.labels
# sort input tensor based on time
tensor = self._sort_label_tensor(tensor)
# extract time tensor
time_tensor = tensor.extract(self.problem.temporal_domain.variables)
# count unique tensors in time
_, idx_split = time_tensor.unique(return_counts=True)
# split the tensor based on time
chunks = torch.split(tensor, tuple(idx_split))
return chunks, labels
def _compute_weights(self, loss):
"""
Compute the weights for the physics loss based on the cumulative loss.
:param LabelTensor loss: The physics loss values.
:return: The computed weights for the physics loss.
:rtype: LabelTensor
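A small numerical sketch of the weighting (illustrative values):
>>> import torch
>>> eps = 100
>>> loss = torch.tensor([1e-3, 2e-3, 5e-3])   # ordered per-time-step losses
>>> weights = torch.exp(-eps * torch.cumsum(loss, dim=0))
>>> # weights ~ [0.905, 0.741, 0.449]: later times are down-weighted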
"""
# compute cumulative loss and multiply by epsilon
cumulative_loss = self._eps * torch.cumsum(loss, dim=0)
# return the exponential of the negative weighted cumulative sum
return torch.exp(-cumulative_loss)

View File

@@ -0,0 +1,273 @@
"""Module for the Competitive PINN solver."""
import copy
import torch
from ...problem import InverseProblem
from .pinn_interface import PINNInterface
from ..solver import MultiSolverInterface
class CompetitivePINN(PINNInterface, MultiSolverInterface):
r"""
Competitive Physics-Informed Neural Network (CompetitivePINN) solver class.
This class implements the Competitive Physics-Informed Neural Network
solver, using a user specified ``model`` to solve a specific ``problem``.
It can be used to solve both forward and inverse problems.
The Competitive Physics-Informed Neural Network solver aims to find the
solution :math:`\mathbf{u}:\Omega\rightarrow\mathbb{R}^m` of a differential
problem:
.. math::
\begin{cases}
\mathcal{A}[\mathbf{u}](\mathbf{x})=0\quad,\mathbf{x}\in\Omega\\
\mathcal{B}[\mathbf{u}](\mathbf{x})=0\quad,
\mathbf{x}\in\partial\Omega
\end{cases}
minimizing the loss function with respect to the model parameters, while
maximizing it with respect to the discriminator parameters:
.. math::
\mathcal{L}_{\rm{problem}} = \frac{1}{N}\sum_{i=1}^N
\mathcal{L}(D(\mathbf{x}_i)\mathcal{A}[\mathbf{u}](\mathbf{x}_i))+
\frac{1}{N}\sum_{i=1}^N
\mathcal{L}(D(\mathbf{x}_i)\mathcal{B}[\mathbf{u}](\mathbf{x}_i)),
where :math:`D` is the discriminator network, which identifies the points
where the model performs worst, and :math:`\mathcal{L}` is a specific loss
function, typically the MSE:
.. math::
\mathcal{L}(v) = \| v \|^2_2.
.. seealso::
**Original reference**: Zeng, Qi, et al.
*Competitive physics informed networks.*
International Conference on Learning Representations, ICLR 2022
`OpenReview Preprint <https://openreview.net/forum?id=z9SIj-IM7tn>`_.
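:Example:
A minimal usage sketch, assuming ``problem`` and ``model`` are defined
elsewhere; when no ``discriminator`` is passed, a deep copy of ``model``
is used:
>>> solver = CompetitivePINN(problem=problem, model=model)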
"""
def __init__(
self,
problem,
model,
discriminator=None,
optimizer_model=None,
optimizer_discriminator=None,
scheduler_model=None,
scheduler_discriminator=None,
weighting=None,
loss=None,
):
"""
Initialization of the :class:`CompetitivePINN` class.
:param AbstractProblem problem: The problem to be solved.
:param torch.nn.Module model: The neural network model to be used.
:param torch.nn.Module discriminator: The discriminator to be used.
If `None`, the discriminator is a deepcopy of the ``model``.
Default is ``None``.
:param torch.optim.Optimizer optimizer_model: The optimizer of the
``model``. If `None`, the :class:`torch.optim.Adam` optimizer is
used. Default is ``None``.
:param torch.optim.Optimizer optimizer_discriminator: The optimizer of
the ``discriminator``. If `None`, the :class:`torch.optim.Adam`
optimizer is used. Default is ``None``.
:param Scheduler scheduler_model: Learning rate scheduler for the
``model``.
If `None`, the :class:`torch.optim.lr_scheduler.ConstantLR`
scheduler is used. Default is ``None``.
:param Scheduler scheduler_discriminator: Learning rate scheduler for
the ``discriminator``.
If `None`, the :class:`torch.optim.lr_scheduler.ConstantLR`
scheduler is used. Default is ``None``.
:param WeightingInterface weighting: The weighting schema to be used.
If `None`, no weighting schema is used. Default is ``None``.
:param torch.nn.Module loss: The loss function to be minimized.
If `None`, the :class:`torch.nn.MSELoss` loss is used.
Default is `None`.
"""
if discriminator is None:
discriminator = copy.deepcopy(model)
super().__init__(
models=[model, discriminator],
problem=problem,
optimizers=[optimizer_model, optimizer_discriminator],
schedulers=[scheduler_model, scheduler_discriminator],
weighting=weighting,
loss=loss,
)
# Set automatic optimization to False
self.automatic_optimization = False
def forward(self, x):
"""
Forward pass.
:param LabelTensor x: Input tensor.
:return: The output of the neural network.
:rtype: LabelTensor
"""
return self.neural_net(x)
def training_step(self, batch):
"""
Solver training step, overridden to perform manual optimization.
:param list[tuple[str, dict]] batch: A batch of data. Each element is a
tuple containing a condition name and a dictionary of points.
:return: The aggregated loss.
:rtype: LabelTensor
"""
# train model
self.optimizer_model.instance.zero_grad()
loss = super().training_step(batch)
self.manual_backward(loss)
self.optimizer_model.instance.step()
# train discriminator
self.optimizer_discriminator.instance.zero_grad()
loss = super().training_step(batch)
self.manual_backward(-loss)
self.optimizer_discriminator.instance.step()
return loss
def loss_phys(self, samples, equation):
"""
Computes the physics loss for the physics-informed solver based on the
provided samples and equation.
:param LabelTensor samples: The samples to evaluate the physics loss.
:param EquationInterface equation: The governing equation.
:return: The computed physics loss.
:rtype: LabelTensor
"""
# Compute discriminator bets
discriminator_bets = self.discriminator(samples)
# Compute residual and multiply discriminator_bets
residual = self.compute_residual(samples=samples, equation=equation)
residual = residual * discriminator_bets
# Compute competitive residual.
loss_val = self.loss(
torch.zeros_like(residual, requires_grad=True),
residual,
)
return loss_val
def configure_optimizers(self):
"""
Optimizer configuration.
:return: The optimizers and the schedulers
:rtype: tuple[list[Optimizer], list[Scheduler]]
"""
# If the problem is an InverseProblem, add the unknown parameters
# to the parameters to be optimized
self.optimizer_model.hook(self.neural_net.parameters())
self.optimizer_discriminator.hook(self.discriminator.parameters())
if isinstance(self.problem, InverseProblem):
self.optimizer_model.instance.add_param_group(
{
"params": [
self._params[var]
for var in self.problem.unknown_variables
]
}
)
self.scheduler_model.hook(self.optimizer_model)
self.scheduler_discriminator.hook(self.optimizer_discriminator)
return (
[
self.optimizer_model.instance,
self.optimizer_discriminator.instance,
],
[
self.scheduler_model.instance,
self.scheduler_discriminator.instance,
],
)
def on_train_batch_end(self, outputs, batch, batch_idx):
"""
This method is called at the end of each training batch and overrides
the PyTorch Lightning implementation to log checkpoints.
:param torch.Tensor outputs: The ``model``'s output for the current
batch.
:param list[tuple[str, dict]] batch: A batch of data. Each element is a
tuple containing a condition name and a dictionary of points.
:param int batch_idx: The index of the current batch.
"""
# increase by one the counter of optimization to save loggers
(
self.trainer.fit_loop.epoch_loop.manual_optimization.optim_step_progress.total.completed
) += 1
return super().on_train_batch_end(outputs, batch, batch_idx)
@property
def neural_net(self):
"""
The model.
:return: The model.
:rtype: torch.nn.Module
"""
return self.models[0]
@property
def discriminator(self):
"""
The discriminator.
:return: The discriminator.
:rtype: torch.nn.Module
"""
return self.models[1]
@property
def optimizer_model(self):
"""
The optimizer associated to the model.
:return: The optimizer for the model.
:rtype: Optimizer
"""
return self.optimizers[0]
@property
def optimizer_discriminator(self):
"""
The optimizer associated to the discriminator.
:return: The optimizer for the discriminator.
:rtype: Optimizer
"""
return self.optimizers[1]
@property
def scheduler_model(self):
"""
The scheduler associated to the model.
:return: The scheduler for the model.
:rtype: Scheduler
"""
return self.schedulers[0]
@property
def scheduler_discriminator(self):
"""
The scheduler associated to the discriminator.
:return: The scheduler for the discriminator.
:rtype: Scheduler
"""
return self.schedulers[1]

View File

@@ -0,0 +1,130 @@
"""Module for the Gradient PINN solver."""
import torch
from .pinn import PINN
from ...operator import grad
from ...problem import SpatialProblem
class GradientPINN(PINN):
r"""
Gradient Physics-Informed Neural Network (GradientPINN) solver class.
This class implements the Gradient Physics-Informed Neural Network solver,
using a user specified ``model`` to solve a specific ``problem``.
It can be used to solve both forward and inverse problems.
The Gradient Physics-Informed Neural Network solver aims to find the
solution :math:`\mathbf{u}:\Omega\rightarrow\mathbb{R}^m` of a differential
problem:
.. math::
\begin{cases}
\mathcal{A}[\mathbf{u}](\mathbf{x})=0\quad,\mathbf{x}\in\Omega\\
\mathcal{B}[\mathbf{u}](\mathbf{x})=0\quad,
\mathbf{x}\in\partial\Omega
\end{cases}
minimizing the loss function:
.. math::
\mathcal{L}_{\rm{problem}} = &\frac{1}{N}\sum_{i=1}^N
\mathcal{L}(\mathcal{A}[\mathbf{u}](\mathbf{x}_i)) +
\frac{1}{N}\sum_{i=1}^N
\mathcal{L}(\mathcal{B}[\mathbf{u}](\mathbf{x}_i)) + \\
&\frac{1}{N}\sum_{i=1}^N
\nabla_{\mathbf{x}}\mathcal{L}(\mathcal{A}[\mathbf{u}](\mathbf{x}_i)) +
\frac{1}{N}\sum_{i=1}^N
\nabla_{\mathbf{x}}\mathcal{L}(\mathcal{B}[\mathbf{u}](\mathbf{x}_i)),
where :math:`\mathcal{L}` is a specific loss function, typically the MSE:
.. math::
\mathcal{L}(v) = \| v \|^2_2.
.. seealso::
**Original reference**: Yu, Jeremy, et al.
*Gradient-enhanced physics-informed neural networks for forward and
inverse PDE problems.*
Computer Methods in Applied Mechanics and Engineering 393 (2022): 114823.
DOI: `10.1016/j.cma.2022.114823 <https://doi.org/10.1016/j.cma.2022.114823>`_.
.. note::
This class is only compatible with problems that inherit from the
:class:`~pina.problem.spatial_problem.SpatialProblem` class.
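:Example:
A minimal usage sketch, assuming ``problem`` is an already instantiated
:class:`~pina.problem.spatial_problem.SpatialProblem` subclass and
``model`` is a ``torch.nn.Module``, both defined elsewhere:
>>> solver = GradientPINN(problem=problem, model=model)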
"""
def __init__(
self,
problem,
model,
optimizer=None,
scheduler=None,
weighting=None,
loss=None,
):
"""
Initialization of the :class:`GradientPINN` class.
:param AbstractProblem problem: The problem to be solved.
It must inherit from at least
:class:`~pina.problem.spatial_problem.SpatialProblem` to compute the
gradient of the loss.
:param torch.nn.Module model: The neural network model to be used.
:param Optimizer optimizer: The optimizer to be used.
If `None`, the :class:`torch.optim.Adam` optimizer is used.
Default is ``None``.
:param Scheduler scheduler: Learning rate scheduler.
If `None`, the :class:`torch.optim.lr_scheduler.ConstantLR`
scheduler is used. Default is ``None``.
:param WeightingInterface weighting: The weighting schema to be used.
If `None`, no weighting schema is used. Default is ``None``.
:param torch.nn.Module loss: The loss function to be minimized.
If `None`, the :class:`torch.nn.MSELoss` loss is used.
Default is `None`.
:raises ValueError: If the problem is not a SpatialProblem.
"""
super().__init__(
model=model,
problem=problem,
optimizer=optimizer,
scheduler=scheduler,
weighting=weighting,
loss=loss,
)
if not isinstance(self.problem, SpatialProblem):
raise ValueError(
"Gradient PINN computes the gradient of the "
"PINN loss with respect to the spatial "
"coordinates, thus the PINA problem must be "
"a SpatialProblem."
)
def loss_phys(self, samples, equation):
"""
Computes the physics loss for the physics-informed solver based on the
provided samples and equation.
:param LabelTensor samples: The samples to evaluate the physics loss.
:param EquationInterface equation: The governing equation.
:return: The computed physics loss.
:rtype: LabelTensor
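A plain ``torch`` sketch of the gradient-enhancement term, here using
``torch.autograd.grad`` instead of the PINA ``grad`` operator (toy
residual, illustrative only):
>>> import torch
>>> x = torch.linspace(0, 1, 5, requires_grad=True).reshape(-1, 1)
>>> residual = x ** 2                        # stand-in for a PDE residual
>>> loss = (residual ** 2).mean()
>>> (grad_loss,) = torch.autograd.grad(loss, x, create_graph=True)
>>> extra_term = (grad_loss ** 2).mean()     # penalty on the loss gradient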
"""
# classical PINN loss
residual = self.compute_residual(samples=samples, equation=equation)
loss_value = self.loss(
torch.zeros_like(residual, requires_grad=True), residual
)
# gradient PINN loss
loss_value = loss_value.reshape(-1, 1)
loss_value.labels = ["__loss"]
loss_grad = grad(loss_value, samples, d=self.problem.spatial_variables)
g_loss_phys = self.loss(
torch.zeros_like(loss_grad, requires_grad=True), loss_grad
)
return loss_value + g_loss_phys

View File

@@ -0,0 +1,121 @@
"""Module for the Physics-Informed Neural Network solver."""
import torch
from .pinn_interface import PINNInterface
from ..solver import SingleSolverInterface
from ...problem import InverseProblem
class PINN(PINNInterface, SingleSolverInterface):
r"""
Physics-Informed Neural Network (PINN) solver class.
This class implements the Physics-Informed Neural Network solver, using a user
specified ``model`` to solve a specific ``problem``.
It can be used to solve both forward and inverse problems.
The Physics-Informed Neural Network solver aims to find the solution
:math:`\mathbf{u}:\Omega\rightarrow\mathbb{R}^m` of a differential problem:
.. math::
\begin{cases}
\mathcal{A}[\mathbf{u}](\mathbf{x})=0\quad,\mathbf{x}\in\Omega\\
\mathcal{B}[\mathbf{u}](\mathbf{x})=0\quad,
\mathbf{x}\in\partial\Omega
\end{cases}
minimizing the loss function:
.. math::
\mathcal{L}_{\rm{problem}} = \frac{1}{N}\sum_{i=1}^N
\mathcal{L}(\mathcal{A}[\mathbf{u}](\mathbf{x}_i)) +
\frac{1}{N}\sum_{i=1}^N
\mathcal{L}(\mathcal{B}[\mathbf{u}](\mathbf{x}_i)),
where :math:`\mathcal{L}` is a specific loss function, typically the MSE:
.. math::
\mathcal{L}(v) = \| v \|^2_2.
.. seealso::
**Original reference**: Karniadakis, G. E., Kevrekidis, I. G., Lu, L.,
Perdikaris, P., Wang, S., & Yang, L. (2021).
*Physics-informed machine learning.*
Nature Reviews Physics, 3, 422-440.
DOI: `10.1038/s42254-021-00314-5 <https://doi.org/10.1038/s42254-021-00314-5>`_.
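:Example:
A minimal usage sketch, assuming ``problem`` is an already defined
``AbstractProblem`` instance and ``model`` is a ``torch.nn.Module``, both
created elsewhere:
>>> solver = PINN(problem=problem, model=model)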
"""
def __init__(
self,
problem,
model,
optimizer=None,
scheduler=None,
weighting=None,
loss=None,
):
"""
Initialization of the :class:`PINN` class.
:param AbstractProblem problem: The problem to be solved.
:param torch.nn.Module model: The neural network model to be used.
:param Optimizer optimizer: The optimizer to be used.
If `None`, the :class:`torch.optim.Adam` optimizer is used.
Default is ``None``.
:param Scheduler scheduler: Learning rate scheduler.
If `None`, the :class:`torch.optim.lr_scheduler.ConstantLR`
scheduler is used. Default is ``None``.
:param WeightingInterface weighting: The weighting schema to be used.
If `None`, no weighting schema is used. Default is ``None``.
:param torch.nn.Module loss: The loss function to be minimized.
If `None`, the :class:`torch.nn.MSELoss` loss is used.
Default is `None`.
"""
super().__init__(
model=model,
problem=problem,
optimizer=optimizer,
scheduler=scheduler,
weighting=weighting,
loss=loss,
)
def loss_phys(self, samples, equation):
"""
Computes the physics loss for the physics-informed solver based on the
provided samples and equation.
:param LabelTensor samples: The samples to evaluate the physics loss.
:param EquationInterface equation: The governing equation.
:return: The computed physics loss.
:rtype: LabelTensor
"""
residual = self.compute_residual(samples=samples, equation=equation)
loss_value = self.loss(
torch.zeros_like(residual, requires_grad=True), residual
)
return loss_value
def configure_optimizers(self):
"""
Optimizer configuration for the PINN solver.
:return: The optimizers and the schedulers
:rtype: tuple[list[Optimizer], list[Scheduler]]
"""
# If the problem is an InverseProblem, add the unknown parameters
# to the parameters to be optimized.
self.optimizer.hook(self.model.parameters())
if isinstance(self.problem, InverseProblem):
self.optimizer.instance.add_param_group(
{
"params": [
self._params[var]
for var in self.problem.unknown_variables
]
}
)
self.scheduler.hook(self.optimizer)
return ([self.optimizer.instance], [self.scheduler.instance])

View File

@@ -0,0 +1,236 @@
"""Module for the Physics-Informed Neural Network Interface."""
from abc import ABCMeta, abstractmethod
import torch
from torch.nn.modules.loss import _Loss
from ..solver import SolverInterface
from ...utils import check_consistency
from ...loss.loss_interface import LossInterface
from ...problem import InverseProblem
from ...condition import (
InputTargetCondition,
InputEquationCondition,
DomainEquationCondition,
)
class PINNInterface(SolverInterface, metaclass=ABCMeta):
"""
Base class for Physics-Informed Neural Network (PINN) solvers, implementing
the :class:`~pina.solver.solver.SolverInterface` class.
The `PINNInterface` class can be used to define PINNs that work with one or
multiple optimizers and/or models. By default, it is compatible with
problems defined by :class:`~pina.problem.abstract_problem.AbstractProblem`,
and users can choose the problem type the solver is meant to address.
"""
accepted_conditions_types = (
InputTargetCondition,
InputEquationCondition,
DomainEquationCondition,
)
def __init__(self, problem, loss=None, **kwargs):
"""
Initialization of the :class:`PINNInterface` class.
:param AbstractProblem problem: The problem to be solved.
:param torch.nn.Module loss: The loss function to be minimized.
If `None`, the :class:`torch.nn.MSELoss` loss is used.
Default is `None`.
:param kwargs: Additional keyword arguments to be passed to the
:class:`~pina.solver.solver.SolverInterface` class.
"""
if loss is None:
loss = torch.nn.MSELoss()
super().__init__(problem=problem, use_lt=True, **kwargs)
# check consistency
check_consistency(loss, (LossInterface, _Loss), subclass=False)
# assign variables
self._loss = loss
# inverse problem handling
if isinstance(self.problem, InverseProblem):
self._params = self.problem.unknown_parameters
self._clamp_params = self._clamp_inverse_problem_params
else:
self._params = None
self._clamp_params = lambda: None
self.__metric = None
def optimization_cycle(self, batch):
"""
The optimization cycle for the PINN solver.
This method calls ``_run_optimization_cycle`` with the physics loss as
argument, thus distinguishing the training step from the validation and
test steps.
:param list[tuple[str, dict]] batch: A batch of data. Each element is a
tuple containing a condition name and a dictionary of points.
:return: The losses computed for all conditions in the batch, cast
to a subclass of :class:`torch.Tensor`. The returned dict maps each
condition name to the associated scalar loss.
:rtype: dict
"""
return self._run_optimization_cycle(batch, self.loss_phys)
@torch.set_grad_enabled(True)
def validation_step(self, batch):
"""
The validation step for the PINN solver.
:param list[tuple[str, dict]] batch: A batch of data. Each element is a
tuple containing a condition name and a dictionary of points.
:return: The loss of the validation step.
:rtype: torch.Tensor
"""
losses = self._run_optimization_cycle(batch, self._residual_loss)
loss = self.weighting.aggregate(losses).as_subclass(torch.Tensor)
self.store_log("val_loss", loss, self.get_batch_size(batch))
return loss
@torch.set_grad_enabled(True)
def test_step(self, batch):
"""
The test step for the PINN solver.
:param list[tuple[str, dict]] batch: A batch of data. Each element is a
tuple containing a condition name and a dictionary of points.
:return: The loss of the test step.
:rtype: torch.Tensor
"""
losses = self._run_optimization_cycle(batch, self._residual_loss)
loss = self.weighting.aggregate(losses).as_subclass(torch.Tensor)
self.store_log("test_loss", loss, self.get_batch_size(batch))
return loss
def loss_data(self, input_pts, output_pts):
"""
Compute the data loss for the PINN solver by evaluating the loss
between the network's output and the true solution. This method should
not be overridden unless intentionally.
:param LabelTensor input_pts: The input points to the neural network.
:param LabelTensor output_pts: The true solution to compare with the
network's output.
:return: The supervised loss, averaged over the number of observations.
:rtype: torch.Tensor
"""
return self._loss(self.forward(input_pts), output_pts)
@abstractmethod
def loss_phys(self, samples, equation):
"""
Computes the physics loss for the physics-informed solver based on the
provided samples and equation. This method must be overridden in
subclasses. It distinguishes different types of PINN solvers.
:param LabelTensor samples: The samples to evaluate the physics loss.
:param EquationInterface equation: The governing equation.
:return: The computed physics loss.
:rtype: LabelTensor
"""
def compute_residual(self, samples, equation):
"""
Compute the residuals of the equation.
:param LabelTensor samples: The samples to evaluate the loss.
:param EquationInterface equation: The governing equation.
:return: The residual of the solution of the model.
:rtype: LabelTensor
"""
try:
residual = equation.residual(samples, self.forward(samples))
except TypeError:
# this occurs when the function has three inputs (inverse problem)
residual = equation.residual(
samples, self.forward(samples), self._params
)
return residual
def _residual_loss(self, samples, equation):
"""
Compute the residual loss.
:param LabelTensor samples: The samples to evaluate the loss.
:param EquationInterface equation: The governing equation.
:return: The residual loss.
:rtype: torch.Tensor
"""
residuals = self.compute_residual(samples, equation)
return self.loss(residuals, torch.zeros_like(residuals))
def _run_optimization_cycle(self, batch, loss_residuals):
"""
Compute, given a batch, the loss for each condition and return a
dictionary with the condition name as key and the loss as value.
:param list[tuple[str, dict]] batch: A batch of data. Each element is a
tuple containing a condition name and a dictionary of points.
:param function loss_residuals: The loss function to be minimized.
:return: The losses computed for all conditions in the batch, cast
to a subclass of :class:`torch.Tensor`. The returned dict maps each
condition name to the associated scalar loss.
:rtype: dict
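A schematic example of the expected ``batch`` structure (condition names
and tensors are purely illustrative):
>>> batch = [
...     ("physics", {"input": interior_points}),
...     ("data", {"input": observed_points, "target": observed_values}),
... ]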
"""
condition_loss = {}
for condition_name, points in batch:
self.__metric = condition_name
# if equations are passed
if "target" not in points:
input_pts = points["input"]
condition = self.problem.conditions[condition_name]
loss = loss_residuals(
input_pts.requires_grad_(), condition.equation
)
# if data are passed
else:
input_pts = points["input"]
output_pts = points["target"]
loss = self.loss_data(
input_pts=input_pts.requires_grad_(), output_pts=output_pts
)
# append loss
condition_loss[condition_name] = loss
# clamp unknown parameters in InverseProblem (if needed)
self._clamp_params()
return condition_loss
def _clamp_inverse_problem_params(self):
"""
Clamps the parameters of the inverse problem solver to specified ranges.
"""
for v in self._params:
self._params[v].data.clamp_(
self.problem.unknown_parameter_domain.range_[v][0],
self.problem.unknown_parameter_domain.range_[v][1],
)
@property
def loss(self):
"""
The loss used for training.
:return: The loss function used for training.
:rtype: torch.nn.Module
"""
return self._loss
@property
def current_condition_name(self):
"""
The current condition name.
:return: The current condition name.
:rtype: str
"""
return self.__metric

View File

@@ -0,0 +1,188 @@
"""Module for the Residual-Based Attention PINN solver."""
from copy import deepcopy
import torch
from .pinn import PINN
from ...utils import check_consistency
class RBAPINN(PINN):
r"""
Residual-based Attention Physics-Informed Neural Network (RBAPINN) solver
class. This class implements the Residual-based Attention Physics-Informed
Neural Network solver, using a user specified ``model`` to solve a specific
``problem``. It can be used to solve both forward and inverse problems.
The Residual-based Attention Physics-Informed Neural Network solver aims to
find the solution :math:`\mathbf{u}:\Omega\rightarrow\mathbb{R}^m` of a
differential problem:
.. math::
\begin{cases}
\mathcal{A}[\mathbf{u}](\mathbf{x})=0\quad,\mathbf{x}\in\Omega\\
\mathcal{B}[\mathbf{u}](\mathbf{x})=0\quad,
\mathbf{x}\in\partial\Omega
\end{cases}
minimizing the loss function:
.. math::
\mathcal{L}_{\rm{problem}} = \frac{1}{N} \sum_{i=1}^{N_\Omega}
\lambda_{\Omega}^{i} \mathcal{L} \left( \mathcal{A}
[\mathbf{u}](\mathbf{x}) \right) + \frac{1}{N}
\sum_{i=1}^{N_{\partial\Omega}}
\lambda_{\partial\Omega}^{i} \mathcal{L}
\left( \mathcal{B}[\mathbf{u}](\mathbf{x})
\right),
denoting the weights as:
:math:`\lambda_{\Omega}^1, \dots, \lambda_{\Omega}^{N_\Omega}` and
:math:`\lambda_{\partial\Omega}^1, \dots,
\lambda_{\partial\Omega}^{N_{\partial\Omega}}`
for :math:`\Omega` and :math:`\partial\Omega`, respectively.
The Residual-based Attention Physics-Informed Neural Network solver updates
the weights of the residuals at every epoch as follows:
.. math::
\lambda_i^{k+1} \leftarrow \gamma\lambda_i^{k} +
\eta\frac{\lvert r_i\rvert}{\max_j \lvert r_j\rvert},
where :math:`r_i` denotes the residual at point :math:`i`, :math:`\gamma`
denotes the decay rate, and :math:`\eta` is the learning rate for the
weights' update.
.. seealso::
**Original reference**: Sokratis J. Anagnostopoulos, Juan D. Toscano,
Nikolaos Stergiopulos, and George E. Karniadakis.
*Residual-based attention and connection to information
bottleneck theory in PINNs.*
Computer Methods in Applied Mechanics and Engineering 421 (2024): 116805.
DOI: `10.1016/j.cma.2024.116805
<https://doi.org/10.1016/j.cma.2024.116805>`_.
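A small numerical sketch of the update rule (illustrative values):
>>> import torch
>>> gamma, eta = 0.999, 0.001
>>> weights = torch.zeros(3)                      # lambda_i at step k
>>> residual = torch.tensor([0.5, 1.0, 0.25])     # pointwise residuals r_i
>>> weights = gamma * weights + eta * residual.abs() / residual.abs().max()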
"""
def __init__(
self,
problem,
model,
optimizer=None,
scheduler=None,
weighting=None,
loss=None,
eta=0.001,
gamma=0.999,
):
"""
Initialization of the :class:`RBAPINN` class.
:param AbstractProblem problem: The problem to be solved.
:param torch.nn.Module model: The neural network model to be used.
:param Optimizer optimizer: The optimizer to be used.
If `None`, the :class:`torch.optim.Adam` optimizer is used.
Default is ``None``.
:param Scheduler scheduler: Learning rate scheduler.
If `None`, the :class:`torch.optim.lr_scheduler.ConstantLR`
scheduler is used. Default is ``None``.
:param WeightingInterface weighting: The weighting schema to be used.
If `None`, no weighting schema is used. Default is ``None``.
:param torch.nn.Module loss: The loss function to be minimized.
If `None`, the :class:`torch.nn.MSELoss` loss is used.
Default is `None`.
:param float | int eta: The learning rate for the weights of the
residuals. Default is ``0.001``.
:param float gamma: The decay parameter in the update of the weights
of the residuals. Must be between ``0`` and ``1``.
Default is ``0.999``.
"""
super().__init__(
model=model,
problem=problem,
optimizer=optimizer,
scheduler=scheduler,
weighting=weighting,
loss=loss,
)
# check consistency
check_consistency(eta, (float, int))
check_consistency(gamma, float)
assert (
0 < gamma < 1
), f"Invalid range: expected 0 < gamma < 1, got {gamma=}"
self.eta = eta
self.gamma = gamma
# initialize weights
self.weights = {}
for condition_name in problem.conditions:
self.weights[condition_name] = 0
# define vectorial loss
self._vectorial_loss = deepcopy(self.loss)
self._vectorial_loss.reduction = "none"
# for now RBAPINN is implemented only for batch_size = None
def on_train_start(self):
"""
Hook method called at the beginning of training.
:raises NotImplementedError: If the batch size is not ``None``.
"""
if self.trainer.batch_size is not None:
raise NotImplementedError(
"RBAPINN only works with full batch "
"size, set batch_size=None inside the "
"Trainer to use the solver."
)
return super().on_train_start()
def _vect_to_scalar(self, loss_value):
"""
Computation of the scalar loss.
:param LabelTensor loss_value: the tensor of pointwise losses.
:raises RuntimeError: If the loss reduction is not ``mean`` or ``sum``.
:return: The computed scalar loss.
:rtype: LabelTensor
"""
if self.loss.reduction == "mean":
ret = torch.mean(loss_value)
elif self.loss.reduction == "sum":
ret = torch.sum(loss_value)
else:
raise RuntimeError(
f"Invalid reduction, got {self.loss.reduction} "
"but expected mean or sum."
)
return ret
def loss_phys(self, samples, equation):
"""
Computes the physics loss for the physics-informed solver based on the
provided samples and equation.
:param LabelTensor samples: The samples to evaluate the physics loss.
:param EquationInterface equation: The governing equation.
:return: The computed physics loss.
:rtype: LabelTensor
"""
residual = self.compute_residual(samples=samples, equation=equation)
cond = self.current_condition_name
r_norm = (
self.eta
* torch.abs(residual)
/ (torch.max(torch.abs(residual)) + 1e-12)
)
self.weights[cond] = (self.gamma * self.weights[cond] + r_norm).detach()
loss_value = self._vectorial_loss(
torch.zeros_like(residual, requires_grad=True), residual
)
return self._vect_to_scalar(self.weights[cond] ** 2 * loss_value)

View File

@@ -0,0 +1,386 @@
"""Module for the Self-Adaptive PINN solver."""
from copy import deepcopy
import torch
from ...utils import check_consistency
from ...problem import InverseProblem
from ..solver import MultiSolverInterface
from .pinn_interface import PINNInterface
class Weights(torch.nn.Module):
"""
Implementation of the mask model for the self-adaptive weights of the
:class:`SelfAdaptivePINN` solver.
"""
def __init__(self, func):
"""
Initialization of the :class:`Weights` class.
:param torch.nn.Module func: the mask model.
"""
super().__init__()
check_consistency(func, torch.nn.Module)
self.sa_weights = torch.nn.Parameter(torch.Tensor())
self.func = func
def forward(self):
"""
Forward pass implementation for the mask module.
:return: evaluation of self adaptive weights through the mask.
:rtype: torch.Tensor
"""
return self.func(self.sa_weights)
class SelfAdaptivePINN(PINNInterface, MultiSolverInterface):
r"""
Self-Adaptive Physics-Informed Neural Network (SelfAdaptivePINN) solver
class. This class implements the Self-Adaptive Physics-Informed Neural
Network solver, using a user specified ``model`` to solve a specific
``problem``. It can be used to solve both forward and inverse problems.
The Self-Adaptive Physics-Informed Neural Network solver aims to find the
solution :math:`\mathbf{u}:\Omega\rightarrow\mathbb{R}^m` of a differential
problem:
.. math::
\begin{cases}
\mathcal{A}[\mathbf{u}](\mathbf{x})=0\quad,\mathbf{x}\in\Omega\\
\mathcal{B}[\mathbf{u}](\mathbf{x})=0\quad,
\mathbf{x}\in\partial\Omega
\end{cases}
integrating pointwise loss evaluation using a mask :math:`m` and self-adaptive
weights, which allow the model to focus on regions of the domain where the
residual is higher.
The loss function to solve the problem is
.. math::
\mathcal{L}_{\rm{problem}} = \frac{1}{N} \sum_{i=1}^{N_\Omega} m
\left( \lambda_{\Omega}^{i} \right) \mathcal{L} \left( \mathcal{A}
[\mathbf{u}](\mathbf{x}) \right) + \frac{1}{N}
\sum_{i=1}^{N_{\partial\Omega}}
m \left( \lambda_{\partial\Omega}^{i} \right) \mathcal{L}
\left( \mathcal{B}[\mathbf{u}](\mathbf{x})
\right),
denoting the self adaptive weights as
:math:`\lambda_{\Omega}^1, \dots, \lambda_{\Omega}^{N_\Omega}` and
:math:`\lambda_{\partial\Omega}^1, \dots,
\lambda_{\partial\Omega}^{N_{\partial\Omega}}`
for :math:`\Omega` and :math:`\partial \Omega`, respectively.
The Self-Adaptive Physics-Informed Neural Network solver identifies the
solution and appropriate self adaptive weights by solving the following
optimization problem:
.. math::
\min_{w} \max_{\lambda_{\Omega}^k, \lambda_{\partial \Omega}^s}
\mathcal{L} ,
where :math:`w` denotes the network parameters, and :math:`\mathcal{L}` is
a specific loss function, typically the MSE:
.. math::
\mathcal{L}(v) = \| v \|^2_2.
.. seealso::
**Original reference**: McClenny, Levi D., and Ulisses M. Braga-Neto.
*Self-adaptive physics-informed neural networks.*
Journal of Computational Physics 474 (2023): 111722.
DOI: `10.1016/j.jcp.2022.111722
<https://doi.org/10.1016/j.jcp.2022.111722>`_.
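:Example:
A minimal usage sketch, assuming ``problem`` and ``model`` are defined
elsewhere; the mask applied to the self-adaptive weights defaults to
``torch.nn.Sigmoid()``:
>>> solver = SelfAdaptivePINN(problem=problem, model=model,
...                           weight_function=torch.nn.Sigmoid())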
"""
def __init__(
self,
problem,
model,
weight_function=torch.nn.Sigmoid(),
optimizer_model=None,
optimizer_weights=None,
scheduler_model=None,
scheduler_weights=None,
weighting=None,
loss=None,
):
"""
Initialization of the :class:`SelfAdaptivePINN` class.
:param AbstractProblem problem: The problem to be solved.
:param torch.nn.Module model: The model to be used.
:param torch.nn.Module weight_function: The Self-Adaptive mask model.
Default is ``torch.nn.Sigmoid()``.
:param Optimizer optimizer_model: The optimizer of the ``model``.
If `None`, the :class:`torch.optim.Adam` optimizer is used.
Default is ``None``.
:param Optimizer optimizer_weights: The optimizer of the
``weight_function``.
If `None`, the :class:`torch.optim.Adam` optimizer is used.
Default is ``None``.
:param Scheduler scheduler_model: Learning rate scheduler for the
``model``.
If `None`, the :class:`torch.optim.lr_scheduler.ConstantLR`
scheduler is used. Default is ``None``.
:param Scheduler scheduler_weights: Learning rate scheduler for the
``weight_function``.
If `None`, the :class:`torch.optim.lr_scheduler.ConstantLR`
scheduler is used. Default is ``None``.
:param WeightingInterface weighting: The weighting schema to be used.
If `None`, no weighting schema is used. Default is ``None``.
:param torch.nn.Module loss: The loss function to be minimized.
If `None`, the :class:`torch.nn.MSELoss` loss is used.
Default is `None`.
"""
# check consistency of weight_function
check_consistency(weight_function, torch.nn.Module)
# create models for weights
weights_dict = {}
for condition_name in problem.conditions:
weights_dict[condition_name] = Weights(weight_function)
weights_dict = torch.nn.ModuleDict(weights_dict)
super().__init__(
models=[model, weights_dict],
problem=problem,
optimizers=[optimizer_model, optimizer_weights],
schedulers=[scheduler_model, scheduler_weights],
weighting=weighting,
loss=loss,
)
# Set automatic optimization to False
self.automatic_optimization = False
self._vectorial_loss = deepcopy(self.loss)
self._vectorial_loss.reduction = "none"
def forward(self, x):
"""
Forward pass.
:param LabelTensor x: Input tensor.
:return: The output of the neural network.
:rtype: LabelTensor
"""
return self.model(x)
def training_step(self, batch):
"""
Solver training step, overridden to perform manual optimization.
:param list[tuple[str, dict]] batch: A batch of data. Each element is a
tuple containing a condition name and a dictionary of points.
:return: The aggregated loss.
:rtype: LabelTensor
"""
# Weights optimization
self.optimizer_weights.instance.zero_grad()
loss = super().training_step(batch)
self.manual_backward(-loss)
self.optimizer_weights.instance.step()
# Model optimization
self.optimizer_model.instance.zero_grad()
loss = super().training_step(batch)
self.manual_backward(loss)
self.optimizer_model.instance.step()
return loss
def configure_optimizers(self):
"""
Optimizer configuration.
:return: The optimizers and the schedulers
:rtype: tuple[list[Optimizer], list[Scheduler]]
"""
# If the problem is an InverseProblem, add the unknown parameters
# to the parameters to be optimized
self.optimizer_model.hook(self.model.parameters())
self.optimizer_weights.hook(self.weights_dict.parameters())
if isinstance(self.problem, InverseProblem):
self.optimizer_model.instance.add_param_group(
{
"params": [
self._params[var]
for var in self.problem.unknown_variables
]
}
)
self.scheduler_model.hook(self.optimizer_model)
self.scheduler_weights.hook(self.optimizer_weights)
return (
[self.optimizer_model.instance, self.optimizer_weights.instance],
[self.scheduler_model.instance, self.scheduler_weights.instance],
)
def on_train_batch_end(self, outputs, batch, batch_idx):
"""
This method is called at the end of each training batch and overrides
the PyTorch Lightning implementation to log checkpoints.
:param torch.Tensor outputs: The ``model``'s output for the current
batch.
:param list[tuple[str, dict]] batch: A batch of data. Each element is a
tuple containing a condition name and a dictionary of points.
:param int batch_idx: The index of the current batch.
"""
# increase by one the counter of optimization to save loggers
(
self.trainer.fit_loop.epoch_loop.manual_optimization.optim_step_progress.total.completed
) += 1
return super().on_train_batch_end(outputs, batch, batch_idx)
def on_train_start(self):
"""
This method is called at the start of the training process to set the
self-adaptive weights as parameters of the mask model.
:raises NotImplementedError: If the batch size is not ``None``.
"""
if self.trainer.batch_size is not None:
raise NotImplementedError(
"SelfAdaptivePINN only works with full "
"batch size, set batch_size=None inside "
"the Trainer to use the solver."
)
device = torch.device(
self.trainer._accelerator_connector._accelerator_flag
)
# Initialize the self adaptive weights only for training points
for (
condition_name,
tensor,
) in self.trainer.data_module.train_dataset.input.items():
self.weights_dict[condition_name].sa_weights.data = torch.rand(
(tensor.shape[0], 1), device=device
)
return super().on_train_start()
def on_load_checkpoint(self, checkpoint):
"""
Override of the PyTorch Lightning ``on_load_checkpoint`` method to
handle checkpoints for the self-adaptive weights. This method should not be
overridden unless intentionally.
:param dict checkpoint: PyTorch Lightning checkpoint dict.
"""
# First initialize self-adaptive weights with correct shape,
# then load the values from the checkpoint.
for condition_name, _ in self.problem.input_pts.items():
shape = checkpoint["state_dict"][
f"_pina_models.1.{condition_name}.sa_weights"
].shape
self.weights_dict[condition_name].sa_weights.data = torch.rand(
shape
)
return super().on_load_checkpoint(checkpoint)
def loss_phys(self, samples, equation):
"""
Computes the physics loss for the physics-informed solver based on the
provided samples and equation.
:param LabelTensor samples: The samples to evaluate the physics loss.
:param EquationInterface equation: The governing equation.
:return: The computed physics loss.
:rtype: LabelTensor
"""
residual = self.compute_residual(samples, equation)
weights = self.weights_dict[self.current_condition_name].forward()
loss_value = self._vectorial_loss(
torch.zeros_like(residual, requires_grad=True), residual
)
return self._vect_to_scalar(weights * loss_value)
def _vect_to_scalar(self, loss_value):
"""
Computation of the scalar loss.
:param LabelTensor loss_value: the tensor of pointwise losses.
:raises RuntimeError: If the loss reduction is not ``mean`` or ``sum``.
:return: The computed scalar loss.
:rtype: LabelTensor
"""
if self.loss.reduction == "mean":
ret = torch.mean(loss_value)
elif self.loss.reduction == "sum":
ret = torch.sum(loss_value)
else:
raise RuntimeError(
f"Invalid reduction, got {self.loss.reduction} "
"but expected mean or sum."
)
return ret
@property
def model(self):
"""
The model.
:return: The model.
:rtype: torch.nn.Module
"""
return self.models[0]
@property
def weights_dict(self):
"""
The self-adaptive weights.
:return: The self-adaptive weights.
:rtype: torch.nn.Module
"""
return self.models[1]
@property
def scheduler_model(self):
"""
The scheduler associated to the model.
:return: The scheduler for the model.
:rtype: Scheduler
"""
return self.schedulers[0]
@property
def scheduler_weights(self):
"""
The scheduler associated to the mask model.
:return: The scheduler for the mask model.
:rtype: Scheduler
"""
return self.schedulers[1]
@property
def optimizer_model(self):
"""
The optimizer associated to the model.
:return: The optimizer for the model.
:rtype: Optimizer
"""
return self.optimizers[0]
@property
def optimizer_weights(self):
"""
The optimizer associated to the mask model.
:return: The optimizer for the mask model.
:rtype: Optimizer
"""
return self.optimizers[1]