Refactoring solvers (#541)
* Refactoring solvers
* Simplify logic compile
* Improve and update doc
* Create SupervisedSolverInterface
* Specialize SupervisedSolver and ReducedOrderModelSolver
* Create EnsembleSolverInterface + EnsembleSupervisedSolver
* Create tests ensemble solvers
* formatter
* codacy
* fix issues + speedup test
11  pina/solver/supervised_solver/__init__.py  Normal file
@@ -0,0 +1,11 @@
"""Module for the Supervised solvers."""

__all__ = [
    "SupervisedSolverInterface",
    "SupervisedSolver",
    "ReducedOrderModelSolver",
]

from .supervised_solver_interface import SupervisedSolverInterface
from .supervised import SupervisedSolver
from .reduced_order_model import ReducedOrderModelSolver
190  pina/solver/supervised_solver/reduced_order_model.py  Normal file
@@ -0,0 +1,190 @@
"""Module for the Reduced Order Model solver."""

import torch

from .supervised_solver_interface import SupervisedSolverInterface
from ..solver import SingleSolverInterface

class ReducedOrderModelSolver(SupervisedSolverInterface, SingleSolverInterface):
    r"""
    Reduced Order Model solver class. This class implements the Reduced Order
    Model solver, using user-specified ``reduction_network`` and
    ``interpolation_network`` to solve a specific ``problem``.

    The Reduced Order Model solver aims to find the solution
    :math:`\mathbf{u}:\Omega\rightarrow\mathbb{R}^m` of a differential problem:

    .. math::

        \begin{cases}
        \mathcal{A}[\mathbf{u}(\mu)](\mathbf{x})=0\quad,\mathbf{x}\in\Omega\\
        \mathcal{B}[\mathbf{u}(\mu)](\mathbf{x})=0\quad,
        \mathbf{x}\in\partial\Omega
        \end{cases}

    This is done by means of two neural networks: the ``reduction_network``,
    which defines an encoder :math:`\mathcal{E}_{\rm{net}}` and a decoder
    :math:`\mathcal{D}_{\rm{net}}`, and the ``interpolation_network``
    :math:`\mathcal{I}_{\rm{net}}`. The input is assumed to be discretised in
    the spatial dimensions.

    The following loss function is minimized during training:

    .. math::
        \mathcal{L}_{\rm{problem}} = \frac{1}{N}\sum_{i=1}^N
        \mathcal{L}(\mathcal{E}_{\rm{net}}[\mathbf{u}(\mu_i)] -
        \mathcal{I}_{\rm{net}}[\mu_i]) +
        \mathcal{L}(
        \mathcal{D}_{\rm{net}}[\mathcal{E}_{\rm{net}}[\mathbf{u}(\mu_i)]] -
        \mathbf{u}(\mu_i))

    where :math:`\mathcal{L}` is a specific loss function, typically the MSE:

    .. math::
        \mathcal{L}(v) = \| v \|^2_2.

    .. seealso::

        **Original reference**: Hesthaven, Jan S., and Stefano Ubbiali.
        *Non-intrusive reduced order modeling of nonlinear problems using
        neural networks.*
        Journal of Computational Physics 363 (2018): 55-78.
        DOI `10.1016/j.jcp.2018.02.037
        <https://doi.org/10.1016/j.jcp.2018.02.037>`_.

        Pichi, Federico, Beatriz Moya, and Jan S. Hesthaven.
        *A graph convolutional autoencoder approach to model order reduction
        for parametrized PDEs.*
        Journal of Computational Physics 501 (2024): 112762.
        DOI `10.1016/j.jcp.2024.112762
        <https://doi.org/10.1016/j.jcp.2024.112762>`_.

    .. note::
        The specified ``reduction_network`` must contain two methods, namely
        ``encode`` for input encoding, and ``decode`` for decoding the former
        result. The output of the ``interpolation_network``'s ``forward``
        method represents the interpolation of the latent space obtained with
        ``reduction_network.encode``.

    .. note::
        This solver uses an end-to-end training strategy, i.e. the
        ``reduction_network`` and ``interpolation_network`` are trained
        simultaneously. For references on this training strategy, see the
        works cited above.

    .. warning::
        This solver works only for data-driven problems. Hence, in the
        ``problem`` definition, the conditions must only contain ``input``
        (e.g. coefficient parameters, time parameters) and ``target``.
    """

    def __init__(
        self,
        problem,
        reduction_network,
        interpolation_network,
        loss=None,
        optimizer=None,
        scheduler=None,
        weighting=None,
        use_lt=True,
    ):
        """
        Initialization of the :class:`ReducedOrderModelSolver` class.

        :param AbstractProblem problem: The formulation of the problem.
        :param torch.nn.Module reduction_network: The reduction network used
            for reducing the input space. It must contain two methods, namely
            ``encode`` for input encoding, and ``decode`` for decoding the
            former result.
        :param torch.nn.Module interpolation_network: The interpolation network
            for interpolating the control parameters to the latent space
            obtained by the ``reduction_network`` encoding.
        :param torch.nn.Module loss: The loss function to be minimized.
            If ``None``, the :class:`torch.nn.MSELoss` loss is used.
            Default is ``None``.
        :param Optimizer optimizer: The optimizer to be used.
            If ``None``, the :class:`torch.optim.Adam` optimizer is used.
            Default is ``None``.
        :param Scheduler scheduler: Learning rate scheduler.
            If ``None``, the :class:`torch.optim.lr_scheduler.ConstantLR`
            scheduler is used. Default is ``None``.
        :param WeightingInterface weighting: The weighting schema to be used.
            If ``None``, no weighting schema is used. Default is ``None``.
        :param bool use_lt: If ``True``, the solver uses LabelTensors as input.
            Default is ``True``.
        """
        model = torch.nn.ModuleDict(
            {
                "reduction_network": reduction_network,
                "interpolation_network": interpolation_network,
            }
        )

        super().__init__(
            model=model,
            problem=problem,
            loss=loss,
            optimizer=optimizer,
            scheduler=scheduler,
            weighting=weighting,
            use_lt=use_lt,
        )

        # assert that the reduction object contains encode / decode methods
        if not hasattr(self.model["reduction_network"], "encode"):
            raise SyntaxError(
                "reduction_network must have encode method. "
                "The encode method should return a lower "
                "dimensional representation of the input."
            )
        if not hasattr(self.model["reduction_network"], "decode"):
            raise SyntaxError(
                "reduction_network must have decode method. "
                "The decode method should return a high "
                "dimensional representation of the encoding."
            )

    def forward(self, x):
        """
        Forward pass implementation.
        It computes the encoder representation by calling the forward method
        of the ``interpolation_network`` on the input, and maps it to the
        output space by calling the ``decode`` method of the
        ``reduction_network``.

        :param x: The input to the neural network.
        :type x: LabelTensor | torch.Tensor | Graph | Data
        :return: The solver solution.
        :rtype: LabelTensor | torch.Tensor | Graph | Data
        """
        reduction_network = self.model["reduction_network"]
        interpolation_network = self.model["interpolation_network"]
        return reduction_network.decode(interpolation_network(x))

    def loss_data(self, input, target):
        """
        Compute the data loss by evaluating the loss between the network's
        output and the true solution. This method should not be overridden
        unless intentionally.

        :param input: The input to the neural network.
        :type input: LabelTensor | torch.Tensor | Graph | Data
        :param target: The target to compare with the network's output.
        :type target: LabelTensor | torch.Tensor | Graph | Data
        :return: The supervised loss, averaged over the number of
            observations.
        :rtype: LabelTensor | torch.Tensor | Graph | Data
        """
        # extract the networks
        reduction_network = self.model["reduction_network"]
        interpolation_network = self.model["interpolation_network"]
        # loss on the encoded representations
        encode_repr_inter_net = interpolation_network(input)
        encode_repr_reduction_network = reduction_network.encode(target)
        loss_encode = self._loss_fn(
            encode_repr_inter_net, encode_repr_reduction_network
        )
        # reconstruction loss
        decode = reduction_network.decode(encode_repr_reduction_network)
        loss_reconstruction = self._loss_fn(decode, target)
        return loss_encode + loss_reconstruction
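Below, outside the diff, is a minimal usage sketch of this new solver. The ``Autoencoder`` class and the ``problem`` object are hypothetical stand-ins (not part of this PR), shown only to illustrate the ``encode``/``decode`` contract the constructor checks for:

    import torch

    from pina.solver.supervised_solver import ReducedOrderModelSolver


    class Autoencoder(torch.nn.Module):
        """Toy reduction network exposing the required encode/decode API."""

        def __init__(self, full_dim, latent_dim):
            super().__init__()
            self.encoder = torch.nn.Linear(full_dim, latent_dim)
            self.decoder = torch.nn.Linear(latent_dim, full_dim)

        def encode(self, x):
            # lower-dimensional representation of the discretised field
            return self.encoder(x)

        def decode(self, z):
            # back to the full discretised field
            return self.decoder(z)


    # interpolation network: maps 2 control parameters to the latent space
    interpolation = torch.nn.Sequential(
        torch.nn.Linear(2, 16), torch.nn.Tanh(), torch.nn.Linear(16, 8)
    )

    solver = ReducedOrderModelSolver(
        problem=problem,  # assumed: a data-driven problem defined elsewhere
        reduction_network=Autoencoder(full_dim=100, latent_dim=8),
        interpolation_network=interpolation,
        use_lt=False,  # plain tensors instead of LabelTensors in this sketch
    )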
85  pina/solver/supervised_solver/supervised.py  Normal file
@@ -0,0 +1,85 @@
"""Module for the Supervised solver."""

from .supervised_solver_interface import SupervisedSolverInterface
from ..solver import SingleSolverInterface


class SupervisedSolver(SupervisedSolverInterface, SingleSolverInterface):
    r"""
    Supervised solver class. This class implements a supervised solver, using
    a user-specified ``model`` to solve a specific ``problem``.

    The supervised solver aims to find a map between the input
    :math:`\mathbf{s}:\Omega\rightarrow\mathbb{R}^m` and the output
    :math:`\mathbf{u}:\Omega\rightarrow\mathbb{R}^m`.

    Given a model :math:`\mathcal{M}`, the following loss function is
    minimized during training:

    .. math::
        \mathcal{L}_{\rm{problem}} = \frac{1}{N}\sum_{i=1}^N
        \mathcal{L}(\mathbf{u}_i - \mathcal{M}(\mathbf{s}_i)),

    where :math:`\mathcal{L}` is a specific loss function, typically the MSE:

    .. math::
        \mathcal{L}(v) = \| v \|^2_2.

    In this context, the subscripts in :math:`\mathbf{u}_i` and
    :math:`\mathbf{s}_i` indicate that multiple (discretised) output functions
    are approximated, given multiple (discretised) input functions.
    """

    def __init__(
        self,
        problem,
        model,
        loss=None,
        optimizer=None,
        scheduler=None,
        weighting=None,
        use_lt=True,
    ):
        """
        Initialization of the :class:`SupervisedSolver` class.

        :param AbstractProblem problem: The problem to be solved.
        :param torch.nn.Module model: The neural network model to be used.
        :param torch.nn.Module loss: The loss function to be minimized.
            If ``None``, the :class:`torch.nn.MSELoss` loss is used.
            Default is ``None``.
        :param Optimizer optimizer: The optimizer to be used.
            If ``None``, the :class:`torch.optim.Adam` optimizer is used.
            Default is ``None``.
        :param Scheduler scheduler: Learning rate scheduler.
            If ``None``, the :class:`torch.optim.lr_scheduler.ConstantLR`
            scheduler is used. Default is ``None``.
        :param WeightingInterface weighting: The weighting schema to be used.
            If ``None``, no weighting schema is used. Default is ``None``.
        :param bool use_lt: If ``True``, the solver uses LabelTensors as input.
            Default is ``True``.
        """
        super().__init__(
            model=model,
            problem=problem,
            loss=loss,
            optimizer=optimizer,
            scheduler=scheduler,
            weighting=weighting,
            use_lt=use_lt,
        )

    def loss_data(self, input, target):
        """
        Compute the data loss for the Supervised solver by evaluating the loss
        between the network's output and the true solution. This method should
        not be overridden unless intentionally.

        :param input: The input to the neural network.
        :type input: LabelTensor | torch.Tensor | Graph | Data
        :param target: The target to compare with the network's output.
        :type target: LabelTensor | torch.Tensor | Graph | Data
        :return: The supervised loss, averaged over the number of
            observations.
        :rtype: LabelTensor | torch.Tensor | Graph | Data
        """
        return self._loss_fn(self.forward(input), target)
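For comparison, a minimal instantiation sketch (not part of the diff); the ``problem`` object is again an assumed data-driven problem with ``input``/``target`` conditions:

    import torch

    from pina.solver.supervised_solver import SupervisedSolver

    # any torch model mapping input to output works here
    model = torch.nn.Sequential(
        torch.nn.Linear(1, 20), torch.nn.Tanh(), torch.nn.Linear(20, 1)
    )

    solver = SupervisedSolver(
        problem=problem,  # assumed: defined elsewhere
        model=model,
        use_lt=False,  # plain tensors instead of LabelTensors in this sketch
    )
    # loss_data then reduces to MSELoss(model(input), target) per condition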
90  pina/solver/supervised_solver/supervised_solver_interface.py  Normal file
@@ -0,0 +1,90 @@
"""Module for the Supervised solver interface."""

from abc import abstractmethod

import torch

from torch.nn.modules.loss import _Loss
from ..solver import SolverInterface
from ...utils import check_consistency
from ...loss.loss_interface import LossInterface
from ...condition import InputTargetCondition


class SupervisedSolverInterface(SolverInterface):
    r"""
    Base class for supervised solvers. This class implements a supervised
    solver, using a user-specified ``model`` to solve a specific ``problem``.

    The ``SupervisedSolverInterface`` class can be used to define supervised
    solvers that work with one or multiple optimizers and/or models.
    By default, it is compatible with problems defined by
    :class:`~pina.problem.abstract_problem.AbstractProblem`,
    and users can choose the problem type the solver is meant to address.
    """

    accepted_conditions_types = InputTargetCondition

    def __init__(self, loss=None, **kwargs):
        """
        Initialization of the :class:`SupervisedSolverInterface` class.

        :param AbstractProblem problem: The problem to be solved.
        :param torch.nn.Module loss: The loss function to be minimized.
            If ``None``, the :class:`torch.nn.MSELoss` loss is used.
            Default is ``None``.
        :param kwargs: Additional keyword arguments to be passed to the
            :class:`~pina.solver.solver.SolverInterface` class.
        """
        if loss is None:
            loss = torch.nn.MSELoss()

        super().__init__(**kwargs)

        # check consistency
        check_consistency(loss, (LossInterface, _Loss), subclass=False)

        # assign variables
        self._loss_fn = loss

    def optimization_cycle(self, batch):
        """
        The optimization cycle for the solvers.

        :param list[tuple[str, dict]] batch: A batch of data. Each element is
            a tuple containing a condition name and a dictionary of points.
        :return: The losses computed for all conditions in the batch, cast
            to a subclass of :class:`torch.Tensor`. It should return a dict
            containing the condition name and the associated scalar loss.
        :rtype: dict
        """
        condition_loss = {}
        for condition_name, points in batch:
            condition_loss[condition_name] = self.loss_data(
                input=points["input"], target=points["target"]
            )
        return condition_loss

    @abstractmethod
    def loss_data(self, input, target):
        """
        Compute the data loss for the supervised solver. This method is
        abstract and should be overridden by derived classes.

        :param input: The input to the neural network.
        :type input: LabelTensor | torch.Tensor | Graph | Data
        :param target: The target to compare with the network's output.
        :type target: LabelTensor | torch.Tensor | Graph | Data
        :return: The supervised loss, averaged over the number of
            observations.
        :rtype: LabelTensor | torch.Tensor | Graph | Data
        """

    @property
    def loss(self):
        """
        The loss function to be minimized.

        :return: The loss function to be minimized.
        :rtype: torch.nn.Module
        """
        return self._loss_fn
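To show how the interface is meant to be extended, here is a hedged sketch of a custom solver (illustrative only, not part of this PR): a subclass implements ``loss_data``, while ``optimization_cycle`` already handles the iteration over conditions; the ``SingleSolverInterface`` mixin mirrors the two concrete solvers above, and the class name and ``scale`` parameter below are invented for this example:

    from pina.solver.solver import SingleSolverInterface
    from pina.solver.supervised_solver import SupervisedSolverInterface


    class ScaledSupervisedSolver(
        SupervisedSolverInterface, SingleSolverInterface
    ):
        """Illustrative subclass: rescales the data loss by a constant."""

        def __init__(self, problem, model, scale=1.0, **kwargs):
            super().__init__(model=model, problem=problem, **kwargs)
            self.scale = scale

        def loss_data(self, input, target):
            # same contract as SupervisedSolver.loss_data, with a rescaling
            return self.scale * self._loss_fn(self.forward(input), target)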