fix doc model part 1

This commit is contained in:
giovanni
2025-03-14 12:24:27 +01:00
committed by Nicola Demo
parent def8f5a1d3
commit 8dc682c849
10 changed files with 676 additions and 433 deletions


@@ -1,5 +1,5 @@
""" """
Module containing the neural network models. Module for the Neural model classes.
""" """
__all__ = [ __all__ = [


@@ -1,4 +1,4 @@
"""Module Averaging Neural Operator.""" """Module for the Averaging Neural Operator model class."""
import torch import torch
from torch import nn from torch import nn
@@ -9,19 +9,17 @@ from ..utils import check_consistency
class AveragingNeuralOperator(KernelNeuralOperator):
    """
    Averaging Neural Operator model class.

    The Averaging Neural Operator is a general architecture for learning
    operators, which map functions to functions. It can be trained both with
    Supervised and Physics-Informed learning strategies. The Averaging Neural
    Operator performs convolution by means of a field average.

    .. seealso::

        **Original reference**: Lanthaler, S., Li, Z., Stuart, A. (2023).
        *The Nonlocal Neural Operator: Universal Approximation*.
        DOI: `arXiv preprint arXiv:2304.13221.
        <https://arxiv.org/abs/2304.13221>`_
    """
@@ -36,21 +34,26 @@ class AveragingNeuralOperator(KernelNeuralOperator):
        func=nn.GELU,
    ):
        """
        Initialization of the :class:`AveragingNeuralOperator` class.

        :param torch.nn.Module lifting_net: The lifting neural network mapping
            the input to its hidden dimension. It must take as input the input
            field and the coordinates at which the input field is evaluated.
        :param torch.nn.Module projecting_net: The projection neural network
            mapping the hidden representation to the output function. It must
            take as input the embedding dimension plus the dimension of the
            coordinates.
        :param list[str] field_indices: The labels of the fields in the input
            tensor.
        :param list[str] coordinates_indices: The labels of the coordinates in
            the input tensor.
        :param int n_layers: The number of hidden layers. Default is ``4``.
        :param torch.nn.Module func: The activation function to use.
            Default is :class:`torch.nn.GELU`.
        :raises ValueError: If the input dimension does not match with the
            labels of the fields and coordinates.
        :raises ValueError: If the input dimension of the projecting network
            does not match with the hidden dimension of the lifting network.
        """
        # check consistency
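A minimal construction sketch consistent with the docstring above; the
``pina.model`` import path and all dimensions are illustrative assumptions,
not part of this diff:

>>> import torch
>>> from pina.model import FeedForward, AveragingNeuralOperator
>>> fields, coords = ["u"], ["x", "y"]
>>> # lifting: len(fields) + len(coords) = 3 inputs -> embedding of size 24
>>> lifting = FeedForward(input_dimensions=3, output_dimensions=24)
>>> # projection: embedding (24) + len(coords) (2) = 26 inputs -> 1 output field
>>> projecting = FeedForward(input_dimensions=26, output_dimensions=1)
>>> model = AveragingNeuralOperator(lifting_net=lifting,
...     projecting_net=projecting, field_indices=fields,
...     coordinates_indices=coords)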
@@ -93,19 +96,20 @@ class AveragingNeuralOperator(KernelNeuralOperator):
    def forward(self, x):
        r"""
        Forward pass for the :class:`AveragingNeuralOperator` model.

        The ``lifting_net`` maps the input to the hidden dimension.
        Then, several layers of
        :class:`~pina.model.block.average_neural_operator_block.AVNOBlock` are
        applied. Finally, the ``projecting_net`` maps the hidden
        representation to the output function.

        :param LabelTensor x: The input tensor for performing the computation.
            It expects a tensor :math:`B \times N \times D`, where :math:`B`
            is the batch size, :math:`N` the number of points in the mesh,
            :math:`D` the dimension of the problem, i.e. the sum of
            ``len(coordinates_indices)`` and ``len(field_indices)``.
        :return: The output tensor.
        :rtype: torch.Tensor
        """
        points_tmp = x.extract(self.coordinates_indices)
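Continuing the sketch above, the expected input can be built as follows
(hedged; assumes ``LabelTensor`` labels the last dimension):

>>> from pina import LabelTensor
>>> data = torch.rand(10, 100, 3)              # B x N x D, D = coords + fields
>>> x = LabelTensor(data, labels=["x", "y", "u"])
>>> out = model(x)                             # B x N x output fields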


@@ -1,4 +1,4 @@
"""Module for DeepONet model""" """Module for the DeepONet and MIONet model classes"""
from functools import partial from functools import partial
import torch import torch
@@ -8,22 +8,18 @@ from ..utils import check_consistency, is_function
class MIONet(torch.nn.Module):
    """
    MIONet model class.

    The MIONet is a general architecture for learning operators, which map
    functions to functions. It can be trained with both Supervised and
    Physics-Informed learning strategies.

    .. seealso::

        **Original reference**: Jin, P., Meng, S., and Lu, L. (2022).
        *MIONet: Learning multiple-input operators via tensor product.*
        SIAM Journal on Scientific Computing 44.6 (2022): A3490-A3514.
        DOI: `10.1137/22M1477751 <https://doi.org/10.1137/22M1477751>`_
    """

    def __init__(
@@ -35,42 +31,44 @@ class MIONet(torch.nn.Module):
        translation=True,
    ):
        """
        Initialization of the :class:`MIONet` class.

        :param dict networks: The neural networks to use as models. The
            ``dict`` takes as key a neural network, and as value the list of
            indices to extract from the input variable in the forward pass of
            the neural network. If a ``list[int]`` is passed, the
            corresponding columns of the innermost entries are extracted. If a
            ``list[str]`` is passed, the variables of the corresponding
            :class:`~pina.label_tensor.LabelTensor` are extracted.
            Each :class:`torch.nn.Module` model has to take as input either a
            :class:`~pina.label_tensor.LabelTensor` or a :class:`torch.Tensor`.
            The default implementation consists of several branch nets and one
            trunk net.
        :param aggregator: The aggregator to be used to aggregate
            component-wise partial results from the modules in ``networks``.
            Available aggregators include: sum: ``+``, product: ``*``,
            mean: ``mean``, min: ``min``, max: ``max``. Default is ``*``.
        :type aggregator: str or Callable
        :param reduction: The reduction to be used to reduce the aggregated
            result of the modules in ``networks`` to the desired output
            dimension. Available reductions include: sum: ``+``, product:
            ``*``, mean: ``mean``, min: ``min``, max: ``max``. Default is
            ``+``.
        :type reduction: str or Callable
        :param bool scale: If ``True``, the final output is scaled before
            being returned in the forward pass. Default is ``True``.
        :param bool translation: If ``True``, the final output is translated
            before being returned in the forward pass. Default is ``True``.
        :raises ValueError: If the passed networks do not share the same
            output dimension.

        .. warning::

            No checks are performed in the forward pass to verify if the input
            is instance of either :class:`~pina.label_tensor.LabelTensor` or
            :class:`torch.Tensor`. In general, in case of a
            :class:`~pina.label_tensor.LabelTensor`, both a ``list[int]`` or a
            ``list[str]`` can be passed as ``networks`` dict values.
            Differently, in case of a :class:`torch.Tensor`, only a
            ``list[int]`` can be passed as ``networks`` dict values.

        :Example:
            >>> branch_net1 = FeedForward(input_dimensions=1,
@@ -162,6 +160,10 @@ class MIONet(torch.nn.Module):
""" """
Return a dictionary of functions that can be used as aggregators or Return a dictionary of functions that can be used as aggregators or
reductions. reductions.
:param dict kwargs: Additional parameters.
:return: A dictionary of functions.
:rtype: dict
""" """
return { return {
"+": partial(torch.sum, **kwargs), "+": partial(torch.sum, **kwargs),
@@ -172,6 +174,13 @@ class MIONet(torch.nn.Module):
        }

    def _init_aggregator(self, aggregator):
        """
        Initialize the aggregator.

        :param aggregator: The aggregator to be used to aggregate.
        :type aggregator: str or Callable
        :raises ValueError: If the aggregator is not supported.
        """
        aggregator_funcs = self._symbol_functions(dim=2)
        if aggregator in aggregator_funcs:
            aggregator_func = aggregator_funcs[aggregator]
@@ -184,6 +193,13 @@ class MIONet(torch.nn.Module):
        self._aggregator_type = aggregator

    def _init_reduction(self, reduction):
        """
        Initialize the reduction.

        :param reduction: The reduction to be used.
        :type reduction: str or Callable
        :raises ValueError: If the reduction is not supported.
        """
        reduction_funcs = self._symbol_functions(dim=-1)
        if reduction in reduction_funcs:
            reduction_func = reduction_funcs[reduction]
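As a reading aid for the two calls above: ``_symbol_functions(dim=2)`` builds
aggregators acting on dimension 2, while ``dim=-1`` builds reductions acting
on the last dimension. A hedged sketch of what the ``+`` symbol resolves to:

>>> import torch
>>> from functools import partial
>>> agg = partial(torch.sum, dim=2)    # "+" as aggregator
>>> red = partial(torch.sum, dim=-1)   # "+" as reduction
>>> t = torch.ones(4, 3, 2)
>>> agg(t).shape, red(t).shape
(torch.Size([4, 3]), torch.Size([4, 3]))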
@@ -196,6 +212,18 @@ class MIONet(torch.nn.Module):
        self._reduction_type = reduction

    def _get_vars(self, x, indeces):
        """
        Extract the variables from the input tensor.

        :param x: The input tensor.
        :type x: LabelTensor | torch.Tensor
        :param indeces: The indices to extract.
        :type indeces: list[int] | list[str]
        :raises RuntimeError: If failing to extract the variables.
        :raises RuntimeError: If failing to extract the right indices.
        :return: The extracted variables.
        :rtype: LabelTensor | torch.Tensor
        """
        if isinstance(indeces[0], str):
            try:
                return x.extract(indeces)
@@ -216,12 +244,12 @@ class MIONet(torch.nn.Module):
    def forward(self, x):
        """
        Forward pass for the :class:`MIONet` model.

        :param x: The input tensor.
        :type x: LabelTensor | torch.Tensor
        :return: The output tensor.
        :rtype: LabelTensor | torch.Tensor
        """
        # forward pass
@@ -248,13 +276,19 @@ class MIONet(torch.nn.Module):
    def aggregator(self):
        """
        The aggregator function.

        :return: The aggregator function.
        :rtype: str or Callable
        """
        return self._aggregator

    @property
    def reduction(self):
        """
        The reduction function.

        :return: The reduction function.
        :rtype: str or Callable
        """
        return self._reduction
@@ -262,13 +296,19 @@ class MIONet(torch.nn.Module):
    def scale(self):
        """
        The scale factor.

        :return: The scale factor.
        :rtype: torch.Tensor
        """
        return self._scale

    @property
    def translation(self):
        """
        The translation factor.

        :return: The translation factor.
        :rtype: torch.Tensor
        """
        return self._trasl
@@ -276,6 +316,9 @@ class MIONet(torch.nn.Module):
    def indeces_variables_extracted(self):
        """
        The input indices for each model, as a list.

        :return: The indices for each model.
        :rtype: list
        """
        return self._indeces
@@ -283,24 +326,27 @@ class MIONet(torch.nn.Module):
    def model(self):
        """
        The models, as a list.

        :return: The models.
        :rtype: list[torch.nn.Module]
        """
        return self.models

class DeepONet(MIONet):
    """
    DeepONet model class.

    The DeepONet is a general architecture for learning operators, which map
    functions to functions. It can be trained with both Supervised and
    Physics-Informed learning strategies.

    .. seealso::

        **Original reference**: Lu, L., Jin, P., Pang, G. et al.
        *Learning nonlinear operators via DeepONet based on the universal
        approximation theorem of operators*.
        Nat Mach Intell 3, 218-229 (2021).
        DOI: `10.1038/s42256-021-00302-5
        <https://doi.org/10.1038/s42256-021-00302-5>`_
    """
@@ -318,42 +364,44 @@ class DeepONet(MIONet):
        translation=True,
    ):
        """
        Initialization of the :class:`DeepONet` class.

        :param torch.nn.Module branch_net: The neural network to use as branch
            model. It has to take as input either a
            :class:`~pina.label_tensor.LabelTensor` or a
            :class:`torch.Tensor`. The output dimension has to be the same as
            that of ``trunk_net``.
        :param torch.nn.Module trunk_net: The neural network to use as trunk
            model. It has to take as input either a
            :class:`~pina.label_tensor.LabelTensor` or a
            :class:`torch.Tensor`. The output dimension has to be the same as
            that of ``branch_net``.
        :param input_indeces_branch_net: List of indices to extract from the
            input variable of the ``branch_net``. If a list of ``int`` is
            passed, the corresponding columns of the innermost entries are
            extracted. If a list of ``str`` is passed, the variables of the
            corresponding :class:`~pina.label_tensor.LabelTensor` are
            extracted.
        :type input_indeces_branch_net: list[int] | list[str]
        :param input_indeces_trunk_net: List of indices to extract from the
            input variable of the ``trunk_net``. If a list of ``int`` is
            passed, the corresponding columns of the innermost entries are
            extracted. If a list of ``str`` is passed, the variables of the
            corresponding :class:`~pina.label_tensor.LabelTensor` are
            extracted.
        :type input_indeces_trunk_net: list[int] | list[str]
        :param aggregator: The aggregator to be used to aggregate
            component-wise partial results from the modules in ``networks``.
            Available aggregators include: sum: ``+``, product: ``*``,
            mean: ``mean``, min: ``min``, max: ``max``. Default is ``*``.
        :type aggregator: str or Callable
        :param reduction: The reduction to be used to reduce the aggregated
            result of the modules in ``networks`` to the desired output
            dimension. Available reductions include: sum: ``+``, product:
            ``*``, mean: ``mean``, min: ``min``, max: ``max``. Default is
            ``+``.
        :type reduction: str or Callable
        :param bool scale: If ``True``, the final output is scaled before
            being returned in the forward pass. Default is ``True``.
        :param bool translation: If ``True``, the final output is translated
            before being returned in the forward pass. Default is ``True``.

        .. warning::

            In the forward pass we do not check if the input is instance of
@@ -364,6 +412,14 @@ class DeepONet(MIONet):
            Differently, for a :class:`torch.Tensor` only a list of integers
            can be passed for ``input_indeces_branch_net`` and
            ``input_indeces_trunk_net``.

        .. warning::

            No checks are performed in the forward pass to verify if the input
            is instance of either :class:`~pina.label_tensor.LabelTensor` or
            :class:`torch.Tensor`. In general, in case of a
            :class:`~pina.label_tensor.LabelTensor`, both a ``list[int]`` or a
            ``list[str]`` can be passed as ``input_indeces_branch_net`` and
            ``input_indeces_trunk_net``. Differently, in case of a
            :class:`torch.Tensor`, only a ``list[int]`` can be passed.

        :Example:
            >>> branch_net = FeedForward(input_dimensions=1,
@@ -411,25 +467,31 @@ class DeepONet(MIONet):
    def forward(self, x):
        """
        Forward pass for the :class:`DeepONet` model.

        :param x: The input tensor.
        :type x: LabelTensor | torch.Tensor
        :return: The output tensor.
        :rtype: LabelTensor | torch.Tensor
        """
        return super().forward(x)

    @property
    def branch_net(self):
        """
        The branch net of the DeepONet.

        :return: The branch net.
        :rtype: torch.nn.Module
        """
        return self.models[0]

    @property
    def trunk_net(self):
        """
        The trunk net of the DeepONet.

        :return: The trunk net.
        :rtype: torch.nn.Module
        """
        return self.models[1]
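A minimal end-to-end sketch of the class above (hedged; the import path and
all sizes are illustrative assumptions, not taken from this diff):

>>> import torch
>>> from pina.model import FeedForward, DeepONet
>>> branch = FeedForward(input_dimensions=1, output_dimensions=10)
>>> trunk = FeedForward(input_dimensions=1, output_dimensions=10)
>>> model = DeepONet(branch_net=branch, trunk_net=trunk,
...     input_indeces_branch_net=["x"], input_indeces_trunk_net=["t"],
...     aggregator="*", reduction="+")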


@@ -1,4 +1,4 @@
"""Module for FeedForward model""" """Module for the Feed Forward model class"""
import torch import torch
from torch import nn from torch import nn
@@ -8,28 +8,8 @@ from .block.residual import EnhancedLinear
class FeedForward(torch.nn.Module):
    """
    Feed Forward neural network model class, also known as Multi-layer
    Perceptron.
    """

    def __init__(
@@ -42,7 +22,36 @@ class FeedForward(torch.nn.Module):
        layers=None,
        bias=True,
    ):
        """
        Initialization of the :class:`FeedForward` class.

        :param int input_dimensions: The number of input components.
            The expected tensor shape is :math:`(*, d)`, where :math:`*`
            represents any number of preceding dimensions (including none),
            and :math:`d` corresponds to ``input_dimensions``.
        :param int output_dimensions: The number of output components.
            The expected tensor shape is :math:`(*, d)`, where :math:`*`
            represents any number of preceding dimensions (including none),
            and :math:`d` corresponds to ``output_dimensions``.
        :param int inner_size: The number of neurons for each hidden layer.
            Default is ``20``.
        :param int n_layers: The number of hidden layers. Default is ``2``.
        :param func: The activation function. If a list is passed, it must
            have the same length as ``n_layers``. If a single function is
            passed, it is used for all layers, except for the last one.
            Default is :class:`torch.nn.Tanh`.
        :type func: torch.nn.Module | list[torch.nn.Module]
        :param list[int] layers: The list of the dimensions of the inner
            layers. If ``None``, ``n_layers`` layers of dimension
            ``inner_size`` are used. Otherwise, it overrides the values passed
            to ``n_layers`` and ``inner_size``. Default is ``None``.
        :param bool bias: If ``True``, a bias term is included in the linear
            layers. Default is ``True``.
        :raises ValueError: If the input dimension is not an integer.
        :raises ValueError: If the output dimension is not an integer.
        :raises RuntimeError: If the number of layers and functions are
            inconsistent.
        """
        super().__init__()

        if not isinstance(input_dimensions, int):
@@ -71,7 +80,7 @@ class FeedForward(torch.nn.Module):
            self.functions = [func for _ in range(len(self.layers) - 1)]

        if len(self.layers) != len(self.functions) + 1:
            raise RuntimeError("Inconsistent number of layers and functions")

        unique_list = []
        for layer, func_ in zip(self.layers[:-1], self.functions):
@@ -84,52 +93,31 @@ class FeedForward(torch.nn.Module):
    def forward(self, x):
        """
        Forward pass for the :class:`FeedForward` model.

        :param x: The input tensor.
        :type x: torch.Tensor | LabelTensor
        :return: The output tensor.
        :rtype: torch.Tensor | LabelTensor
        """
        return self.model(x)
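Illustratively (a hedged sketch; the ``(*, d)`` shape convention comes from
the docstring above, and the import path is an assumption):

>>> import torch
>>> from pina.model import FeedForward
>>> net = FeedForward(input_dimensions=2, output_dimensions=1,
...     inner_size=10, n_layers=3)
>>> net(torch.rand(5, 7, 2)).shape    # any leading shape is preserved
torch.Size([5, 7, 1])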

class ResidualFeedForward(torch.nn.Module):
    """
    Residual Feed Forward neural network model class.

    The model is composed of a series of linear layers with residual
    connections between them, as presented in the following reference:

    .. seealso::

        **Original reference**: Wang, S., Teng, Y., and Perdikaris, P. (2021).
        *Understanding and mitigating gradient flow pathologies in
        physics-informed neural networks*.
        SIAM Journal on Scientific Computing 43.5 (2021): A3055-A3081.
        DOI: `10.1137/20M1318043
        <https://epubs.siam.org/doi/abs/10.1137/20M1318043>`_
    """

    def __init__(
@@ -142,7 +130,37 @@ class ResidualFeedForward(torch.nn.Module):
        bias=True,
        transformer_nets=None,
    ):
        """
        Initialization of the :class:`ResidualFeedForward` class.

        :param int input_dimensions: The number of input components.
            The expected tensor shape is :math:`(*, d)`, where :math:`*`
            represents any number of preceding dimensions (including none),
            and :math:`d` corresponds to ``input_dimensions``.
        :param int output_dimensions: The number of output components.
            The expected tensor shape is :math:`(*, d)`, where :math:`*`
            represents any number of preceding dimensions (including none),
            and :math:`d` corresponds to ``output_dimensions``.
        :param int inner_size: The number of neurons for each hidden layer.
            Default is ``20``.
        :param int n_layers: The number of hidden layers. Default is ``2``.
        :param func: The activation function. If a list is passed, it must
            have the same length as ``n_layers``. If a single function is
            passed, it is used for all layers, except for the last one.
            Default is :class:`torch.nn.Tanh`.
        :type func: torch.nn.Module | list[torch.nn.Module]
        :param bool bias: If ``True``, a bias term is included in the linear
            layers. Default is ``True``.
        :param transformer_nets: The two :class:`torch.nn.Module` acting as
            transformer networks. The input dimension of both networks must be
            equal to ``input_dimensions``, and the output dimension must be
            equal to ``inner_size``. If ``None``, two
            :class:`~pina.model.block.residual.EnhancedLinear` layers are
            used. Default is ``None``.
        :type transformer_nets: list[torch.nn.Module] | tuple[torch.nn.Module]
        :raises RuntimeError: If the number of layers and functions are
            inconsistent.
        """
        super().__init__()

        # check type consistency
@@ -179,7 +197,7 @@ class ResidualFeedForward(torch.nn.Module):
            self.functions = [func() for _ in range(len(self.layers))]

        if len(self.layers) != len(self.functions):
            raise RuntimeError("Inconsistent number of layers and functions")

        unique_list = []
        for layer, func_ in zip(self.layers, self.functions):
@@ -188,12 +206,12 @@ class ResidualFeedForward(torch.nn.Module):
    def forward(self, x):
        """
        Forward pass for the :class:`ResidualFeedForward` model.

        :param x: The input tensor.
        :type x: torch.Tensor | LabelTensor
        :return: The output tensor.
        :rtype: torch.Tensor | LabelTensor
        """
        # enhance the input with transformer
        input_ = []
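A hedged construction sketch with custom transformer networks, matching the
dimension constraints stated in the docstring above (input dimension equal to
``input_dimensions``, output dimension equal to ``inner_size``); the import
path is an assumption:

>>> import torch
>>> from pina.model import ResidualFeedForward
>>> t1 = torch.nn.Sequential(torch.nn.Linear(2, 20), torch.nn.Tanh())
>>> t2 = torch.nn.Sequential(torch.nn.Linear(2, 20), torch.nn.Tanh())
>>> net = ResidualFeedForward(input_dimensions=2, output_dimensions=1,
...     inner_size=20, transformer_nets=[t1, t2])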
@@ -210,6 +228,26 @@ class ResidualFeedForward(torch.nn.Module):
    @staticmethod
    def _check_transformer_nets(transformer_nets, input_dimensions, inner_size):
        """
        Check the consistency of the transformer networks.

        :param transformer_nets: The two :class:`torch.nn.Module` acting as
            transformer networks.
        :type transformer_nets: list[torch.nn.Module] |
            tuple[torch.nn.Module]
        :param int input_dimensions: The number of input components.
        :param int inner_size: The number of neurons for each hidden layer.
        :raises ValueError: If the passed ``transformer_nets`` is not a list
            of length two.
        :raises ValueError: If the passed ``transformer_nets`` is not a list
            of :class:`torch.nn.Module`.
        :raises ValueError: If the input dimension of the transformer network
            is incompatible with the input dimension of the model.
        :raises ValueError: If the output dimension of the transformer network
            is incompatible with the inner size of the model.
        :raises RuntimeError: If an unexpected error occurs.
        :return: The two :class:`torch.nn.Module` acting as transformer
            networks.
        :rtype: list[torch.nn.Module] | tuple[torch.nn.Module]
        """
        # check transformer nets
        if transformer_nets is None:
            transformer_nets = [


@@ -1,5 +1,5 @@
""" """
Fourier Neural Operator Module. Module for the Fourier Neural Operator model class.
""" """
import warnings import warnings
@@ -13,18 +13,16 @@ from .kernel_neural_operator import KernelNeuralOperator
class FourierIntegralKernel(torch.nn.Module):
    """
    Fourier Integral Kernel model class.

    This class implements the Fourier Integral Kernel network, which
    performs global convolution in the Fourier space.

    .. seealso::

        **Original reference**: Li, Z., Kovachki, N., Azizzadenesheli, K.,
        Liu, B., Bhattacharya, K., Stuart, A., & Anandkumar, A. (2020).
        *Fourier neural operator for parametric partial differential
        equations*.
        DOI: `arXiv preprint arXiv:2010.08895.
        <https://arxiv.org/abs/2010.08895>`_
    """
@@ -43,16 +41,31 @@ class FourierIntegralKernel(torch.nn.Module):
        layers=None,
    ):
        """
        Initialization of the :class:`FourierIntegralKernel` class.

        :param int input_numb_fields: The number of input fields.
        :param int output_numb_fields: The number of output fields.
        :param n_modes: The number of modes.
        :type n_modes: int | list[int]
        :param int dimensions: The number of dimensions. It can be set to
            ``1``, ``2``, or ``3``. Default is ``3``.
        :param int padding: The padding size. Default is ``8``.
        :param str padding_type: The padding strategy. Default is
            ``constant``.
        :param int inner_size: The inner size. Default is ``20``.
        :param int n_layers: The number of layers. Default is ``2``.
        :param func: The activation function. If a list is passed, it must
            have the same length as ``n_layers``. If a single function is
            passed, it is used for all layers, except for the last one.
            Default is :class:`torch.nn.Tanh`.
        :type func: torch.nn.Module | list[torch.nn.Module]
        :param list[int] layers: The list of the dimensions of the inner
            layers. If ``None``, ``n_layers`` layers of dimension
            ``inner_size`` are used. Otherwise, it overrides the values passed
            to ``n_layers`` and ``inner_size``. Default is ``None``.
        :raises RuntimeError: If the number of layers and functions are
            inconsistent.
        :raises RuntimeError: If the number of layers and modes are
            inconsistent.
        """
        super().__init__()
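A hedged construction sketch; the argument names follow the signature
documented above, while the values and the channel-last input layout are
illustrative assumptions:

>>> import torch
>>> kernel = FourierIntegralKernel(input_numb_fields=1,
...     output_numb_fields=1, n_modes=8, dimensions=2)
>>> y = kernel(torch.rand(8, 32, 32, 1))    # [batch, X, Y, channels]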
@@ -84,7 +97,7 @@ class FourierIntegralKernel(torch.nn.Module):
        if isinstance(func, list):
            if len(layers) != len(func):
                raise RuntimeError(
                    "Inconsistent number of layers and functions."
                )
            _functions = func
        else:
@@ -97,7 +110,7 @@ class FourierIntegralKernel(torch.nn.Module):
                n_modes
            ):
                raise RuntimeError(
                    "Inconsistent number of layers and modes."
                )
            if all(isinstance(i, int) for i in n_modes):
                n_modes = [n_modes] * len(layers)
@@ -129,19 +142,17 @@ class FourierIntegralKernel(torch.nn.Module):
    def forward(self, x):
        """
        Forward pass for the :class:`FourierIntegralKernel` model.

        :param x: The input tensor for performing the computation. Depending
            on the ``dimensions`` in the initialization, it expects a tensor
            with the following shapes:

            * 1D tensors: ``[batch, X, channels]``
            * 2D tensors: ``[batch, X, Y, channels]``
            * 3D tensors: ``[batch, X, Y, Z, channels]``

        :type x: torch.Tensor | LabelTensor
        :raises Warning: If a LabelTensor is passed as input.
        :return: The output tensor.
        :rtype: torch.Tensor
        """
        if isinstance(x, LabelTensor):
@@ -181,6 +192,22 @@ class FourierIntegralKernel(torch.nn.Module):
        layers,
        n_modes,
    ):
        """
        Check the consistency of the input parameters.

        :param int dimensions: The number of dimensions.
        :param int padding: The padding size.
        :param str padding_type: The padding strategy.
        :param int inner_size: The inner size.
        :param int n_layers: The number of layers.
        :param func: The activation function.
        :type func: torch.nn.Module | list[torch.nn.Module]
        :param list[int] layers: The list of the dimensions of the inner
            layers.
        :param n_modes: The number of modes.
        :type n_modes: int | list[int]
        :raises ValueError: If the input is not consistent.
        """
        check_consistency(dimensions, int)
        check_consistency(padding, int)
        check_consistency(padding_type, str)
@@ -201,6 +228,15 @@ class FourierIntegralKernel(torch.nn.Module):
    @staticmethod
    def _get_fourier_block(dimensions):
        """
        Retrieve the Fourier Block class based on the number of dimensions.

        :param int dimensions: The number of dimensions.
        :raises NotImplementedError: If the number of dimensions is not 1, 2,
            or 3.
        :return: The Fourier Block class.
        :rtype: FourierBlock1D | FourierBlock2D | FourierBlock3D
        """
        if dimensions == 1:
            return FourierBlock1D
        if dimensions == 2:
@@ -212,20 +248,18 @@ class FourierIntegralKernel(torch.nn.Module):
class FNO(KernelNeuralOperator):
    """
    Fourier Neural Operator model class.

    The Fourier Neural Operator (FNO) is a general architecture for learning
    operators, which map functions to functions. It can be trained both with
    Supervised and Physics-Informed learning strategies. The Fourier Neural
    Operator performs global convolution in the Fourier space.

    .. seealso::

        **Original reference**: Li, Z., Kovachki, N., Azizzadenesheli, K.,
        Liu, B., Bhattacharya, K., Stuart, A., & Anandkumar, A. (2020).
        *Fourier neural operator for parametric partial differential
        equations*.
        DOI: `arXiv preprint arXiv:2010.08895.
        <https://arxiv.org/abs/2010.08895>`_
    """
@@ -244,18 +278,27 @@ class FNO(KernelNeuralOperator):
        layers=None,
    ):
        """
        :param torch.nn.Module lifting_net: The lifting neural network mapping
            the input to its hidden dimension.
        :param torch.nn.Module projecting_net: The projection neural network
            mapping the hidden representation to the output function.
        :param n_modes: The number of modes.
        :type n_modes: int | list[int]
        :param int dimensions: The number of dimensions. It can be set to
            ``1``, ``2``, or ``3``. Default is ``3``.
        :param int padding: The padding size. Default is ``8``.
        :param str padding_type: The padding strategy. Default is
            ``constant``.
        :param int inner_size: The inner size. Default is ``20``.
        :param int n_layers: The number of layers. Default is ``2``.
        :param func: The activation function. If a list is passed, it must
            have the same length as ``n_layers``. If a single function is
            passed, it is used for all layers, except for the last one.
            Default is :class:`torch.nn.Tanh`.
        :type func: torch.nn.Module | list[torch.nn.Module]
        :param list[int] layers: The list of the dimensions of the inner
            layers. If ``None``, ``n_layers`` layers of dimension
            ``inner_size`` are used. Otherwise, it overrides the values passed
            to ``n_layers`` and ``inner_size``. Default is ``None``.
        """
        lifting_operator_out = lifting_net(
            torch.rand(size=next(lifting_net.parameters()).size())
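A hedged usage sketch of the constructor above (the import path and the
lifting/projection sizes are illustrative assumptions):

>>> import torch
>>> from pina.model import FeedForward, FNO
>>> lifting = FeedForward(input_dimensions=1, output_dimensions=24)
>>> projecting = FeedForward(input_dimensions=24, output_dimensions=1)
>>> fno = FNO(lifting_net=lifting, projecting_net=projecting,
...     n_modes=8, dimensions=2)
>>> y = fno(torch.rand(10, 32, 32, 1))      # [batch, X, Y, channels]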
@@ -279,19 +322,21 @@ class FNO(KernelNeuralOperator):
    def forward(self, x):
        """
        Forward pass for the :class:`FNO` model.

        The ``lifting_net`` maps the input to the hidden dimension.
        Then, several layers of Fourier blocks are applied. Finally, the
        ``projecting_net`` maps the hidden representation to the output
        function.

        :param x: The input tensor for performing the computation. Depending
            on the ``dimensions`` in the initialization, it expects a tensor
            with the following shapes:

            * 1D tensors: ``[batch, X, channels]``
            * 2D tensors: ``[batch, X, Y, channels]``
            * 3D tensors: ``[batch, X, Y, Z, channels]``

        :type x: torch.Tensor | LabelTensor
        :return: The output tensor.
        :rtype: torch.Tensor
        """


@@ -1,5 +1,5 @@
""" """
Module for the Graph Neural Operator and Graph Neural Kernel. Module for the Graph Neural Operator model class.
""" """
import torch import torch
@@ -10,7 +10,18 @@ from .kernel_neural_operator import KernelNeuralOperator
class GraphNeuralKernel(torch.nn.Module):
    """
    Graph Neural Kernel model class.

    This class implements the Graph Neural Kernel network.

    .. seealso::

        **Original reference**: Li, Z., Kovachki, N., Azizzadenesheli, K.,
        Liu, B., Bhattacharya, K., Stuart, A., Anandkumar, A. (2020).
        *Neural Operator: Graph Kernel Network for Partial Differential
        Equations*.
        DOI: `arXiv preprint arXiv:2003.03485.
        <https://arxiv.org/abs/2003.03485>`_
    """

    def __init__(
@@ -26,28 +37,24 @@ class GraphNeuralKernel(torch.nn.Module):
        shared_weights=False,
    ):
        """
        Initialization of the :class:`GraphNeuralKernel` class.

        :param int width: The width of the kernel.
        :param int edge_features: The number of edge features.
        :param int n_layers: The number of kernel layers. Default is ``2``.
        :param int internal_n_layers: The number of layers of the neural
            network inside each kernel layer. Default is ``0``.
        :param internal_layers: The number of neurons for each layer of the
            neural network inside each kernel layer. Default is ``None``.
        :type internal_layers: list[int] | tuple[int]
        :param torch.nn.Module internal_func: The activation function used
            inside each kernel layer. If ``None``, the :class:`torch.nn.Tanh`
            activation is used. Default is ``None``.
        :param torch.nn.Module external_func: The activation function applied
            to the output of each kernel layer. If ``None``, the
            :class:`torch.nn.Tanh` activation is used. Default is ``None``.
        :param bool shared_weights: If ``True``, the weights of each kernel
            layer are shared. Default is ``False``.
        """
        super().__init__()
        if external_func is None:
@@ -85,11 +92,33 @@ class GraphNeuralKernel(torch.nn.Module):
            self._forward_func = self._forward_unshared

    def _forward_unshared(self, x, edge_index, edge_attr):
        """
        Forward pass for the Graph Neural Kernel with unshared weights.

        :param x: The input tensor.
        :type x: torch.Tensor | LabelTensor
        :param torch.Tensor edge_index: The edge index.
        :param edge_attr: The edge attributes.
        :type edge_attr: torch.Tensor | LabelTensor
        :return: The output tensor.
        :rtype: torch.Tensor
        """
        for layer in self.layers:
            x = layer(x, edge_index, edge_attr)
        return x

    def _forward_shared(self, x, edge_index, edge_attr):
        """
        Forward pass for the Graph Neural Kernel with shared weights.

        :param x: The input tensor.
        :type x: torch.Tensor | LabelTensor
        :param torch.Tensor edge_index: The edge index.
        :param edge_attr: The edge attributes.
        :type edge_attr: torch.Tensor | LabelTensor
        :return: The output tensor.
        :rtype: torch.Tensor
        """
        for _ in range(self.n_layers):
            x = self.layers(x, edge_index, edge_attr)
        return x
@@ -98,19 +127,34 @@ class GraphNeuralKernel(torch.nn.Module):
""" """
The forward pass of the Graph Neural Kernel. The forward pass of the Graph Neural Kernel.
:param x: The input batch. :param x: The input tensor.
:type x: torch.Tensor :type x: torch.Tensor | LabelTensor
:param edge_index: The edge index. :param torch.Tensor edge_index: The edge index.
:type edge_index: torch.Tensor
:param edge_attr: The edge attributes. :param edge_attr: The edge attributes.
:type edge_attr: torch.Tensor :type edge_attr: torch.Tensor | LabelTensor
:return: The output tensor.
:rtype: torch.Tensor
""" """
return self._forward_func(x, edge_index, edge_attr) return self._forward_func(x, edge_index, edge_attr)
class GraphNeuralOperator(KernelNeuralOperator): class GraphNeuralOperator(KernelNeuralOperator):
""" """
TODO add docstring Graph Neural Operator model class.
The Graph Neural Operator is a general architecture for learning operators,
which map functions to functions. It can be trained both with Supervised
and Physics-Informed learning strategies. The Graph Neural Operator performs
graph convolution by means of a Graph Neural Kernel.
.. seealso::
**Original reference**: Li, Z., Kovachki, N., Azizzadenesheli, K.,
Liu, B., Bhattacharya, K., Stuart, A., Anandkumar, A. (2020).
*Neural Operator: Graph Kernel Network for Partial Differential
Equations*.
DOI: `arXiv preprint arXiv:2003.03485.
<https://arxiv.org/abs/2003.03485>`_
""" """
def __init__( def __init__(
@@ -127,34 +171,29 @@ class GraphNeuralOperator(KernelNeuralOperator):
        shared_weights=True,
    ):
        """
        Initialization of the :class:`GraphNeuralOperator` class.

        :param torch.nn.Module lifting_operator: The lifting neural network
            mapping the input to its hidden dimension.
        :param torch.nn.Module projection_operator: The projection neural
            network mapping the hidden representation to the output function.
        :param int edge_features: The number of edge features.
        :param int n_layers: The number of kernel layers. Default is ``10``.
        :param int internal_n_layers: The number of layers of the neural
            network inside each kernel layer. Default is ``0``.
        :param int inner_size: The size of the hidden layers of the neural
            network inside each kernel layer. Default is ``None``.
        :param internal_layers: The number of neurons for each layer of the
            neural network inside each kernel layer. Default is ``None``.
        :type internal_layers: list[int] | tuple[int]
        :param torch.nn.Module internal_func: The activation function used
            inside each kernel layer. If ``None``, the :class:`torch.nn.Tanh`
            activation is used. Default is ``None``.
        :param torch.nn.Module external_func: The activation function applied
            to the output of each kernel layer. If ``None``, the
            :class:`torch.nn.Tanh` activation is used. Default is ``None``.
        :param bool shared_weights: If ``True``, the weights of each kernel
            layer are shared. Default is ``True``.
        """
        if internal_func is None:
@@ -182,8 +221,9 @@ class GraphNeuralOperator(KernelNeuralOperator):
""" """
The forward pass of the Graph Neural Operator. The forward pass of the Graph Neural Operator.
:param x: The input batch. :param torch_geometric.data.Batch x: The input graph.
:type x: torch_geometric.data.Batch :return: The output tensor.
:rtype: torch.Tensor
""" """
x, edge_index, edge_attr = x.x, x.edge_index, x.edge_attr x, edge_index, edge_attr = x.x, x.edge_index, x.edge_attr
x = self.lifting_operator(x) x = self.lifting_operator(x)


@@ -1,5 +1,5 @@
""" """
Kernel Neural Operator Module. Module for the Kernel Neural Operator model class.
""" """
import torch import torch
@@ -8,13 +8,14 @@ from ..utils import check_consistency
class KernelNeuralOperator(torch.nn.Module):
    r"""
    Base class for Neural Operators with integral kernels.

    This class serves as a foundation for building Neural Operators that
    incorporate multiple integral kernels. All Neural Operator models in
    PINA inherit from this class. The design follows the framework proposed
    by Kovachki et al., as illustrated in Figure 2 of their work.
    Neural Operators derived from this class can be expressed as:

    .. math::
        G_\theta := P \circ K_m \circ \cdots \circ K_1 \circ L
@@ -40,15 +41,18 @@ class KernelNeuralOperator(torch.nn.Module):
        **Original reference**: Kovachki, N., Li, Z., Liu, B.,
        Azizzadenesheli, K., Bhattacharya, K., Stuart, A., & Anandkumar, A.
        (2023).
        *Neural operator: Learning maps between function spaces with
        applications to PDEs*.
        Journal of Machine Learning Research, 24(89), 1-97.
    """
    def __init__(self, lifting_operator, integral_kernels, projection_operator):
        """
        Initialization of the :class:`KernelNeuralOperator` class.

        :param torch.nn.Module lifting_operator: The lifting operator mapping
            the input to its hidden dimension.
        :param torch.nn.Module integral_kernels: List of integral kernels
            mapping each hidden representation to the next one.
        :param torch.nn.Module projection_operator: The projection operator
@@ -64,16 +68,19 @@ class KernelNeuralOperator(torch.nn.Module):
    @property
    def lifting_operator(self):
        """
        The lifting operator module.

        :return: The lifting operator module.
        :rtype: torch.nn.Module
        """
        return self._lifting_operator

    @lifting_operator.setter
    def lifting_operator(self, value):
        """
        Set the lifting operator module.

        :param torch.nn.Module value: The lifting operator module.
        """
        check_consistency(value, torch.nn.Module)
        self._lifting_operator = value
@@ -81,16 +88,19 @@ class KernelNeuralOperator(torch.nn.Module):
     @property
     def projection_operator(self):
         """
-        The projection operator property.
+        The projection operator module.
+
+        :return: The projection operator module.
+        :rtype: torch.nn.Module
         """
         return self._projection_operator
 
     @projection_operator.setter
     def projection_operator(self, value):
         """
-        The projection operator setter
+        Set the projection operator module.
 
-        :param torch.nn.Module value: The projection operator torch module.
+        :param torch.nn.Module value: The projection operator module.
         """
         check_consistency(value, torch.nn.Module)
         self._projection_operator = value
@@ -98,37 +108,41 @@ class KernelNeuralOperator(torch.nn.Module):
     @property
     def integral_kernels(self):
         """
-        The integral kernels operator property.
+        The integral kernels operator module.
+
+        :return: The integral kernels operator module.
+        :rtype: torch.nn.Module
        """
         return self._integral_kernels
 
     @integral_kernels.setter
     def integral_kernels(self, value):
         """
-        The integral kernels operator setter
+        Set the integral kernels operator module.
 
-        :param torch.nn.Module value: The integral kernels operator torch
-            module.
+        :param torch.nn.Module value: The integral kernels operator module.
         """
         check_consistency(value, torch.nn.Module)
         self._integral_kernels = value
     def forward(self, x):
         r"""
-        Forward computation for Base Neural Operator. It performs a
-        lifting of the input by the ``lifting_operator``.
-        Then different layers integral kernels are applied using
-        ``integral_kernels``. Finally the output is projected
-        to the final dimensionality by the ``projection_operator``.
+        Forward pass for the :class:`KernelNeuralOperator` model.
+
+        The ``lifting_operator`` maps the input to the hidden dimension.
+        The ``integral_kernels`` apply the integral kernels to the hidden
+        representation. The ``projection_operator`` maps the hidden
+        representation to the output function.
 
-        :param torch.Tensor x: The input tensor for performing the
-            computation. It expects a tensor :math:`B \times N \times D`,
-            where :math:`B` is the batch_size, :math:`N` the number of points
-            in the mesh, :math:`D` the dimension of the problem. In particular
-            :math:`D` is the number of spatial/paramtric/temporal variables
-            plus the field variables. For example for 2D problems with 2
-            output variables :math:`D=4`.
-        :return: The output tensor obtained from the NO.
+        :param x: The input tensor for performing the computation. It expects
+            a tensor :math:`B \times N \times D`, where :math:`B` is the
+            batch_size, :math:`N` the number of points in the mesh, and
+            :math:`D` the dimension of the problem. In particular, :math:`D`
+            is the number of spatial, parametric, and/or temporal variables
+            plus the field variables. For instance, for 2D problems with 2
+            output variables, :math:`D=4`.
+        :type x: torch.Tensor | LabelTensor
+        :return: The output tensor.
         :rtype: torch.Tensor
         """
         x = self.lifting_operator(x)
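To illustrate the lifting/kernel/projection composition documented above, a minimal sketch using plain ``torch.nn`` modules as stand-ins for the three operators; the dimensions are arbitrary, and the import path assumes the class is exported from ``pina.model``:

    import torch
    from pina.model import KernelNeuralOperator

    hidden = 32
    operator = KernelNeuralOperator(
        lifting_operator=torch.nn.Linear(4, hidden),     # L: D -> hidden
        integral_kernels=torch.nn.Sequential(            # K_1, ..., K_m
            torch.nn.Linear(hidden, hidden),
            torch.nn.GELU(),
            torch.nn.Linear(hidden, hidden),
        ),
        projection_operator=torch.nn.Linear(hidden, 2),  # P: hidden -> output
    )

    x = torch.rand(8, 100, 4)  # B x N x D, e.g. 2 coordinates + 2 fields
    out = operator(x)          # B x N x 2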


@@ -1,4 +1,4 @@
"""Module LowRank Neural Operator.""" """Module for the Low Rank Neural Operator model class."""
import torch import torch
from torch import nn from torch import nn
@@ -11,23 +11,20 @@ from .block.low_rank_block import LowRankBlock
 class LowRankNeuralOperator(KernelNeuralOperator):
     """
-    Implementation of LowRank Neural Operator.
+    Low Rank Neural Operator model class.
 
-    LowRank Neural Operator is a general architecture for
-    learning Operators. Unlike traditional machine learning methods
-    LowRankNeuralOperator is designed to map entire functions
-    to other functions. It can be trained with Supervised or PINN based
-    learning strategies.
-    LowRankNeuralOperator does convolution by performing a low rank
-    approximation, see :class:`~pina.model.block.lowrank_layer.LowRankBlock`.
+    The Low Rank Neural Operator is a general architecture for learning
+    operators, which map functions to functions. It can be trained both with
+    Supervised and Physics-Informed learning strategies. The Low Rank Neural
+    Operator performs convolution by means of a low rank approximation.
 
     .. seealso::
 
-        **Original reference**: Kovachki, N., Li, Z., Liu, B.,
-        Azizzadenesheli, K., Bhattacharya, K., Stuart, A., & Anandkumar, A.
-        (2023). *Neural operator: Learning maps between function
-        spaces with applications to PDEs*. Journal of Machine Learning
-        Research, 24(89), 1-97.
+        **Original reference**: Kovachki, N., Li, Z., Liu, B., Azizzadenesheli,
+        K., Bhattacharya, K., Stuart, A., & Anandkumar, A. (2023).
+        *Neural operator: Learning maps between function spaces with
+        applications to PDEs*.
+        Journal of Machine Learning Research, 24(89), 1-97.
     """
 
     def __init__(
@@ -44,32 +41,35 @@ class LowRankNeuralOperator(KernelNeuralOperator):
         bias=True,
     ):
         """
-        :param torch.nn.Module lifting_net: The neural network for lifting
-            the input. It must take as input the input field and the coordinates
-            at which the input field is avaluated. The output of the lifting
-            net is chosen as embedding dimension of the problem
-        :param torch.nn.Module projecting_net: The neural network for
-            projecting the output. It must take as input the embedding dimension
-            (output of the ``lifting_net``) plus the dimension
-            of the coordinates.
-        :param list[str] field_indices: the label of the fields
-            in the input tensor.
-        :param list[str] coordinates_indices: the label of the
-            coordinates in the input tensor.
-        :param int n_kernel_layers: number of hidden kernel layers.
-            Default is 4.
-        :param int inner_size: Number of neurons in the hidden layer(s) for the
-            basis function network. Default is 20.
-        :param int n_layers: Number of hidden layers. for the
-            basis function network. Default is 2.
-        :param func: The activation function to use for the
-            basis function network. If a single
-            :class:`torch.nn.Module` is passed, this is used as
-            activation function after any layers, except the last one.
-            If a list of Modules is passed,
-            they are used as activation functions at any layers, in order.
-        :param bool bias: If ``True`` the MLP will consider some bias for the
-            basis function network.
+        Initialization of the :class:`LowRankNeuralOperator` class.
+
+        :param torch.nn.Module lifting_net: The lifting neural network mapping
+            the input to its hidden dimension. It must take as input the input
+            field and the coordinates at which the input field is evaluated.
+        :param torch.nn.Module projecting_net: The projection neural network
+            mapping the hidden representation to the output function. It must
+            take as input the embedding dimension plus the dimension of the
+            coordinates.
+        :param list[str] field_indices: The labels of the fields in the input
+            tensor.
+        :param list[str] coordinates_indices: The labels of the coordinates in
+            the input tensor.
+        :param int n_kernel_layers: The number of hidden kernel layers.
+        :param int rank: The rank of the low rank approximation.
+        :param int inner_size: The number of neurons for each hidden layer in
+            the basis function neural network. Default is ``20``.
+        :param int n_layers: The number of hidden layers in the basis function
+            neural network. Default is ``2``.
+        :param func: The activation function. If a list is passed, it must have
+            the same length as ``n_layers``. If a single function is passed, it
+            is used for all layers, except for the last one.
+            Default is :class:`torch.nn.Tanh`.
+        :param bool bias: If ``True`` bias is considered for the basis function
+            neural network. Default is ``True``.
+        :raises ValueError: If the input dimension does not match with the
+            labels of the fields and coordinates.
+        :raises ValueError: If the input dimension of the projecting network
+            does not match with the hidden dimension of the lifting network.
         """
         # check consistency
@@ -122,19 +122,20 @@ class LowRankNeuralOperator(KernelNeuralOperator):
     def forward(self, x):
         r"""
-        Forward computation for LowRank Neural Operator. It performs a
-        lifting of the input by the ``lifting_net``. Then different layers
-        of LowRank Neural Operator Blocks are applied.
-        Finally the output is projected to the final dimensionality
-        by the ``projecting_net``.
+        Forward pass for the :class:`LowRankNeuralOperator` model.
+
+        The ``lifting_net`` maps the input to the hidden dimension.
+        Then, several layers of
+        :class:`~pina.model.block.low_rank_block.LowRankBlock` are
+        applied. Finally, the ``projecting_net`` maps the hidden
+        representation to the output function.
 
-        :param torch.Tensor x: The input tensor for fourier block,
-            depending on ``dimension`` in the initialization. It expects
-            a tensor :math:`B \times N \times D`,
-            where :math:`B` is the batch_size, :math:`N` the number of points
-            in the mesh, :math:`D` the dimension of the problem, i.e. the sum
-            of ``len(coordinates_indices)+len(field_indices)``.
-        :return: The output tensor obtained from Average Neural Operator.
+        :param LabelTensor x: The input tensor for performing the computation.
+            It expects a tensor :math:`B \times N \times D`, where :math:`B` is
+            the batch_size, :math:`N` the number of points in the mesh,
+            :math:`D` the dimension of the problem, i.e. the sum
+            of ``len(coordinates_indices)`` and ``len(field_indices)``.
+        :return: The output tensor.
         :rtype: torch.Tensor
         """
         # extract points

@@ -1,4 +1,4 @@
"""Module for Multi FeedForward model""" """Module for the Multi Feed Forward model class"""
from abc import ABC, abstractmethod from abc import ABC, abstractmethod
import torch import torch
@@ -7,16 +7,21 @@ from .feed_forward import FeedForward
 class MultiFeedForward(torch.nn.Module, ABC):
     """
-    The PINA implementation of MultiFeedForward network.
+    Multi Feed Forward neural network model class.
 
-    This model allows to create a network with multiple FeedForward combined
-    together. The user has to define the `forward` method choosing how to
-    combine the different FeedForward networks.
-
-    :param dict ffn_dict: dictionary of FeedForward networks.
+    This model allows to create a network with multiple Feed Forward neural
+    networks combined together. The user is required to define the ``forward``
+    method to choose how to combine the networks.
     """
 
     def __init__(self, ffn_dict):
+        """
+        Initialization of the :class:`MultiFeedForward` class.
+
+        :param dict ffn_dict: A dictionary containing the Feed Forward neural
+            networks to be combined.
+        :raises TypeError: If the input is not a dictionary.
+        """
         super().__init__()
         if not isinstance(ffn_dict, dict):
@@ -28,5 +33,8 @@ class MultiFeedForward(torch.nn.Module, ABC):
     @abstractmethod
     def forward(self, *args, **kwargs):
         """
-        TODO: Docstring
+        Forward pass for the :class:`MultiFeedForward` model.
+
+        The user is required to define this method to choose how to combine the
+        networks.
         """


@@ -1,19 +1,26 @@
"""Module for Spline model""" """Module for the Spline model class"""
import torch import torch
from ..utils import check_consistency from ..utils import check_consistency
 class Spline(torch.nn.Module):
-    """TODO: Docstring for Spline."""
+    """
+    Spline model class.
+    """
 
     def __init__(self, order=4, knots=None, control_points=None) -> None:
         """
-        Spline model.
+        Initialization of the :class:`Spline` class.
 
-        :param int order: the order of the spline.
-        :param torch.Tensor knots: the knot vector.
-        :param torch.Tensor control_points: the control points.
+        :param int order: The order of the spline. Default is ``4``.
+        :param torch.Tensor knots: The tensor representing knots. If ``None``,
+            the knots will be initialized automatically. Default is ``None``.
+        :param torch.Tensor control_points: The control points. Default is
+            ``None``.
+        :raises ValueError: If the order is negative.
+        :raises ValueError: If both knots and control points are ``None``.
+        :raises ValueError: If the knot tensor is not one-dimensional.
         """
         super().__init__()
@@ -63,13 +70,13 @@ class Spline(torch.nn.Module):
     def basis(self, x, k, i, t):
         """
-        Recursive function to compute the basis functions of the spline.
+        Recursive method to compute the basis functions of the spline.
 
-        :param torch.Tensor x: points to be evaluated.
-        :param int k: spline degree
-        :param int i: the index of the interval
-        :param torch.Tensor t: vector of knots
-        :return: the basis functions evaluated at x
+        :param torch.Tensor x: The points to be evaluated.
+        :param int k: The spline degree.
+        :param int i: The index of the interval.
+        :param torch.Tensor t: The tensor of knots.
+        :return: The basis functions evaluated at x.
         :rtype: torch.Tensor
         """
@@ -100,11 +107,23 @@ class Spline(torch.nn.Module):
     @property
     def control_points(self):
-        """TODO: Docstring for control_points."""
+        """
+        The control points of the spline.
+
+        :return: The control points.
+        :rtype: torch.Tensor
+        """
         return self._control_points
 
     @control_points.setter
     def control_points(self, value):
+        """
+        Set the control points of the spline.
+
+        :param value: The control points.
+        :type value: torch.Tensor | dict
+        :raises ValueError: If an invalid value is passed.
+        """
         if isinstance(value, dict):
             if "n" not in value:
                 raise ValueError("Invalid value for control_points")
@@ -118,11 +137,23 @@ class Spline(torch.nn.Module):
     @property
     def knots(self):
-        """TODO: Docstring for knots."""
+        """
+        The knots of the spline.
+
+        :return: The knots.
+        :rtype: torch.Tensor
+        """
         return self._knots
 
     @knots.setter
     def knots(self, value):
+        """
+        Set the knots of the spline.
+
+        :param value: The knots.
+        :type value: torch.Tensor | dict
+        :raises ValueError: If an invalid value is passed.
+        """
         if isinstance(value, dict):
             type_ = value.get("type", "auto")
@@ -152,10 +183,10 @@ class Spline(torch.nn.Module):
     def forward(self, x):
         """
-        Forward pass of the spline model.
+        Forward pass for the :class:`Spline` model.
 
-        :param torch.Tensor x: points to be evaluated.
-        :return: the spline evaluated at x
+        :param torch.Tensor x: The input tensor.
+        :return: The output tensor.
         :rtype: torch.Tensor
         """
         t = self.knots
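A hedged usage sketch of the model: a cubic spline (order 4) on a clamped knot vector, with one control point per basis function (``len(knots) - order`` of them). The exact initialization and validation rules are those of the class and should be verified against its implementation:

    import torch
    from pina.model import Spline

    knots = torch.tensor([0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 3.0, 3.0, 3.0])
    control = torch.rand(6)  # len(knots) - order control points
    spline = Spline(order=4, knots=knots, control_points=control)

    x = torch.linspace(0.0, 3.0, 50)
    # y = spline(x)  # the spline evaluated at x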