fix doc model part 2

giovanni
2025-03-14 16:07:08 +01:00
committed by Nicola Demo
parent 001d1fc9cf
commit f9881a79b5
18 changed files with 887 additions and 851 deletions

View File

@@ -1,5 +1,5 @@
""" """
Module containing the building blocks for models. Module for the building blocks of the neural models.
""" """
__all__ = [ __all__ = [

View File

@@ -1,4 +1,4 @@
"""Module for Averaging Neural Operator Layer class.""" """Module for the Averaging Neural Operator Block class."""
import torch import torch
from torch import nn from torch import nn
@@ -7,12 +7,12 @@ from ...utils import check_consistency
class AVNOBlock(nn.Module): class AVNOBlock(nn.Module):
r""" r"""
The PINA implementation of the inner layer of the Averaging Neural Operator. The inner block of the Averaging Neural Operator.
The operator layer performs an affine transformation where the convolution The operator layer performs an affine transformation where the convolution
is approximated with a local average. Given the input function is approximated with a local average. Given the input function
:math:`v(x)\in\mathbb{R}^{\rm{emb}}` the layer computes :math:`v(x)\in\mathbb{R}^{\rm{emb}}` the layer computes the operator update
the operator update :math:`K(v)` as: :math:`K(v)` as:
.. math:: .. math::
K(v) = \sigma\left(Wv(x) + b + \frac{1}{|\mathcal{A}|}\int v(y)dy\right) K(v) = \sigma\left(Wv(x) + b + \frac{1}{|\mathcal{A}|}\int v(y)dy\right)
@@ -28,18 +28,20 @@ class AVNOBlock(nn.Module):
.. seealso:: .. seealso::
**Original reference**: Lanthaler S. Li, Z., Kovachki, **Original reference**: Lanthaler S., Li, Z., Stuart, A. (2023).
Stuart, A. (2020). *The Nonlocal Neural Operator: Universal *The Nonlocal Neural Operator: Universal Approximation*.
Approximation*.
DOI: `arXiv preprint arXiv:2304.13221. DOI: `arXiv preprint arXiv:2304.13221.
<https://arxiv.org/abs/2304.13221>`_ <https://arxiv.org/abs/2304.13221>`_
""" """
def __init__(self, hidden_size=100, func=nn.GELU): def __init__(self, hidden_size=100, func=nn.GELU):
""" """
:param int hidden_size: Size of the hidden layer, defaults to 100. Initialization of the :class:`AVNOBlock` class.
:param func: The activation function, default to nn.GELU.
:param int hidden_size: The size of the hidden layer.
Default is ``100``.
:param func: The activation function.
Default is :class:`torch.nn.GELU`.
""" """
super().__init__() super().__init__()
@@ -52,17 +54,11 @@ class AVNOBlock(nn.Module):
def forward(self, x): def forward(self, x):
r""" r"""
Forward pass of the layer, it performs a sum of local average Forward pass of the block. It performs a sum of local average and an
and an affine transformation of the field. affine transformation of the field.
:param torch.Tensor x: The input tensor for performing the :param torch.Tensor x: The input tensor for performing the computation.
computation. It expects a tensor :math:`B \times N \times D`, :return: The output tensor.
where :math:`B` is the batch_size, :math:`N` the number of points
in the mesh, :math:`D` the dimension of the problem. In particular
:math:`D` is the codomain of the function :math:`v`. For example
a scalar function has :math:`D=1`, a 4-dimensional vector function
:math:`D=4`.
:return: The output tensor obtained from Average Neural Operator Block.
:rtype: torch.Tensor :rtype: torch.Tensor
""" """
return self._func(self._nn(x) + torch.mean(x, dim=1, keepdim=True)) return self._func(self._nn(x) + torch.mean(x, dim=1, keepdim=True))
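
A minimal usage sketch of the block (the import path is assumed; ``hidden_size`` must match the last dimension of the input, and the local average is taken over the mesh dimension):

>>> import torch
>>> block = AVNOBlock(hidden_size=64)
>>> x = torch.rand(8, 100, 64)  # [batch, mesh points, embedding]
>>> y = block(x)                # expected shape [8, 100, 64]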

View File

@@ -1,4 +1,4 @@
"""Module for Base Continuous Convolution class.""" """Module for the Base Continuous Convolution class."""
from abc import ABCMeta, abstractmethod from abc import ABCMeta, abstractmethod
import torch import torch
@@ -7,8 +7,31 @@ from .utils_convolution import optimizing
class BaseContinuousConv(torch.nn.Module, metaclass=ABCMeta): class BaseContinuousConv(torch.nn.Module, metaclass=ABCMeta):
""" r"""
Abstract class Base Class for Continuous Convolution.
The class expects the input to be in the form:
:math:`[B \times N_{in} \times N \times D]`, where :math:`B` is the
batch_size, :math:`N_{in}` is the number of input fields, :math:`N`
the number of points in the mesh, :math:`D` the dimension of the problem.
In particular:
* :math:`D` is the number of spatial variables + 1. The last column must
contain the field value.
* :math:`N_{in}` represents the number of function components.
For instance, a vectorial function :math:`f = [f_1, f_2]` has
:math:`N_{in}=2`.
:Note
A 2-dimensional vector-valued function defined on a 3-dimensional input
evaluated on a 100 points input mesh and batch size of 8 is represented
as a tensor of shape ``[8, 2, 100, 4]``, where the columns
``[:, 0, :, -1]`` and ``[:, 1, :, -1]`` represent the first and second
components of the function, respectively.
The algorithm returns a tensor of shape:
:math:`[B \times N_{out} \times N \times D]`, where :math:`B` is the
batch_size, :math:`N_{out}` is the number of output fields, :math:`N`
the number of points in the mesh, :math:`D` the dimension of the problem.
""" """
def __init__( def __init__(
@@ -22,56 +45,30 @@ class BaseContinuousConv(torch.nn.Module, metaclass=ABCMeta):
no_overlap=False, no_overlap=False,
): ):
""" """
Base Class for Continuous Convolution. Initialization of the :class:`BaseContinuousConv` class.
The algorithm expects input to be in the form: :param int input_numb_field: The number of input fields.
$$[B \times N_{in} \times N \times D]$$ :param int output_numb_field: The number of output fields.
where $B$ is the batch_size, $N_{in}$ is the number of input :param filter_dim: The shape of the filter.
fields, $N$ the number of points in the mesh, $D$ the dimension :type filter_dim: list[int] | tuple[int]
of the problem. In particular: :param dict stride: The stride of the filter.
* $D$ is the number of spatial variables + 1. The last column must :param torch.nn.Module model: The neural network for inner
contain the field value. For example for 2D problems $D=3$ and parametrization. Default is ``None``.
the tensor will be something like `[first coordinate, second :param bool optimize: If ``True``, optimization is performed on the
coordinate, field value]`. continuous filter. It should be used only when the training points
* $N_{in}$ represents the number of vectorial function presented. are fixed. If ``model`` is in ``eval`` mode, it is reset to
For example a vectorial function $f = [f_1, f_2]$ will have ``False``. Default is ``False``.
$N_{in}=2$. :param bool no_overlap: If ``True``, optimization is performed on the
transposed continuous filter. It should be used only when the filter
:Note positions do not overlap for different strides.
A 2-dimensional vectorial function $N_{in}=2$ of 3-dimensional Default is ``False``.
input $D=3+1=4$ with 100 points input mesh and batch size of 8 :raises ValueError: If ``input_numb_field`` is not an integer.
is represented as a tensor `[8, 2, 100, 4]`, where the columns :raises ValueError: If ``output_numb_field`` is not an integer.
`[:, 0, :, -1]` and `[:, 1, :, -1]` represent the first and :raises ValueError: If ``filter_dim`` is not a list or tuple.
second filed value respectively :raises ValueError: If ``stride`` is not a dictionary.
:raises ValueError: If ``optimize`` is not a boolean.
The algorithm returns a tensor of shape: :raises ValueError: If ``no_overlap`` is not a boolean.
$$[B \times N_{out} \times N' \times D]$$ :raises NotImplementedError: If ``no_overlap`` is ``True``.
where $B$ is the batch_size, $N_{out}$ is the number of output
fields, $N'$ the number of points in the mesh, $D$ the dimension
of the problem.
:param input_numb_field: number of fields in the input
:type input_numb_field: int
:param output_numb_field: number of fields in the output
:type output_numb_field: int
:param filter_dim: dimension of the filter
:type filter_dim: tuple/ list
:param stride: stride for the filter
:type stride: dict
:param model: neural network for inner parametrization,
defaults to None.
:type model: torch.nn.Module, optional
:param optimize: flag for performing optimization on the continuous
filter, defaults to False. The flag `optimize=True` should be
used only when the scatter datapoints are fixed through the
training. If torch model is in `.eval()` mode, the flag is
automatically set to False always.
:type optimize: bool, optional
:param no_overlap: flag for performing optimization on the transpose
continuous filter, defaults to False. The flag set to `True` should
be used only when the filter positions do not overlap for different
strides. RuntimeError will raise in case of non-compatible strides.
:type no_overlap: bool, optional
""" """
super().__init__() super().__init__()
@@ -119,12 +116,17 @@ class BaseContinuousConv(torch.nn.Module, metaclass=ABCMeta):
class DefaultKernel(torch.nn.Module): class DefaultKernel(torch.nn.Module):
""" """
TODO The default kernel.
""" """
def __init__(self, input_dim, output_dim): def __init__(self, input_dim, output_dim):
""" """
TODO Initialization of the :class:`DefaultKernel` class.
:param int input_dim: The input dimension.
:param int output_dim: The output dimension.
:raises ValueError: If ``input_dim`` is not an integer.
:raises ValueError: If ``output_dim`` is not an integer.
""" """
super().__init__() super().__init__()
assert isinstance(input_dim, int) assert isinstance(input_dim, int)
@@ -139,65 +141,93 @@ class BaseContinuousConv(torch.nn.Module, metaclass=ABCMeta):
def forward(self, x): def forward(self, x):
""" """
TODO Forward pass.
:param torch.Tensor x: The input data.
:return: The output data.
:rtype: torch.Tensor
""" """
return self._model(x) return self._model(x)
@property @property
def net(self): def net(self):
""" """
TODO The neural network for inner parametrization.
:return: The neural network.
:rtype: torch.nn.Module
""" """
return self._net return self._net
@property @property
def stride(self): def stride(self):
""" """
TODO The stride of the filter.
:return: The stride of the filter.
:rtype: dict
""" """
return self._stride return self._stride
@property @property
def filter_dim(self): def filter_dim(self):
""" """
TODO The shape of the filter.
:return: The shape of the filter.
:rtype: torch.Tensor
""" """
return self._dim return self._dim
@property @property
def input_numb_field(self): def input_numb_field(self):
""" """
TODO The number of input fields.
:return: The number of input fields.
:rtype: int
""" """
return self._input_numb_field return self._input_numb_field
@property @property
def output_numb_field(self): def output_numb_field(self):
""" """
TODO The number of output fields.
:return: The number of output fields.
:rtype: int
""" """
return self._output_numb_field return self._output_numb_field
@abstractmethod @abstractmethod
def forward(self, X): def forward(self, X):
""" """
TODO Forward pass.
:param torch.Tensor X: The input data.
""" """
@abstractmethod @abstractmethod
def transpose_overlap(self, X): def transpose_overlap(self, X):
""" """
TODO Transpose the convolution with overlap.
:param torch.Tensor X: The input data.
""" """
@abstractmethod @abstractmethod
def transpose_no_overlap(self, X): def transpose_no_overlap(self, X):
""" """
TODO Transpose the convolution without overlap.
:param torch.Tensor X: The input data.
""" """
@abstractmethod @abstractmethod
def _initialize_convolution(self, X, type_): def _initialize_convolution(self, X, type_):
""" """
TODO Initialize the convolution.
:param torch.Tensor X: The input data.
:param str type_: The type of initialization.
""" """

View File

@@ -7,30 +7,27 @@ from .integral import Integral
class ContinuousConvBlock(BaseContinuousConv): class ContinuousConvBlock(BaseContinuousConv):
""" r"""
Implementation of Continuous Convolutional operator. Continuous Convolutional block.
The algorithm expects input to be in the form:
:math:`[B, N_{in}, N, D]`
where :math:`B` is the batch_size, :math:`N_{in}` is the number of input
fields, :math:`N` the number of points in the mesh, :math:`D` the dimension
of the problem. In particular:
The class expects the input to be in the form:
:math:`[B \times N_{in} \times N \times D]`, where :math:`B` is the
batch_size, :math:`N_{in}` is the number of input fields, :math:`N`
the number of points in the mesh, :math:`D` the dimension of the problem.
In particular:
* :math:`D` is the number of spatial variables + 1. The last column must * :math:`D` is the number of spatial variables + 1. The last column must
contain the field value. For example for 2D problems :math:`D=3` and contain the field value.
the tensor will be something like ``[first coordinate, second * :math:`N_{in}` represents the number of function components.
coordinate, field value]``. For instance, a vectorial function :math:`f = [f_1, f_2]` has
* :math:`N_{in}` represents the number of vectorial function presented.
For example a vectorial function :math:`f = [f_1, f_2]` will have
:math:`N_{in}=2`. :math:`N_{in}=2`.
.. seealso:: .. seealso::
**Original reference**: Coscia, D., Meneghetti, L., Demo, N. et al. **Original reference**:
*A continuous convolutional trainable filter for modelling Coscia, D., Meneghetti, L., Demo, N. et al.
unstructured data*. Comput Mech 72, 253–265 (2023). *A continuous convolutional trainable filter for modelling unstructured
data*. Comput Mech 72, 253-265 (2023).
DOI `<https://doi.org/10.1007/s00466-023-02291-1>`_ DOI `<https://doi.org/10.1007/s00466-023-02291-1>`_
""" """
def __init__( def __init__(
@@ -44,53 +41,48 @@ class ContinuousConvBlock(BaseContinuousConv):
no_overlap=False, no_overlap=False,
): ):
""" """
:param input_numb_field: Number of fields :math:`N_{in}` in the input. Initialization of the :class:`ContinuousConvBlock` class.
:type input_numb_field: int
:param output_numb_field: Number of fields :math:`N_{out}` in the :param int input_numb_field: The number of input fields.
output. :param int output_numb_field: The number of output fields.
:type output_numb_field: int :param filter_dim: The shape of the filter.
:param filter_dim: Dimension of the filter. :type filter_dim: list[int] | tuple[int]
:type filter_dim: tuple(int) | list(int) :param dict stride: The stride of the filter.
:param stride: Stride for the filter. :param torch.nn.Module model: The neural network for inner
:type stride: dict parametrization. Default is ``None``.
:param model: Neural network for inner parametrization, :param bool optimize: If ``True``, optimization is performed on the
defaults to ``None``. If None, a default multilayer perceptron continuous filter. It should be used only when the training points
of width three and size twenty with ReLU activation is used. are fixed. If ``model`` is in ``eval`` mode, it is reset to
:type model: torch.nn.Module ``False``. Default is ``False``.
:param optimize: Flag for performing optimization on the continuous :param bool no_overlap: If ``True``, optimization is performed on the
filter, defaults to False. The flag `optimize=True` should be transposed continuous filter. It should be used only when the filter
used only when the scatter datapoints are fixed through the positions do not overlap for different strides.
training. If torch model is in ``.eval()`` mode, the flag is Default is ``False``.
automatically set to False always.
:type optimize: bool
:param no_overlap: Flag for performing optimization on the transpose
continuous filter, defaults to False. The flag set to `True` should
be used only when the filter positions do not overlap for different
strides. RuntimeError will raise in case of non-compatible strides.
:type no_overlap: bool
.. note:: .. note::
Using `optimize=True` the filter can be use either in `forward` If ``optimize=True``, the filter can be used either in ``forward``
or in `transpose` mode, not both. If `optimize=False` the same or in ``transpose`` mode, not both.
filter can be used for both `transpose` and `forward` modes.
:Example: :Example:
>>> class MLP(torch.nn.Module): >>> class MLP(torch.nn.Module):
def __init__(self) -> None: ... def __init__(self) -> None:
super().__init__() ... super().__init__()
self. model = torch.nn.Sequential( ... self.model = torch.nn.Sequential(
torch.nn.Linear(2, 8), ... torch.nn.Linear(2, 8),
torch.nn.ReLU(), ... torch.nn.ReLU(),
torch.nn.Linear(8, 8), ... torch.nn.Linear(8, 8),
torch.nn.ReLU(), ... torch.nn.ReLU(),
torch.nn.Linear(8, 1)) ... torch.nn.Linear(8, 1)
def forward(self, x): ... )
return self.model(x) ... def forward(self, x):
... return self.model(x)
>>> dim = [3, 3] >>> dim = [3, 3]
>>> stride = {"domain": [10, 10], >>> stride = {
"start": [0, 0], ... "domain": [10, 10],
"jumps": [3, 3], ... "start": [0, 0],
"direction": [1, 1.]} ... "jumps": [3, 3],
... "direction": [1, 1.]
... }
>>> conv = ContinuousConv2D(1, 2, dim, stride, MLP) >>> conv = ContinuousConv2D(1, 2, dim, stride, MLP)
>>> conv >>> conv
ContinuousConv2D( ContinuousConv2D(
@@ -116,7 +108,6 @@ class ContinuousConvBlock(BaseContinuousConv):
) )
) )
""" """
super().__init__( super().__init__(
input_numb_field=input_numb_field, input_numb_field=input_numb_field,
output_numb_field=output_numb_field, output_numb_field=output_numb_field,
@@ -143,13 +134,13 @@ class ContinuousConvBlock(BaseContinuousConv):
def _spawn_networks(self, model): def _spawn_networks(self, model):
""" """
Private method to create a collection of kernels Create a collection of kernels.
:param model: A :class:`torch.nn.Module` model in form of Object class. :param torch.nn.Module model: A neural network model.
:type model: torch.nn.Module :raises ValueError: If the model is not a subclass of
:return: List of :class:`torch.nn.Module` models. ``torch.nn.Module``.
:return: A list of models.
:rtype: torch.nn.ModuleList :rtype: torch.nn.ModuleList
""" """
nets = [] nets = []
if self._net is None: if self._net is None:
@@ -176,13 +167,11 @@ class ContinuousConvBlock(BaseContinuousConv):
def _extract_mapped_points(self, batch_idx, index, x): def _extract_mapped_points(self, batch_idx, index, x):
""" """
Priviate method to extract mapped points in the filter Extract mapped points in the filter.
:param x: Input tensor of shape ``[channel, N, dim]`` :param torch.Tensor x: Input tensor of shape ``[channel, N, dim]``.
:type x: torch.Tensor
:return: Mapped points and indeces for each channel, :return: Mapped points and indices for each channel.
:rtype: torch.Tensor, list :rtype: tuple
""" """
mapped_points = [] mapped_points = []
indeces_channels = [] indeces_channels = []
@@ -218,11 +207,9 @@ class ContinuousConvBlock(BaseContinuousConv):
def _find_index(self, X): def _find_index(self, X):
""" """
Private method to extract indeces for convolution. Extract indices for convolution.
:param X: Input tensor, as in ContinuousConvBlock ``__init__``.
:type X: torch.Tensor
:param torch.Tensor X: The input tensor.
""" """
# append the index for each stride # append the index for each stride
index = [] index = []
@@ -236,11 +223,9 @@ class ContinuousConvBlock(BaseContinuousConv):
def _make_grid_forward(self, X): def _make_grid_forward(self, X):
""" """
Private method to create forward convolution grid. Create forward convolution grid.
:param X: Input tensor, as in ContinuousConvBlock docstring.
:type X: torch.Tensor
:param torch.Tensor X: The input tensor.
""" """
# filter dimension + number of points in output grid # filter dimension + number of points in output grid
filter_dim = len(self._dim) filter_dim = len(self._dim)
@@ -264,12 +249,9 @@ class ContinuousConvBlock(BaseContinuousConv):
def _make_grid_transpose(self, X): def _make_grid_transpose(self, X):
""" """
Private method to create transpose convolution grid. Create transpose convolution grid.
:param X: Input tensor, as in ContinuousConvBlock docstring.
:type X: torch.Tensor
:param torch.Tensor X: The input tensor.
""" """
# initialize to all zeros # initialize to all zeros
tmp = torch.zeros_like(X).as_subclass(torch.Tensor) tmp = torch.zeros_like(X).as_subclass(torch.Tensor)
@@ -280,14 +262,12 @@ class ContinuousConvBlock(BaseContinuousConv):
def _make_grid(self, X, type_): def _make_grid(self, X, type_):
""" """
Private method to create convolution grid. Create convolution grid.
:param X: Input tensor, as in ContinuousConvBlock docstring.
:type X: torch.Tensor
:param type: Type of convolution, ``['forward', 'inverse']`` the
possibilities.
:type type: str
:param torch.Tensor X: The input tensor.
:param str type_: The type of convolution.
Available options are: ``forward`` and ``inverse``.
:raises TypeError: If the type is not in the available options.
""" """
# choose the type of convolution # choose the type of convolution
if type_ == "forward": if type_ == "forward":
@@ -300,15 +280,12 @@ class ContinuousConvBlock(BaseContinuousConv):
def _initialize_convolution(self, X, type_="forward"): def _initialize_convolution(self, X, type_="forward"):
""" """
Private method to intialize the convolution. Initialize the convolution by setting a grid and computing the index to
The convolution is initialized by setting a grid and find the points inside the filter.
calculate the index for finding the points inside the
filter.
:param X: Input tensor, as in ContinuousConvBlock docstring. :param torch.Tensor X: The input tensor.
:type X: torch.Tensor :param str type_: The type of convolution. Available options are:
:param str type: type of convolution, ``['forward', 'inverse'] ``the ``forward`` and ``inverse``. Default is ``forward``.
possibilities.
""" """
# variable for the convolution # variable for the convolution
@@ -319,11 +296,10 @@ class ContinuousConvBlock(BaseContinuousConv):
def forward(self, X): def forward(self, X):
""" """
Forward pass in the convolutional layer. Forward pass.
:param x: Input data for the convolution :math:`[B, N_{in}, N, D]`. :param torch.Tensor X: The input tensor.
:type x: torch.Tensor :return: The output tensor.
:return: Convolution output :math:`[B, N_{out}, N, D]`.
:rtype: torch.Tensor :rtype: torch.Tensor
""" """
@@ -381,25 +357,14 @@ class ContinuousConvBlock(BaseContinuousConv):
def transpose_no_overlap(self, integrals, X): def transpose_no_overlap(self, integrals, X):
""" """
Transpose pass in the layer for no-overlapping filters Transpose pass in the layer for no-overlapping filters.
:param integrals: Weights for the transpose convolution. Shape :param torch.Tensor integrals: The weights for the transpose convolution.
:math:`[B, N_{in}, N]` Expected shape :math:`[B, N_{in}, N]`.
where B is the batch_size, :math`N_{in}` is the number of input :param torch.Tensor X: The input data.
fields, :math:`N` the number of points in the mesh, D the dimension Expected shape :math:`[B, N_{in}, M, D]`.
of the problem. :return: Feed forward transpose convolution.
:type integral: torch.tensor Expected shape: :math:`[B, N_{out}, M, D]`.
:param X: Input data. Expect tensor of shape
:math:`[B, N_{in}, M, D]` where :math:`B` is the batch_size,
:math`N_{in}`is the number of input fields, :math:`M` the number of
points
in the mesh, :math:`D` the dimension of the problem.
:type X: torch.Tensor
:return: Feed forward transpose convolution. Tensor of shape
:math:`[B, N_{out}, M, D]` where :math:`B` is the batch_size,
:math`N_{out}`is the number of input fields, :math:`M` the number of
points
in the mesh, :math:`D` the dimension of the problem.
:rtype: torch.Tensor :rtype: torch.Tensor
.. note:: .. note::
@@ -466,25 +431,14 @@ class ContinuousConvBlock(BaseContinuousConv):
def transpose_overlap(self, integrals, X): def transpose_overlap(self, integrals, X):
""" """
Transpose pass in the layer for overlapping filters Transpose pass in the layer for overlapping filters.
:param integrals: Weights for the transpose convolution. Shape :param torch.Tensor integrals: The weights for the transpose convolution.
:math:`[B, N_{in}, N]` Expected shape :math:`[B, N_{in}, N]`.
where B is the batch_size, :math`N_{in}` is the number of input :param torch.Tensor X: The input data.
fields, :math:`N` the number of points in the mesh, D the dimension Expected shape :math:`[B, N_{in}, M, D]`.
of the problem. :return: Feed forward transpose convolution.
:type integral: torch.tensor Expected shape: :math:`[B, N_{out}, M, D]`.
:param X: Input data. Expect tensor of shape
:math:`[B, N_{in}, M, D]` where :math:`B` is the batch_size,
:math`N_{in}`is the number of input fields, :math:`M` the number of
points
in the mesh, :math:`D` the dimension of the problem.
:type X: torch.Tensor
:return: Feed forward transpose convolution. Tensor of shape
:math:`[B, N_{out}, M, D]` where :math:`B` is the batch_size,
:math`N_{out}`is the number of input fields, :math:`M` the number of
points
in the mesh, :math:`D` the dimension of the problem.
:rtype: torch.Tensor :rtype: torch.Tensor
.. note:: This function is automatically called when ``.transpose()`` .. note:: This function is automatically called when ``.transpose()``
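
Continuing the docstring example above (which instantiates the block as ``ContinuousConv2D``), a hedged sketch of a forward call; the output mesh size :math:`N'` depends on the chosen stride:

>>> coords = 10 * torch.rand(8, 1, 100, 2)  # coordinates inside the [10, 10] domain
>>> field = torch.rand(8, 1, 100, 1)        # field value in the last column
>>> x = torch.cat((coords, field), dim=-1)  # [B, N_in, N, D] = [8, 1, 100, 3]
>>> conv = ContinuousConv2D(1, 2, dim, stride, MLP)
>>> y = conv(x)                             # expected shape [B, N_out, N', D]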

View File

@@ -1,4 +1,4 @@
"""Embedding modulus.""" """Modules for the Embedding blocks."""
import torch import torch
from pina.utils import check_consistency from pina.utils import check_consistency
@@ -6,20 +6,18 @@ from pina.utils import check_consistency
class PeriodicBoundaryEmbedding(torch.nn.Module): class PeriodicBoundaryEmbedding(torch.nn.Module):
r""" r"""
Imposing hard constraint periodic boundary conditions by embedding the Enforcing hard-constrained periodic boundary conditions by embedding the
input. input.
A periodic function :math:`u:\mathbb{R}^{\rm{in}} A function :math:`u:\mathbb{R}^{\rm{in}} \rightarrow\mathbb{R}^{\rm{out}}`
\rightarrow\mathbb{R}^{\rm{out}}` periodic in the spatial is periodic with respect to the spatial coordinates :math:`\mathbf{x}`
coordinates :math:`\mathbf{x}` with periods :math:`\mathbf{L}` is such that: with period :math:`\mathbf{L}` if:
.. math:: .. math::
u(\mathbf{x}) = u(\mathbf{x} + n \mathbf{L})\;\; u(\mathbf{x}) = u(\mathbf{x} + n \mathbf{L})\;\;
\forall n\in\mathbb{N}. \forall n\in\mathbb{N}.
The :meth:`PeriodicBoundaryEmbedding` augments the input such that the The :class:`PeriodicBoundaryEmbedding` augments the input as follows:
periodic conditonsis guarantee. The input is augmented by the following
formula:
.. math:: .. math::
\mathbf{x} \rightarrow \tilde{\mathbf{x}} = \left[1, \mathbf{x} \rightarrow \tilde{\mathbf{x}} = \left[1,
@@ -32,44 +30,48 @@ class PeriodicBoundaryEmbedding(torch.nn.Module):
.. seealso:: .. seealso::
**Original reference**: **Original reference**:
1. Dong, Suchuan, and Naxian Ni (2021). *A method for representing 1. Dong, Suchuan, and Naxian Ni (2021).
periodic functions and enforcing exactly periodic boundary *A method for representing periodic functions and enforcing
conditions with deep neural networks*. Journal of Computational exactly periodic boundary conditions with deep neural networks*.
Physics 435, 110242. Journal of Computational Physics 435, 110242.
DOI: `10.1016/j.jcp.2021.110242. DOI: `10.1016/j.jcp.2021.110242.
<https://doi.org/10.1016/j.jcp.2021.110242>`_ <https://doi.org/10.1016/j.jcp.2021.110242>`_
2. Wang, S., Sankaran, S., Wang, H., & Perdikaris, P. (2023). *An 2. Wang, S., Sankaran, S., Wang, H., & Perdikaris, P. (2023).
expert's guide to training physics-informed neural networks*. *An expert's guide to training physics-informed neural
networks*.
DOI: `arXiv preprint arXiv:2308.0846. DOI: `arXiv preprint arXiv:2308.0846.
<https://arxiv.org/abs/2308.08468>`_ <https://arxiv.org/abs/2308.08468>`_
.. warning:: .. warning::
The embedding is a truncated fourier expansion, and only ensures The embedding is a truncated Fourier expansion, and enforces periodic
function PBC and not for its derivatives. Ensuring approximate boundary conditions only for the function, and not for its derivatives.
periodicity in Enforcement of the approximate periodicity in the derivatives can be
the derivatives of :math:`u` can be done, and extensive performed. Extensive tests have shown (see referenced papers) that this
tests have shown (also in the reference papers) that this implementation implementation can correctly enforce the periodic boundary conditions on
can correctly compute the PBC on the derivatives up to the order the derivatives up to the order :math:`\sim 2,3`. This is not guaranteed
:math:`\sim 2,3`, while it is not guarantee the periodicity for for orders :math:`>3`. The PINA module is tested only for periodic
:math:`>3`. The PINA code is tested only for function PBC and not for boundary conditions on the function itself.
its derivatives.
""" """
def __init__(self, input_dimension, periods, output_dimension=None): def __init__(self, input_dimension, periods, output_dimension=None):
""" """
:param int input_dimension: The dimension of the input tensor, it can Initialization of the :class:`PeriodicBoundaryEmbedding` block.
be checked with `tensor.ndim` method.
:param float | int | dict periods: The periodicity in each dimension for :param int input_dimension: The dimension of the input tensor.
the input data. If ``float`` or ``int`` is passed, :param periods: The periodicity with respect to each dimension for the
the period is assumed constant for all the dimensions of the data. input data. If ``float`` or ``int`` is passed, the period is assumed
If a ``dict`` is passed the `dict.values` represent periods, to be constant over all the dimensions of the data. If a ``dict`` is
while the ``dict.keys`` represent the dimension where the passed the `dict.values` represent periods, while the ``dict.keys``
periodicity is applied. The `dict.keys` can either be `int` represent the dimension where the periodicity is enforced.
if working with ``torch.Tensor`` or ``str`` if The `dict.keys` can either be `int` if working with
working with ``LabelTensor``. :class:`torch.Tensor`, or ``str`` if working with
:class:`pina.label_tensor.LabelTensor`.
:type periods: float | int | dict
:param int output_dimension: The dimension of the output after the :param int output_dimension: The dimension of the output after the
fourier embedding. If not ``None`` a ``torch.nn.Linear`` layer Fourier embedding. If not ``None``, a :class:`torch.nn.Linear` layer
is applied to the fourier embedding output to match the desired is applied to the Fourier embedding output to match the desired
dimensionality, default ``None``. dimensionality. Default is ``None``.
:raises TypeError: If the periods dict is not consistent.
""" """
super().__init__() super().__init__()
@@ -98,9 +100,10 @@ class PeriodicBoundaryEmbedding(torch.nn.Module):
def forward(self, x): def forward(self, x):
""" """
Forward pass to compute the periodic boundary conditions embedding. Forward pass.
:param torch.Tensor x: Input tensor. :param x: The input tensor.
:type x: torch.Tensor | LabelTensor
:return: Periodic embedding of the input. :return: Periodic embedding of the input.
:rtype: torch.Tensor :rtype: torch.Tensor
""" """
@@ -125,12 +128,16 @@ class PeriodicBoundaryEmbedding(torch.nn.Module):
def _get_vars(self, x, indeces): def _get_vars(self, x, indeces):
""" """
Get variables from input tensor ordered by specific indeces. Get the variables from the input tensor, ordered by specific indices.
:param torch.Tensor x: The input tensor to extract. :param x: The input tensor from which to extract.
:param list[int] | list[str] indeces: List of indeces to extract. :type x: torch.Tensor | LabelTensor
:return: The extracted tensor given the indeces. :param indeces: The indices to extract.
:rtype: torch.Tensor :type indeces: list[int] | list[str]
:raises RuntimeError: If the indices are not consistent.
:raises RuntimeError: If the extraction is not possible.
:return: The extracted tensor.
:rtype: torch.Tensor | LabelTensor
""" """
if isinstance(indeces[0], str): if isinstance(indeces[0], str):
try: try:
@@ -146,75 +153,79 @@ class PeriodicBoundaryEmbedding(torch.nn.Module):
return x[..., indeces] return x[..., indeces]
else: else:
raise RuntimeError( raise RuntimeError(
"Not able to extract right indeces for tensor." "Not able to extract correct indeces for tensor."
" For more information refer to warning in the documentation." " For more information refer to warning in the documentation."
) )
@property @property
def period(self): def period(self):
""" """
The period of the periodic function to approximate. The period of the function.
:return: The period of the function.
:rtype: dict | float | int
""" """
return self._period return self._period
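
A hedged usage sketch of the embedding (constructor signature as in this diff; a constant period of 1 is assumed on both input coordinates):

>>> import torch
>>> emb = PeriodicBoundaryEmbedding(input_dimension=2, periods=1.0)
>>> x = torch.rand(100, 2)
>>> x_tilde = emb(x)  # Fourier-augmented input, periodic by construction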
class FourierFeatureEmbedding(torch.nn.Module): class FourierFeatureEmbedding(torch.nn.Module):
""" r"""
Fourier Feature Embedding class for encoding input features Fourier Feature Embedding class to encode the input features using random
using random Fourier features. Fourier features.
This class applies a Fourier transformation to the input features, which can
help in learning high-frequency variations in data. The class supports
multiscale feature embedding, creating embeddings for each scale specified
by the ``sigma`` parameter.
The Fourier Feature Embedding augments the input features as follows
(3.10 of original paper):
.. math::
\mathbf{x} \rightarrow \tilde{\mathbf{x}} = \left[
\cos\left( \mathbf{B} \mathbf{x} \right),
\sin\left( \mathbf{B} \mathbf{x} \right)\right],
where :math:`\mathbf{B}_{ij} \sim \mathcal{N}(0, \sigma^2)`.
If multiple ``sigma`` are passed, the resulting embeddings are concatenated:
.. math::
\mathbf{x} \rightarrow \tilde{\mathbf{x}} = \left[
\cos\left( \mathbf{B}^1 \mathbf{x} \right),
\sin\left( \mathbf{B}^1 \mathbf{x} \right),
\cos\left( \mathbf{B}^2 \mathbf{x} \right),
\sin\left( \mathbf{B}^2 \mathbf{x} \right),
\dots,
\cos\left( \mathbf{B}^M \mathbf{x} \right),
\sin\left( \mathbf{B}^M \mathbf{x} \right)\right],
where :math:`\mathbf{B}^k_{ij} \sim \mathcal{N}(0, \sigma_k^2) \quad k \in
(1, \dots, M)`.
.. seealso::
**Original reference**:
Wang, S., Wang, H., and Perdikaris, P. (2021).
*On the eigenvector bias of Fourier feature networks: From regression to
solving multi-scale PDEs with physics-informed neural networks.*
Computer Methods in Applied Mechanics and Engineering 384 (2021):
113938.
DOI: `10.1016/j.cma.2021.113938.
<https://doi.org/10.1016/j.cma.2021.113938>`_
""" """
def __init__(self, input_dimension, output_dimension, sigma): def __init__(self, input_dimension, output_dimension, sigma):
r""" """
This class applies a Fourier transformation to the input features, Initialization of the :class:`FourierFeatureEmbedding` block.
which can help in learning high-frequency variations in data.
If multiple sigma are provided, the class
supports multiscale feature embedding, creating embeddings for
each scale specified by the sigma.
The :obj:`FourierFeatureEmbedding` augments the input :param int input_dimension: The dimension of the input tensor.
by the following formula (3.10 of original paper): :param int output_dimension: The dimension of the output tensor. The
output is obtained as a concatenation of cosine and sine embeddings.
.. math:: :param sigma: The standard deviation used for the Fourier Embedding.
\mathbf{x} \rightarrow \tilde{\mathbf{x}} = \left[ This value must reflect the granularity of the scale in the
\cos\left( \mathbf{B} \mathbf{x} \right), differential equation solution.
\sin\left( \mathbf{B} \mathbf{x} \right)\right], :type sigma: float | int
:raises RuntimeError: If the output dimension is not an even number.
where :math:`\mathbf{B}_{ij} \sim \mathcal{N}(0, \sigma^2)`.
In case multiple ``sigma`` are passed, the resulting embeddings
are concateneted:
.. math::
\mathbf{x} \rightarrow \tilde{\mathbf{x}} = \left[
\cos\left( \mathbf{B}^1 \mathbf{x} \right),
\sin\left( \mathbf{B}^1 \mathbf{x} \right),
\cos\left( \mathbf{B}^2 \mathbf{x} \right),
\sin\left( \mathbf{B}^3 \mathbf{x} \right),
\dots,
\cos\left( \mathbf{B}^M \mathbf{x} \right),
\sin\left( \mathbf{B}^M \mathbf{x} \right)\right],
where :math:`\mathbf{B}^k_{ij} \sim \mathcal{N}(0, \sigma_k^2) \quad
k \in (1, \dots, M)`.
.. seealso::
**Original reference**:
Wang, Sifan, Hanwen Wang, and Paris Perdikaris. *On the eigenvector
bias of Fourier feature networks: From regression to solving
multi-scale PDEs with physics-informed neural networks.*
Computer Methods in Applied Mechanics and
Engineering 384 (2021): 113938.
DOI: `10.1016/j.cma.2021.113938.
<https://doi.org/10.1016/j.cma.2021.113938>`_
:param int input_dimension: The input vector dimension of the layer.
:param int output_dimension: The output dimension of the layer. The
output is obtained as a concatenation of the cosine and sine
embedding, hence it must be a multiple of two (even number).
:param int | float sigma: The standard deviation used for the
Fourier Embedding. This value must reflect the granularity of the
scale in the differential equation solution.
""" """
super().__init__() super().__init__()
@@ -242,10 +253,11 @@ class FourierFeatureEmbedding(torch.nn.Module):
def forward(self, x): def forward(self, x):
""" """
Forward pass to compute the fourier embedding. Forward pass.
:param torch.Tensor x: Input tensor. :param x: The input tensor.
:return: Fourier embeddings of the input. :type x: torch.Tensor | LabelTensor
:return: Fourier embedding of the input.
:rtype: torch.Tensor :rtype: torch.Tensor
""" """
# compute random matrix multiplication # compute random matrix multiplication
@@ -259,6 +271,9 @@ class FourierFeatureEmbedding(torch.nn.Module):
@property @property
def sigma(self): def sigma(self):
""" """
Returning the variance of the sampled matrix for Fourier Embedding. The standard deviation used for the Fourier Embedding.
:return: The standard deviation used for the Fourier Embedding.
:rtype: float | int
""" """
return self._sigma return self._sigma
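
The embedding formula above amounts to a few lines of tensor code. A self-contained sketch for the single-:math:`\sigma` case (independent of the PINA class; names are illustrative):

import torch

def fourier_embedding(x, output_dimension, sigma):
    # B_ij ~ N(0, sigma^2), mapping input_dim -> output_dimension / 2 frequencies
    B = sigma * torch.randn(x.shape[-1], output_dimension // 2)
    proj = x @ B
    # concatenate cosine and sine embeddings -> output_dimension features
    return torch.cat((torch.cos(proj), torch.sin(proj)), dim=-1)

x = torch.rand(100, 2)
y = fourier_embedding(x, output_dimension=64, sigma=1.0)  # shape [100, 64]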

View File

@@ -1,5 +1,5 @@
""" """
Module for Fourier Block implementation. Module for the Fourier Neural Operator Block class.
""" """
import torch import torch
@@ -15,15 +15,19 @@ from .spectral import (
class FourierBlock1D(nn.Module): class FourierBlock1D(nn.Module):
""" """
Fourier block implementation for three dimensional The inner block of the Fourier Neural Operator for 1-dimensional input
input tensor. The combination of Fourier blocks tensors.
make up the Fourier Neural Operator
The module computes the spectral convolution of the input with a linear
kernel in the Fourier space, and then it maps the input back to the physical
space. The output is then added to a linear transformation of the input in
the physical space. Finally, an activation function is applied to the output.
.. seealso:: .. seealso::
**Original reference**: Li, Z., Kovachki, N., Azizzadenesheli, K., **Original reference**: Li, Z., Kovachki, N., Azizzadenesheli, K.,
Liu, B., Bhattacharya, K., Stuart, A., & Anandkumar, A. (2020). *Fourier Liu, B., Bhattacharya, K., Stuart, A., & Anandkumar, A. (2020).
neural operator for parametric partial differential equations*. *Fourier neural operator for parametric partial differential equations*.
DOI: `arXiv preprint arXiv:2010.08895. DOI: `arXiv preprint arXiv:2010.08895.
<https://arxiv.org/abs/2010.08895>`_ <https://arxiv.org/abs/2010.08895>`_
@@ -36,22 +40,16 @@ class FourierBlock1D(nn.Module):
n_modes, n_modes,
activation=torch.nn.Tanh, activation=torch.nn.Tanh,
): ):
""" r"""
PINA implementation of Fourier block one dimension. The module computes Initialization of the :class:`FourierBlock1D` class.
the spectral convolution of the input with a linear kernel in the
fourier space, and then it maps the input back to the physical
space. The output is then added to a Linear tranformation of the
input in the physical space. Finally an activation function is
applied to the output.
The block expects an input of size ``[batch, input_numb_fields, N]``
and returns an output of size ``[batch, output_numb_fields, N]``.
:param int input_numb_fields: The number of channels for the input. :param int input_numb_fields: The number of channels for the input.
:param int output_numb_fields: The number of channels for the output. :param int output_numb_fields: The number of channels for the output.
:param list | tuple n_modes: Number of modes to select for each :param n_modes: The number of modes to select for each dimension.
dimension. It must be at most equal to the ``floor(N/2)+1``. It must be at most equal to :math:`\lfloor N/2 \rfloor + 1`.
:type n_modes: list[int] | tuple[int]
:param torch.nn.Module activation: The activation function. :param torch.nn.Module activation: The activation function.
Default is :class:`torch.nn.Tanh`.
""" """
super().__init__() super().__init__()
@@ -70,15 +68,11 @@ class FourierBlock1D(nn.Module):
def forward(self, x): def forward(self, x):
""" """
Forward computation for Fourier Block. It performs a spectral Forward pass of the block. It performs a spectral convolution and a
convolution and a linear transformation of the input and sum the linear transformation of the input. Then, it sums the results.
results.
:param x: The input tensor for fourier block, expect of size :param torch.Tensor x: The input tensor for performing the computation.
``[batch, input_numb_fields, x]``. :return: The output tensor.
:type x: torch.Tensor
:return: The output tensor obtained from the
fourier block of size ``[batch, output_numb_fields, x]``.
:rtype: torch.Tensor :rtype: torch.Tensor
""" """
return self._activation(self._spectral_conv(x) + self._linear(x)) return self._activation(self._spectral_conv(x) + self._linear(x))
@@ -86,18 +80,21 @@ class FourierBlock1D(nn.Module):
class FourierBlock2D(nn.Module): class FourierBlock2D(nn.Module):
""" """
Fourier block implementation for two dimensional The inner block of the Fourier Neural Operator for 2-dimensional input
input tensor. The combination of Fourier blocks tensors.
make up the Fourier Neural Operator
The module computes the spectral convolution of the input with a linear
kernel in the Fourier space, and then it maps the input back to the physical
space. The output is then added to a linear transformation of the input in
the physical space. Finally, an activation function is applied to the output.
.. seealso:: .. seealso::
**Original reference**: Li, Zongyi, et al. **Original reference**: Li, Z., Kovachki, N., Azizzadenesheli, K.,
*Fourier neural operator for parametric partial Liu, B., Bhattacharya, K., Stuart, A., & Anandkumar, A. (2020).
differential equations*. arXiv preprint *Fourier neural operator for parametric partial differential equations*.
arXiv:2010.08895 (2020) DOI: `arXiv preprint arXiv:2010.08895.
<https://arxiv.org/abs/2010.08895.pdf>`_. <https://arxiv.org/abs/2010.08895>`_
""" """
def __init__( def __init__(
@@ -107,24 +104,17 @@ class FourierBlock2D(nn.Module):
n_modes, n_modes,
activation=torch.nn.Tanh, activation=torch.nn.Tanh,
): ):
""" r"""
PINA implementation of Fourier block two dimensions. The module computes Initialization of the :class:`FourierBlock2D` class.
the spectral convolution of the input with a linear kernel in the
fourier space, and then it maps the input back to the physical
space. The output is then added to a Linear tranformation of the
input in the physical space. Finally an activation function is
applied to the output.
The block expects an input of size
``[batch, input_numb_fields, Nx, Ny]`` and returns an output of size
``[batch, output_numb_fields, Nx, Ny]``.
:param int input_numb_fields: The number of channels for the input. :param int input_numb_fields: The number of channels for the input.
:param int output_numb_fields: The number of channels for the output. :param int output_numb_fields: The number of channels for the output.
:param list | tuple n_modes: Number of modes to select for each :param n_modes: The number of modes to select for each dimension.
dimension. It must be at most equal to the ``floor(Nx/2)+1`` It must be at most equal to :math:`\lfloor Nx/2 \rfloor + 1`,
and ``floor(Ny/2)+1``. :math:`\lfloor Ny/2 \rfloor + 1`.
:type n_modes: list[int] | tuple[int]
:param torch.nn.Module activation: The activation function. :param torch.nn.Module activation: The activation function.
Default is :class:`torch.nn.Tanh`.
""" """
super().__init__() super().__init__()
@@ -142,15 +132,11 @@ class FourierBlock2D(nn.Module):
def forward(self, x): def forward(self, x):
""" """
Forward computation for Fourier Block. It performs a spectral Forward pass of the block. It performs a spectral convolution and a
convolution and a linear transformation of the input and sum the linear transformation of the input. Then, it sums the results.
results.
:param x: The input tensor for fourier block, expect of size :param torch.Tensor x: The input tensor for performing the computation.
``[batch, input_numb_fields, x, y]``. :return: The output tensor.
:type x: torch.Tensor
:return: The output tensor obtained from the
fourier block of size ``[batch, output_numb_fields, x, y, z]``.
:rtype: torch.Tensor :rtype: torch.Tensor
""" """
return self._activation(self._spectral_conv(x) + self._linear(x)) return self._activation(self._spectral_conv(x) + self._linear(x))
@@ -158,18 +144,21 @@ class FourierBlock2D(nn.Module):
class FourierBlock3D(nn.Module): class FourierBlock3D(nn.Module):
""" """
Fourier block implementation for three dimensional The inner block of the Fourier Neural Operator for 3-dimensional input
input tensor. The combination of Fourier blocks tensors.
make up the Fourier Neural Operator
The module computes the spectral convolution of the input with a linear
kernel in the Fourier space, and then it maps the input back to the physical
space. The output is then added to a linear transformation of the input in
the physical space. Finally, an activation function is applied to the output.
.. seealso:: .. seealso::
**Original reference**: Li, Zongyi, et al. **Original reference**: Li, Z., Kovachki, N., Azizzadenesheli, K.,
*Fourier neural operator for parametric partial Liu, B., Bhattacharya, K., Stuart, A., & Anandkumar, A. (2020).
differential equations*. arXiv preprint *Fourier neural operator for parametric partial differential equations*.
arXiv:2010.08895 (2020) DOI: `arXiv preprint arXiv:2010.08895.
<https://arxiv.org/abs/2010.08895.pdf>`_. <https://arxiv.org/abs/2010.08895>`_
""" """
def __init__( def __init__(
@@ -179,24 +168,17 @@ class FourierBlock3D(nn.Module):
n_modes, n_modes,
activation=torch.nn.Tanh, activation=torch.nn.Tanh,
): ):
""" r"""
PINA implementation of Fourier block three dimensions. The module Initialization of the :class:`FourierBlock3D` class.
computes the spectral convolution of the input with a linear kernel in
the fourier space, and then it maps the input back to the physical
space. The output is then added to a Linear tranformation of the
input in the physical space. Finally an activation function is
applied to the output.
The block expects an input of size
``[batch, input_numb_fields, Nx, Ny, Nz]`` and returns an output of size
``[batch, output_numb_fields, Nx, Ny, Nz]``.
:param int input_numb_fields: The number of channels for the input. :param int input_numb_fields: The number of channels for the input.
:param int output_numb_fields: The number of channels for the output. :param int output_numb_fields: The number of channels for the output.
:param list | tuple n_modes: Number of modes to select for each :param n_modes: The number of modes to select for each dimension.
dimension. It must be at most equal to the ``floor(Nx/2)+1``, It must be at most equal to :math:`\lfloor Nx/2 \rfloor + 1`,
``floor(Ny/2)+1`` and ``floor(Nz/2)+1``. :math:`\lfloor Ny/2 \rfloor + 1`, :math:`\lfloor Nz/2 \rfloor + 1`.
:type n_modes: list[int] | tuple[int]
:param torch.nn.Module activation: The activation function. :param torch.nn.Module activation: The activation function.
Default is :class:`torch.nn.Tanh`.
""" """
super().__init__() super().__init__()
@@ -214,15 +196,11 @@ class FourierBlock3D(nn.Module):
def forward(self, x): def forward(self, x):
""" """
Forward computation for Fourier Block. It performs a spectral Forward pass of the block. It performs a spectral convolution and a
convolution and a linear transformation of the input and sum the linear transformation of the input. Then, it sums the results.
results.
:param x: The input tensor for fourier block, expect of size :param torch.Tensor x: The input tensor for performing the computation.
``[batch, input_numb_fields, x, y, z]``. :return: The output tensor.
:type x: torch.Tensor
:return: The output tensor obtained from the
fourier block of size ``[batch, output_numb_fields, x, y, z]``.
:rtype: torch.Tensor :rtype: torch.Tensor
""" """
return self._activation(self._spectral_conv(x) + self._linear(x)) return self._activation(self._spectral_conv(x) + self._linear(x))
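
A hedged usage sketch for the 1-dimensional block (the input follows the ``[batch, input_numb_fields, N]`` convention stated above; ``n_modes`` must satisfy the bound :math:`\lfloor N/2 \rfloor + 1`):

>>> import torch
>>> block = FourierBlock1D(input_numb_fields=3, output_numb_fields=3, n_modes=16)
>>> x = torch.rand(8, 3, 64)  # n_modes = 16 <= 64 // 2 + 1 = 33
>>> y = block(x)              # expected shape [8, 3, 64]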

View File

@@ -1,5 +1,5 @@
""" """
Module containing the Graph Integral Layer class. Module for the Graph Neural Operator Block class.
""" """
import torch import torch
@@ -8,7 +8,7 @@ from torch_geometric.nn import MessagePassing
class GNOBlock(MessagePassing): class GNOBlock(MessagePassing):
""" """
Graph Neural Operator (GNO) Block using PyG MessagePassing. The inner block of the Graph Neural Operator, based on Message Passing.
""" """
def __init__( def __init__(
@@ -22,11 +22,22 @@ class GNOBlock(MessagePassing):
external_func=None, external_func=None,
): ):
""" """
Initialize the GNOBlock. Initialization of the :class:`GNOBlock` class.
:param width: Hidden dimension of node features. :param int width: The width of the kernel.
:param edges_features: Number of edge features. :param int edge_features: The number of edge features.
:param n_layers: Number of layers in edge transformation MLP. :param int n_layers: The number of kernel layers. Default is ``2``.
:param layers: A list specifying the number of neurons for each layer
of the neural network. If not ``None``, it overrides the
``inner_size`` and ``n_layers`` parameters. Default is ``None``.
:type layers: list[int] | tuple[int]
:param int inner_size: The size of the inner layer. Default is ``None``.
:param torch.nn.Module internal_func: The activation function applied to
the output of each layer. If ``None``, it uses the
:class:`torch.nn.Tanh` activation. Default is ``None``.
:param torch.nn.Module external_func: The activation function applied to
the output of the block. If ``None``, it uses the
:class:`torch.nn.Tanh` activation. Default is ``None``.
""" """
from ...model.feed_forward import FeedForward from ...model.feed_forward import FeedForward
@@ -51,12 +62,13 @@ class GNOBlock(MessagePassing):
def message_and_aggregate(self, edge_index, x, edge_attr): def message_and_aggregate(self, edge_index, x, edge_attr):
""" """
Combines message and aggregation. Combine messages and perform aggregation.
:param edge_index: COO format edge indices. :param torch.Tensor edge_index: The edge index.
:param x: Node feature matrix [num_nodes, width]. :param torch.Tensor x: The node feature matrix.
:param edge_attr: Edge features [num_edges, edge_dim]. :param torch.Tensor edge_attr: The edge features.
:return: Aggregated messages. :return: The aggregated messages.
:rtype: torch.Tensor
""" """
# Edge features are transformed into a matrix of shape # Edge features are transformed into a matrix of shape
# [num_edges, width, width] # [num_edges, width, width]
@@ -68,27 +80,33 @@ class GNOBlock(MessagePassing):
def edge_update(self, edge_attr): def edge_update(self, edge_attr):
""" """
Updates edge features. Update edge features.
:param torch.Tensor edge_attr: The edge features.
:return: The updated edge features.
:rtype: torch.Tensor
""" """
return edge_attr return edge_attr
def update(self, aggr_out, x): def update(self, aggr_out, x):
""" """
Updates node features. Update node features.
:param aggr_out: Aggregated messages. :param torch.Tensor aggr_out: The aggregated messages.
:param x: Node feature matrix. :param torch.Tensor x: The node feature matrix.
:return: Updated node features. :return: The updated node features.
:rtype: torch.Tensor
""" """
return aggr_out + self.W(x) return aggr_out + self.W(x)
def forward(self, x, edge_index, edge_attr): def forward(self, x, edge_index, edge_attr):
""" """
Forward pass of the GNOBlock. Forward pass of the block.
:param x: Node features. :param torch.Tensor x: The node features.
:param edge_index: Edge indices. :param torch.Tensor edge_index: The edge indices.
:param edge_attr: Edge features. :param torch.Tensor edge_attr: The edge features.
:return: Updated node features. :return: The updated node features.
:rtype: torch.Tensor
""" """
return self.func(self.propagate(edge_index, x=x, edge_attr=edge_attr)) return self.func(self.propagate(edge_index, x=x, edge_attr=edge_attr))
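
The message computation summarized above (edge features mapped to one ``width x width`` kernel per edge, then applied to the source node features) can be sketched outside of PyG as follows; ``edge_mlp`` and the function name are illustrative:

import torch

def gno_message(x_j, edge_attr, edge_mlp, width):
    # x_j: [num_edges, width] source node features
    # edge_attr: [num_edges, edge_features] edge features
    # transform edge features into a [num_edges, width, width] kernel
    kernel = edge_mlp(edge_attr).view(-1, width, width)
    # apply each kernel to the corresponding source node features
    return torch.bmm(kernel, x_j.unsqueeze(-1)).squeeze(-1)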

View File

@@ -1,5 +1,5 @@
""" """
Module for performing integral for continuous convolution Module to perform integration for continuous convolution.
""" """
import torch import torch
@@ -7,17 +7,18 @@ import torch
class Integral: class Integral:
""" """
Integral class for continous convolution Class allowing integration for continuous convolution.
""" """
def __init__(self, param): def __init__(self, param):
""" """
Initialize the integral class Initialization of the :class:`Integral` class.
:param param: type of continuous convolution :param param: The type of continuous convolution.
:type param: string :type param: str
:raises TypeError: If the parameter is neither ``discrete``
nor ``continuous``.
""" """
if param == "discrete": if param == "discrete":
self.make_integral = self.integral_param_disc self.make_integral = self.integral_param_disc
elif param == "continuous": elif param == "continuous":
@@ -26,46 +27,47 @@ class Integral:
raise TypeError raise TypeError
def __call__(self, *args, **kwds): def __call__(self, *args, **kwds):
"""
Call the integral function.
:param list args: Arguments for the integral function.
:param dict kwds: Keyword arguments for the integral function.
:return: The integral of the input.
:rtype: torch.Tensor
"""
return self.make_integral(*args, **kwds) return self.make_integral(*args, **kwds)
def _prepend_zero(self, x): def _prepend_zero(self, x):
"""Create bins for performing integral """
Create bins to perform integration.
:param x: input tensor :param torch.Tensor x: The input tensor.
:type x: torch.tensor :return: The bins for the integral.
:return: bins for integrals :rtype: torch.Tensor
:rtype: torch.tensor
""" """
return torch.cat((torch.zeros(1, dtype=x.dtype, device=x.device), x)) return torch.cat((torch.zeros(1, dtype=x.dtype, device=x.device), x))
def integral_param_disc(self, x, y, idx): def integral_param_disc(self, x, y, idx):
"""Perform discretize integral """
with discrete parameters Perform discrete integration with discrete parameters.
:param x: input vector :param torch.Tensor x: The first input tensor.
:type x: torch.tensor :param torch.Tensor y: The second input tensor.
:param y: input vector :param list[int] idx: The indices for different strides.
:type y: torch.tensor :return: The discrete integral.
:param idx: indeces for different strides :rtype: torch.Tensor
:type idx: list
:return: integral
:rtype: torch.tensor
""" """
cs_idxes = self._prepend_zero(torch.cumsum(torch.tensor(idx), 0)) cs_idxes = self._prepend_zero(torch.cumsum(torch.tensor(idx), 0))
cs = self._prepend_zero(torch.cumsum(x.flatten() * y.flatten(), 0)) cs = self._prepend_zero(torch.cumsum(x.flatten() * y.flatten(), 0))
return cs[cs_idxes[1:]] - cs[cs_idxes[:-1]] return cs[cs_idxes[1:]] - cs[cs_idxes[:-1]]
def integral_param_cont(self, x, y, idx): def integral_param_cont(self, x, y, idx):
"""Perform discretize integral for continuous convolution """
with continuous parameters Perform continuous integration with continuous parameters.
:param x: input vector :param torch.Tensor x: The first input tensor.
:type x: torch.tensor :param torch.Tensor y: The second input tensor.
:param y: input vector :param list[int] idx: The indices for different strides.
:type y: torch.tensor :raises NotImplementedError: The method is not implemented.
:param idx: indeces for different strides
:type idx: list
:return: integral
:rtype: torch.tensor
""" """
raise NotImplementedError raise NotImplementedError
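
A worked example of the cumulative-sum trick in ``integral_param_disc``: prepending a zero turns the cumulative sum into bin edges, so each stride's integral is a difference of two entries.

>>> import torch
>>> integral = Integral("discrete")
>>> x = torch.tensor([1., 2., 3., 4., 5.])
>>> y = torch.ones(5)
>>> idx = [2, 3]         # 2 points in the first stride, 3 in the second
>>> integral(x, y, idx)  # segment sums: tensor([3., 12.])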

View File

@@ -1,4 +1,4 @@
"""Module for Averaging Neural Operator Layer class.""" """Module for the Low Rank Neural Operator Block class."""
import torch import torch
@@ -6,30 +6,8 @@ from ...utils import check_consistency
class LowRankBlock(torch.nn.Module): class LowRankBlock(torch.nn.Module):
r""" """
The PINA implementation of the inner layer of the Averaging Neural Operator. The inner block of the Low Rank Neural Operator.
The operator layer performs an affine transformation where the convolution
is approximated with a local average. Given the input function
:math:`v(x)\in\mathbb{R}^{\rm{emb}}` the layer computes
the operator update :math:`K(v)` as:
.. math::
K(v) = \sigma\left(Wv(x) + b + \sum_{i=1}^r \langle
\psi^{(i)} , v(x) \rangle \phi^{(i)} \right)
where:
* :math:`\mathbb{R}^{\rm{emb}}` is the embedding (hidden) size
corresponding to the ``hidden_size`` object
* :math:`\sigma` is a non-linear activation, corresponding to the
``func`` object
* :math:`W\in\mathbb{R}^{\rm{emb}\times\rm{emb}}` is a tunable matrix.
* :math:`b\in\mathbb{R}^{\rm{emb}}` is a tunable bias.
* :math:`\psi^{(i)}\in\mathbb{R}^{\rm{emb}}` and
:math:`\phi^{(i)}\in\mathbb{R}^{\rm{emb}}` are :math:`r` a low rank
basis functions mapping.
* :math:`b\in\mathbb{R}^{\rm{emb}}` is a tunable bias.
.. seealso:: .. seealso::
@@ -38,7 +16,6 @@ class LowRankBlock(torch.nn.Module):
(2023). *Neural operator: Learning maps between function (2023). *Neural operator: Learning maps between function
spaces with applications to PDEs*. Journal of Machine Learning spaces with applications to PDEs*. Journal of Machine Learning
Research, 24(89), 1-97. Research, 24(89), 1-97.
""" """
def __init__( def __init__(
@@ -51,30 +28,25 @@ class LowRankBlock(torch.nn.Module):
func=torch.nn.Tanh, func=torch.nn.Tanh,
bias=True, bias=True,
): ):
""" r"""
:param int input_dimensions: The number of input components of the Initialization of the :class:`LowRankBlock` class.
model.
Expected tensor shape of the form :math:`(*, d)`, where * :param int input_dimensions: The input dimension of the field.
means any number of dimensions including none, :param int embedding_dimenion: The embedding dimension of the field.
and :math:`d` the ``input_dimensions``. :param int rank: The rank of the low rank approximation. The expected
:param int embedding_dimenion: Size of the embedding dimension of the value is :math:`2d`, where :math:`d` is the rank of each basis
field. function.
:param int rank: The rank number of the basis approximation components :param int inner_size: The number of neurons for each hidden layer in
of the model. Expected tensor shape of the form :math:`(*, 2d)`, the basis function neural network. Default is ``20``.
where * means any number of dimensions including none, :param int n_layers: The number of hidden layers in the basis function
and :math:`2d` the ``rank`` for both basis functions. neural network. Default is ``2``.
:param int inner_size: Number of neurons in the hidden layer(s) for the :param func: The activation function. If a list is passed, it must have
basis function network. Default is 20. the same length as ``n_layers``. If a single function is passed, it
:param int n_layers: Number of hidden layers. for the is used for all layers, except for the last one.
basis function network. Default is 2. Default is :class:`torch.nn.Tanh`.
:param func: The activation function to use for the :type func: torch.nn.Module | list[torch.nn.Module]
basis function network. If a single :param bool bias: If ``True`` bias is considered for the basis function
:class:`torch.nn.Module` is passed, this is used as neural network. Default is ``True``.
activation function after any layers, except the last one.
If a list of Modules is passed,
they are used as activation functions at any layers, in order.
:param bool bias: If ``True`` the MLP will consider some bias for the
basis function network.
""" """
super().__init__() super().__init__()
from ..feed_forward import FeedForward from ..feed_forward import FeedForward
@@ -96,26 +68,16 @@ class LowRankBlock(torch.nn.Module):
def forward(self, x, coords): def forward(self, x, coords):
r""" r"""
Forward pass of the layer, it performs an affine transformation of Forward pass of the block. It performs an affine transformation of the
the field, and a low rank approximation by field, followed by a low rank approximation. The latter is performed by
doing a dot product of the basis means of a dot product of the basis :math:`\psi^{(i)}` with the vector
:math:`\psi^{(i)}` with the filed vector :math:`v`, and use this field :math:`v` to compute coefficients used to expand
coefficients to expand :math:`\phi^{(i)}` evaluated in the :math:`\phi^{(i)}`, evaluated at the spatial input :math:`x`.
spatial input :math:`x`.
:param torch.Tensor x: The input tensor for performing the :param torch.Tensor x: The input tensor for performing the computation.
computation. It expects a tensor :math:`B \times N \times D`, :param torch.Tensor coords: The coordinates for which the field is
where :math:`B` is the batch_size, :math:`N` the number of points evaluated to perform the computation.
in the mesh, :math:`D` the dimension of the problem. In particular :return: The output tensor.
:math:`D` is the codomain of the function :math:`v`. For example
a scalar function has :math:`D=1`, a 4-dimensional vector function
:math:`D=4`.
:param torch.Tensor coords: The coordinates in which the field is
evaluated for performing the computation. It expects a
tensor :math:`B \times N \times d`,
where :math:`B` is the batch_size, :math:`N` the number of points
in the mesh, :math:`D` the dimension of the domain.
:return: The output tensor obtained from Average Neural Operator Block.
:rtype: torch.Tensor :rtype: torch.Tensor
""" """
# extract basis # extract basis
@@ -138,5 +100,8 @@ class LowRankBlock(torch.nn.Module):
def rank(self): def rank(self):
""" """
The basis rank. The basis rank.
:return: The basis rank.
:rtype: int
""" """
return self._rank return self._rank
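Taken together, the block implements the update :math:`\sigma(Wv + b + \sum_i \langle\psi^{(i)}, v\rangle \phi^{(i)})`. A hedged sketch of just that update with random stand-in tensors (the shapes and the pointwise inner product are assumptions for illustration, not the library code):

import torch

B, N, emb, r = 2, 50, 8, 4
v = torch.randn(B, N, emb)       # input field v(x) at N points
psi = torch.randn(B, N, r, emb)  # stand-in for the learned psi^(i)(x)
phi = torch.randn(B, N, r, emb)  # stand-in for the learned phi^(i)(x)
W = torch.nn.Linear(emb, emb)    # affine part W v + b

coeff = torch.einsum("bnre,bne->bnr", psi, v)         # <psi^(i), v>
low_rank = torch.einsum("bnr,bnre->bne", coeff, phi)  # expand on phi^(i)
out = torch.tanh(W(v) + low_rank)                     # sigma as tanh here
print(out.shape)  # torch.Size([2, 50, 8])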

View File

@@ -1,4 +1,4 @@
"""Module for OrthogonalBlock.""" """Module for the Orthogonal Block class."""
import torch import torch
from ...utils import check_consistency from ...utils import check_consistency
@@ -6,21 +6,24 @@ from ...utils import check_consistency
class OrthogonalBlock(torch.nn.Module): class OrthogonalBlock(torch.nn.Module):
""" """
Module to make the input orthonormal. Orthogonal Block.
The module takes a tensor of size :math:`[N, M]` and returns a tensor of
size :math:`[N, M]` where the columns are orthonormal. The block performs a This block transforms an input tensor of shape :math:`[N, M]` into a tensor
Gram Schmidt orthogonalization process for the input, see of the same shape whose columns are orthonormal. The block performs the
Gram-Schmidt orthogonalization, see
`here <https://en.wikipedia.org/wiki/Gram%E2%80%93Schmidt_process>` for `here <https://en.wikipedia.org/wiki/Gram%E2%80%93Schmidt_process>` for
details. details.
""" """
def __init__(self, dim=-1, requires_grad=True): def __init__(self, dim=-1, requires_grad=True):
""" """
Initialize the OrthogonalBlock module. Initialization of the :class:`OrthogonalBlock` class.
:param int dim: The dimension where to orthogonalize. :param int dim: The dimension on which orthogonalization is performed.
:param bool requires_grad: If autograd should record operations on If ``-1``, the orthogonalization is performed on the last dimension.
the returned tensor, defaults to True. Default is ``-1``.
:param bool requires_grad: If ``True``, the gradients are computed
during the backward pass. Default is ``True``.
""" """
super().__init__() super().__init__()
# store dim # store dim
@@ -31,14 +34,13 @@ class OrthogonalBlock(torch.nn.Module):
def forward(self, X): def forward(self, X):
""" """
Forward pass of the OrthogonalBlock module using a Gram-Schmidt Forward pass.
algorithm.
:raises Warning: If the dimension is greater than the other dimensions. :param torch.Tensor X: The input tensor to orthogonalize.
:raises Warning: If the chosen dimension is greater than the other
:param torch.Tensor X: The input tensor to orthogonalize. The input must dimensions in the input.
be of dimensions :math:`[N, M]`.
:return: The orthonormal tensor. :return: The orthonormal tensor.
:rtype: torch.Tensor
""" """
# check dim is less than all the other dimensions # check dim is less than all the other dimensions
if X.shape[self.dim] > min(X.shape): if X.shape[self.dim] > min(X.shape):
@@ -65,13 +67,12 @@ class OrthogonalBlock(torch.nn.Module):
def _differentiable_copy(self, result, idx, value): def _differentiable_copy(self, result, idx, value):
""" """
Perform a differentiable copy operation on a tensor. Perform a differentiable copy operation.
:param torch.Tensor result: The tensor where values will be copied to. :param torch.Tensor result: The tensor where values are copied to.
:param int idx: The index along the specified dimension where the :param int idx: The index along the specified dimension where the
value will be copied. values are copied.
:param torch.Tensor value: The tensor value to copy into the :param torch.Tensor value: The tensor value to copy into ``result``.
result tensor.
:return: A new tensor with the copied values. :return: A new tensor with the copied values.
:rtype: torch.Tensor :rtype: torch.Tensor
""" """
@@ -82,7 +83,7 @@ class OrthogonalBlock(torch.nn.Module):
@property @property
def dim(self): def dim(self):
""" """
Get the dimension along which operations are performed. The dimension along which operations are performed.
:return: The current dimension value. :return: The current dimension value.
:rtype: int :rtype: int
@@ -94,10 +95,11 @@ class OrthogonalBlock(torch.nn.Module):
""" """
Set the dimension along which operations are performed. Set the dimension along which operations are performed.
:param value: The dimension to be set, which must be 0, 1, or -1. :param value: The dimension to be set. Must be either ``0``, ``1``, or
``-1``.
:type value: int :type value: int
:raises IndexError: If the provided dimension is not in the :raises IndexError: If the provided dimension is not ``0``, ``1``, or
range [-1, 1]. ``-1``.
""" """
# check consistency # check consistency
check_consistency(value, int) check_consistency(value, int)
@@ -115,7 +117,7 @@ class OrthogonalBlock(torch.nn.Module):
Indicates whether gradient computation is required for operations Indicates whether gradient computation is required for operations
on the tensors. on the tensors.
:return: True if gradients are required, False otherwise. :return: ``True`` if gradients are required, ``False`` otherwise.
:rtype: bool :rtype: bool
""" """
return self._requires_grad return self._requires_grad
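For reference, a minimal differentiable Gram-Schmidt pass over columns, independent of the class above (a sketch of the technique, not the block's exact implementation):

import torch

def gram_schmidt_columns(X):
    cols = []
    for j in range(X.shape[1]):
        v = X[:, j]
        for q in cols:
            v = v - (q @ v) * q  # remove the component along q
        cols.append(v / v.norm())
    return torch.stack(cols, dim=1)

X = torch.randn(5, 3)
Q = gram_schmidt_columns(X)
print(torch.allclose(Q.T @ Q, torch.eye(3), atol=1e-4))  # True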

View File

@@ -5,23 +5,26 @@ import torch
class PODBlock(torch.nn.Module): class PODBlock(torch.nn.Module):
""" """
POD layer: it projects the input field on the proper orthogonal Proper Orthogonal Decomposition block.
decomposition basis. It needs to be fitted to the data before being used
with the method :meth:`fit`, which invokes the singular value decomposition. This block projects the input field on the proper orthogonal decomposition
The layer is not trainable. basis. Before being used, it must be fitted to the data with the ``fit``
method, which invokes the singular value decomposition. This block is not
trainable.
.. note:: .. note::
All the POD modes are stored in memory, avoiding to recompute them when All the POD modes are stored in memory, avoiding recomputing them when
the rank changes but increasing the memory usage. the rank changes, leading to increased memory usage.
""" """
def __init__(self, rank, scale_coefficients=True): def __init__(self, rank, scale_coefficients=True):
""" """
Build the POD layer with the given rank. Initialization of the :class:`PODBlock` class.
:param int rank: The rank of the POD layer. :param int rank: The rank of the POD layer.
:param bool scale_coefficients: If True, the coefficients are scaled :param bool scale_coefficients: If ``True``, the coefficients are scaled
after the projection to have zero mean and unit variance. after the projection to have zero mean and unit variance.
Default is ``True``.
""" """
super().__init__() super().__init__()
self.__scale_coefficients = scale_coefficients self.__scale_coefficients = scale_coefficients
@@ -34,12 +37,19 @@ class PODBlock(torch.nn.Module):
""" """
The rank of the POD layer. The rank of the POD layer.
:return: The rank of the POD layer.
:rtype: int :rtype: int
""" """
return self._rank return self._rank
@rank.setter @rank.setter
def rank(self, value): def rank(self, value):
"""
Set the rank of the POD layer.
:param int value: The new rank of the POD layer.
:raises ValueError: If the rank is not a positive integer.
"""
if value < 1 or not isinstance(value, int): if value < 1 or not isinstance(value, int):
raise ValueError("The rank must be positive integer") raise ValueError("The rank must be positive integer")
@@ -48,9 +58,10 @@ class PODBlock(torch.nn.Module):
@property @property
def basis(self): def basis(self):
""" """
The POD basis. It is a matrix whose columns are the first `self.rank` The POD basis. It is a matrix whose columns are the first ``rank`` POD
POD modes. modes.
:return: The POD basis.
:rtype: torch.Tensor :rtype: torch.Tensor
""" """
if self._basis is None: if self._basis is None:
@@ -61,9 +72,11 @@ class PODBlock(torch.nn.Module):
@property @property
def scaler(self): def scaler(self):
""" """
The scaler. It is a dictionary with the keys `'mean'` and `'std'` that Return the scaler dictionary, having keys ``mean`` and ``std``
store the mean and the standard deviation of the coefficients. corresponding to the mean and the standard deviation of the
coefficients, respectively.
:return: The scaler dictionary.
:rtype: dict :rtype: dict
""" """
if self._scaler is None: if self._scaler is None:
@@ -77,9 +90,9 @@ class PODBlock(torch.nn.Module):
@property @property
def scale_coefficients(self): def scale_coefficients(self):
""" """
If True, the coefficients are scaled after the projection to have zero The flag indicating if the coefficients are scaled after the projection.
mean and unit variance.
:return: The flag indicating if the coefficients are scaled.
:rtype: bool :rtype: bool
""" """
return self.__scale_coefficients return self.__scale_coefficients
@@ -87,10 +100,10 @@ class PODBlock(torch.nn.Module):
def fit(self, X, randomized=True): def fit(self, X, randomized=True):
""" """
Set the POD basis by performing the singular value decomposition of the Set the POD basis by performing the singular value decomposition of the
given tensor. If `self.scale_coefficients` is True, the coefficients given tensor. If ``self.scale_coefficients`` is ``True``, the coefficients
are scaled after the projection to have zero mean and unit variance. are scaled after the projection to have zero mean and unit variance.
:param torch.Tensor X: The tensor to be reduced. :param torch.Tensor X: The input tensor to be reduced.
""" """
self._fit_pod(X, randomized) self._fit_pod(X, randomized)
@@ -99,10 +112,8 @@ class PODBlock(torch.nn.Module):
def _fit_scaler(self, coeffs): def _fit_scaler(self, coeffs):
""" """
Private merhod that computes the mean and the standard deviation of the Compute the mean and the standard deviation of the given coefficients,
given coefficients, allowing to scale them to have zero mean and unit which are then stored in ``self._scaler``.
variance. Mean and standard deviation are stored in the private member
`_scaler`.
:param torch.Tensor coeffs: The coefficients to be scaled. :param torch.Tensor coeffs: The coefficients to be scaled.
""" """
@@ -113,8 +124,8 @@ class PODBlock(torch.nn.Module):
def _fit_pod(self, X, randomized): def _fit_pod(self, X, randomized):
""" """
Private method that computes the POD basis of the given tensor and Compute the POD basis of the given tensor, which is then stored in
stores it in the private member `_basis`. ``self._basis``.
:param torch.Tensor X: The tensor to be reduced. :param torch.Tensor X: The tensor to be reduced.
""" """
@@ -136,9 +147,7 @@ class PODBlock(torch.nn.Module):
def forward(self, X): def forward(self, X):
""" """
The forward pass of the POD layer. By default it executes the The forward pass of the POD layer.
:meth:`reduce` method, reducing the input tensor to its POD
representation. The POD layer needs to be fitted before being used.
:param torch.Tensor X: The input tensor to be reduced. :param torch.Tensor X: The input tensor to be reduced.
:return: The reduced tensor. :return: The reduced tensor.
@@ -148,10 +157,11 @@ class PODBlock(torch.nn.Module):
def reduce(self, X): def reduce(self, X):
""" """
Reduce the input tensor to its POD representation. The POD layer needs Reduce the input tensor to its POD representation. The POD layer must
to be fitted before being used. be fitted before being used.
:param torch.Tensor X: The input tensor to be reduced. :param torch.Tensor X: The input tensor to be reduced.
:raises RuntimeError: If the POD layer is not fitted.
:return: The reduced tensor. :return: The reduced tensor.
:rtype: torch.Tensor :rtype: torch.Tensor
""" """
@@ -176,6 +186,7 @@ class PODBlock(torch.nn.Module):
to be fitted before being used. to be fitted before being used.
:param torch.Tensor coeff: The coefficients to be expanded. :param torch.Tensor coeff: The coefficients to be expanded.
:raises RuntimeError: If the POD layer is not fitted.
:return: The expanded tensor. :return: The expanded tensor.
:rtype: torch.Tensor :rtype: torch.Tensor
""" """

View File

@@ -1,4 +1,4 @@
"""Module for Radial Basis Function Interpolation layer.""" """Module for the Radial Basis Function Interpolation layer."""
import math import math
import warnings import warnings
@@ -10,6 +10,10 @@ from ...utils import check_consistency
def linear(r): def linear(r):
""" """
Linear radial basis function. Linear radial basis function.
:param torch.Tensor r: Distance between points.
:return: The linear radial basis function.
:rtype: torch.Tensor
""" """
return -r return -r
@@ -17,6 +21,11 @@ def linear(r):
def thin_plate_spline(r, eps=1e-7): def thin_plate_spline(r, eps=1e-7):
""" """
Thin plate spline radial basis function. Thin plate spline radial basis function.
:param torch.Tensor r: Distance between points.
:param float eps: Small value to avoid log(0).
:return: The thin plate spline radial basis function.
:rtype: torch.Tensor
""" """
r = torch.clamp(r, min=eps) r = torch.clamp(r, min=eps)
return r**2 * torch.log(r) return r**2 * torch.log(r)
@@ -25,6 +34,10 @@ def thin_plate_spline(r, eps=1e-7):
def cubic(r): def cubic(r):
""" """
Cubic radial basis function. Cubic radial basis function.
:param torch.Tensor r: Distance between points.
:return: The cubic radial basis function.
:rtype: torch.Tensor
""" """
return r**3 return r**3
@@ -32,6 +45,10 @@ def cubic(r):
def quintic(r): def quintic(r):
""" """
Quintic radial basis function. Quintic radial basis function.
:param torch.Tensor r: Distance between points.
:return: The quintic radial basis function.
:rtype: torch.Tensor
""" """
return -(r**5) return -(r**5)
@@ -39,6 +56,10 @@ def quintic(r):
def multiquadric(r): def multiquadric(r):
""" """
Multiquadric radial basis function. Multiquadric radial basis function.
:param torch.Tensor r: Distance between points.
:return: The multiquadric radial basis function.
:rtype: torch.Tensor
""" """
return -torch.sqrt(r**2 + 1) return -torch.sqrt(r**2 + 1)
@@ -46,6 +67,10 @@ def multiquadric(r):
def inverse_multiquadric(r): def inverse_multiquadric(r):
""" """
Inverse multiquadric radial basis function. Inverse multiquadric radial basis function.
:param torch.Tensor r: Distance between points.
:return: The inverse multiquadric radial basis function.
:rtype: torch.Tensor
""" """
return 1 / torch.sqrt(r**2 + 1) return 1 / torch.sqrt(r**2 + 1)
@@ -53,6 +78,10 @@ def inverse_multiquadric(r):
def inverse_quadratic(r): def inverse_quadratic(r):
""" """
Inverse quadratic radial basis function. Inverse quadratic radial basis function.
:param torch.Tensor r: Distance between points.
:return: The inverse quadratic radial basis function.
:rtype: torch.Tensor
""" """
return 1 / (r**2 + 1) return 1 / (r**2 + 1)
@@ -60,6 +89,10 @@ def inverse_quadratic(r):
def gaussian(r): def gaussian(r):
""" """
Gaussian radial basis function. Gaussian radial basis function.
:param torch.Tensor r: Distance between points.
:return: The Gaussian radial basis function.
:rtype: torch.Tensor
""" """
return torch.exp(-(r**2)) return torch.exp(-(r**2))
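With any of the kernels above, exact interpolation (zero smoothing, no polynomial tail) reduces to a single linear solve. A hedged 1D sketch reusing the Gaussian kernel (epsilon scaling omitted):

import torch

def gaussian(r):
    return torch.exp(-(r**2))

y = torch.linspace(0, 1, 8).unsqueeze(-1)  # (8, 1) data points
d = torch.sin(2 * torch.pi * y)            # (8, 1) data values

K = gaussian(torch.cdist(y, y))            # (8, 8) kernel matrix
w = torch.linalg.solve(K, d)               # interpolation weights

x = torch.rand(5, 1)                       # query points
pred = gaussian(torch.cdist(x, y)) @ w     # (5, 1) interpolated values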
@@ -88,13 +121,14 @@ min_degree_funcs = {
class RBFBlock(torch.nn.Module): class RBFBlock(torch.nn.Module):
""" """
Radial Basis Function (RBF) interpolation layer. It need to be fitted with Radial Basis Function (RBF) interpolation layer.
the data with the method :meth:`fit`, before it can be used to interpolate The user needs to fit the model with the data before using it to
new points. The layer is not trainable. The user needs to fit the model with the data, before using it to
interpolate new points. The layer is not trainable.
.. note:: .. note::
It reproduces the implementation of ``scipy.interpolate.RBFBlock`` and It reproduces the implementation of :class:`scipy.interpolate.RBFInterpolator`
it is inspired from the implementation in `torchrbf. and it is inspired by the implementation in `torchrbf.
<https://github.com/ArmanMaesumi/torchrbf>`_ <https://github.com/ArmanMaesumi/torchrbf>`_
""" """
@@ -107,24 +141,25 @@ class RBFBlock(torch.nn.Module):
degree=None, degree=None,
): ):
""" """
:param int neighbors: Number of neighbors to use for the Initialization of the :class:`RBFBlock` class.
interpolation.
If ``None``, use all data points. :param int neighbors: The number of neighbors used for interpolation.
:param float smoothing: Smoothing parameter for the interpolation. If ``None``, all data are used.
if 0.0, the interpolation is exact and no smoothing is applied. :param float smoothing: The smoothing parameter for the interpolation.
:param str kernel: Radial basis function to use. Must be one of If ``0.0``, the interpolation is exact and no smoothing is applied.
``linear``, ``thin_plate_spline``, ``cubic``, ``quintic``, :param str kernel: The radial basis function to use.
``multiquadric``, ``inverse_multiquadric``, ``inverse_quadratic``, The available kernels are: ``linear``, ``thin_plate_spline``,
or ``gaussian``. ``cubic``, ``quintic``, ``multiquadric``, ``inverse_multiquadric``,
:param float epsilon: Shape parameter that scaled the input to ``inverse_quadratic``, or ``gaussian``.
the RBF. This defaults to 1 for kernels in ``scale_invariant`` :param float epsilon: The shape parameter that scales the input to the
dictionary, and must be specified for other kernels. RBF. Default is ``1`` for kernels in the ``scale_invariant``
:param int degree: Degree of the added polynomial. dictionary, while it must be specified for other kernels.
For some kernels, there exists a minimum degree of the polynomial :param int degree: The degree of the polynomial. Some kernels require a
such that the RBF is well-posed. Those minimum degrees are specified minimum degree of the polynomial to ensure that the RBF is well
in the `min_degree_funcs` dictionary above. If `degree` is less than defined. These minimum degrees are specified in the
the minimum degree, a warning is raised and the degree is set to the ``min_degree_funcs`` dictionary. If ``degree`` is less than the
minimum value. minimum degree required, a warning is raised and the degree is set
to the minimum value.
""" """
super().__init__() super().__init__()
@@ -151,27 +186,39 @@ class RBFBlock(torch.nn.Module):
@property @property
def smoothing(self): def smoothing(self):
""" """
Smoothing parameter for the interpolation. The smoothing parameter for the interpolation.
:return: The smoothing parameter.
:rtype: float :rtype: float
""" """
return self._smoothing return self._smoothing
@smoothing.setter @smoothing.setter
def smoothing(self, value): def smoothing(self, value):
"""
Set the smoothing parameter for the interpolation.
:param float value: The smoothing parameter.
"""
self._smoothing = value self._smoothing = value
@property @property
def kernel(self): def kernel(self):
""" """
Radial basis function to use. The radial basis function.
:return: The radial basis function.
:rtype: str :rtype: str
""" """
return self._kernel return self._kernel
@kernel.setter @kernel.setter
def kernel(self, value): def kernel(self, value):
"""
Set the radial basis function.
:param str value: The radial basis function.
"""
if value not in radial_functions: if value not in radial_functions:
raise ValueError(f"Unknown kernel: {value}") raise ValueError(f"Unknown kernel: {value}")
self._kernel = value.lower() self._kernel = value.lower()
@@ -179,14 +226,22 @@ class RBFBlock(torch.nn.Module):
@property @property
def epsilon(self): def epsilon(self):
""" """
Shape parameter that scaled the input to the RBF. The shape parameter that scales the input to the RBF.
:return: The shape parameter.
:rtype: float :rtype: float
""" """
return self._epsilon return self._epsilon
@epsilon.setter @epsilon.setter
def epsilon(self, value): def epsilon(self, value):
"""
Set the shape parameter.
:param float value: The shape parameter.
:raises ValueError: If the kernel requires an epsilon and it is not
specified.
"""
if value is None: if value is None:
if self.kernel in scale_invariant: if self.kernel in scale_invariant:
value = 1.0 value = 1.0
@@ -199,14 +254,23 @@ class RBFBlock(torch.nn.Module):
@property @property
def degree(self): def degree(self):
""" """
Degree of the added polynomial. The degree of the polynomial.
:return: The degree of the polynomial.
:rtype: int :rtype: int
""" """
return self._degree return self._degree
@degree.setter @degree.setter
def degree(self, value): def degree(self, value):
"""
Set the degree of the polynomial.
:param int value: The degree of the polynomial.
:raises UserWarning: If the degree is less than the minimum required
for the kernel.
:raises ValueError: If the degree is less than -1.
"""
min_degree = min_degree_funcs.get(self.kernel, -1) min_degree = min_degree_funcs.get(self.kernel, -1)
if value is None: if value is None:
value = max(min_degree, 0) value = max(min_degree, 0)
@@ -223,6 +287,13 @@ class RBFBlock(torch.nn.Module):
self._degree = value self._degree = value
def _check_data(self, y, d): def _check_data(self, y, d):
"""
Check the data consistency.
:param torch.Tensor y: The tensor of data points.
:param torch.Tensor d: The tensor of data values.
:raises ValueError: If the data is not consistent.
"""
if y.ndim != 2: if y.ndim != 2:
raise ValueError("y must be a 2-dimensional tensor.") raise ValueError("y must be a 2-dimensional tensor.")
@@ -241,8 +312,11 @@ class RBFBlock(torch.nn.Module):
""" """
Fit the RBF interpolator to the data. Fit the RBF interpolator to the data.
:param torch.Tensor y: (n, d) tensor of data points. :param torch.Tensor y: The tensor of data points.
:param torch.Tensor d: (n, m) tensor of data values. :param torch.Tensor d: The tensor of data values.
:raises NotImplementedError: If ``neighbors`` is not ``None``.
:raises ValueError: If the data is not compatible with the requested
degree.
""" """
self._check_data(y, d) self._check_data(y, d)
@@ -252,7 +326,7 @@ class RBFBlock(torch.nn.Module):
if self.neighbors is None: if self.neighbors is None:
nobs = self.y.shape[0] nobs = self.y.shape[0]
else: else:
raise NotImplementedError("neighbors currently not supported") raise NotImplementedError("Neighbors currently not supported")
powers = RBFBlock.monomial_powers(self.y.shape[1], self.degree).to( powers = RBFBlock.monomial_powers(self.y.shape[1], self.degree).to(
y.device y.device
@@ -276,12 +350,14 @@ class RBFBlock(torch.nn.Module):
def forward(self, x): def forward(self, x):
""" """
Returns the interpolated data at the given points `x`. Forward pass.
:param torch.Tensor x: `(n, d)` tensor of points at which :param torch.Tensor x: The tensor of points to interpolate.
to query the interpolator :raises ValueError: If the input is not a 2-dimensional tensor.
:raises ValueError: If the second dimension of the input is not the same
:rtype: `(n, m)` torch.Tensor of interpolated data. as the second dimension of the data.
:return: The interpolated data.
:rtype: torch.Tensor
""" """
if x.ndim != 2: if x.ndim != 2:
raise ValueError("`x` must be a 2-dimensional tensor.") raise ValueError("`x` must be a 2-dimensional tensor.")
@@ -309,25 +385,25 @@ class RBFBlock(torch.nn.Module):
@staticmethod @staticmethod
def kernel_vector(x, y, kernel_func): def kernel_vector(x, y, kernel_func):
""" """
Evaluate radial functions with centers `y` for all points in `x`. Evaluate for all points ``x`` the radial functions with centers ``y``.
:param torch.Tensor x: `(n, d)` tensor of points. :param torch.Tensor x: The tensor of points.
:param torch.Tensor y: `(m, d)` tensor of centers. :param torch.Tensor y: The tensor of centers.
:param str kernel_func: Radial basis function to use. :param str kernel_func: Radial basis function to use.
:return: The radial function values.
:rtype: `(n, m)` torch.Tensor of radial function values. :rtype: torch.Tensor
""" """
return kernel_func(torch.cdist(x, y)) return kernel_func(torch.cdist(x, y))
@staticmethod @staticmethod
def polynomial_matrix(x, powers): def polynomial_matrix(x, powers):
""" """
Evaluate monomials at `x` with given `powers`. Evaluate monomials of power ``powers`` at points ``x``.
:param torch.Tensor x: `(n, d)` tensor of points. :param torch.Tensor x: The tensor of points.
:param torch.Tensor powers: `(r, d)` tensor of powers for each monomial. :param torch.Tensor powers: The tensor of powers for each monomial.
:return: The monomial values.
:rtype: `(n, r)` torch.Tensor of monomial values. :rtype: torch.Tensor
""" """
x_ = torch.repeat_interleave(x, repeats=powers.shape[0], dim=0) x_ = torch.repeat_interleave(x, repeats=powers.shape[0], dim=0)
powers_ = powers.repeat(x.shape[0], 1) powers_ = powers.repeat(x.shape[0], 1)
@@ -336,12 +412,12 @@ class RBFBlock(torch.nn.Module):
@staticmethod @staticmethod
def kernel_matrix(x, kernel_func): def kernel_matrix(x, kernel_func):
""" """
Returns radial function values for all pairs of points in `x`. Return the radial function values for all pairs of points in ``x``.
:param torch.Tensor x: `(n, d`) tensor of points. :param torch.Tensor x: The tensor of points.
:param str kernel_func: Radial basis function to use. :param str kernel_func: The radial basis function to use.
:return: The radial function values.
:rtype: `(n, n`) torch.Tensor of radial function values. :rtype: torch.Tensor
""" """
return kernel_func(torch.cdist(x, x)) return kernel_func(torch.cdist(x, x))
@@ -350,12 +426,10 @@ class RBFBlock(torch.nn.Module):
""" """
Return the powers for each monomial in a polynomial. Return the powers for each monomial in a polynomial.
:param int ndim: Number of variables in the polynomial. :param int ndim: The number of variables in the polynomial.
:param int degree: Degree of the polynomial. :param int degree: The degree of the polynomial.
:return: The powers for each monomial.
:rtype: `(nmonos, ndim)` torch.Tensor where each row contains the powers :rtype: torch.Tensor
for each variable in a monomial.
""" """
nmonos = math.comb(degree + ndim, ndim) nmonos = math.comb(degree + ndim, ndim)
out = torch.zeros((nmonos, ndim), dtype=torch.int32) out = torch.zeros((nmonos, ndim), dtype=torch.int32)
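An equivalent way to enumerate the same monomial powers with itertools, useful to sanity-check the count ``math.comb(degree + ndim, ndim)`` (a standalone sketch, not the method's code):

import math
from itertools import combinations_with_replacement

def monomial_powers(ndim, degree):
    powers = []
    for deg in range(degree + 1):
        for combo in combinations_with_replacement(range(ndim), deg):
            p = [0] * ndim
            for var in combo:
                p[var] += 1  # exponent of each variable in the monomial
            powers.append(p)
    return powers

print(monomial_powers(2, 2))  # [[0, 0], [1, 0], [0, 1], [2, 0], [1, 1], [0, 2]]
print(math.comb(2 + 2, 2))    # 6 monomials, matching the count above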
@@ -372,16 +446,16 @@ class RBFBlock(torch.nn.Module):
""" """
Build the RBF linear system. Build the RBF linear system.
:param torch.Tensor y: (n, d) tensor of data points. :param torch.Tensor y: The tensor of data points.
:param torch.Tensor d: (n, m) tensor of data values. :param torch.Tensor d: The tensor of data values.
:param torch.Tensor smoothing: (n,) tensor of smoothing parameters. :param torch.Tensor smoothing: The tensor of smoothing parameters.
:param str kernel: Radial basis function to use. :param str kernel: The radial basis function to use.
:param float epsilon: Shape parameter that scaled the input to the RBF. :param float epsilon: The shape parameter that scales the input to the
:param torch.Tensor powers: (r, d) tensor of powers for each monomial. RBF.
:param torch.Tensor powers: The tensor of powers for each monomial.
:rtype: (lhs, rhs, shift, scale) where `lhs` and `rhs` are the :return: The left-hand side and right-hand side of the linear system,
left-hand side and right-hand side of the linear system, and and the shift and scale parameters.
`shift` and `scale` are the shift and scale parameters. :rtype: tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]
""" """
p = d.shape[0] p = d.shape[0]
s = d.shape[1] s = d.shape[1]
@@ -413,21 +487,20 @@ class RBFBlock(torch.nn.Module):
@staticmethod @staticmethod
def solve(y, d, smoothing, kernel, epsilon, powers): def solve(y, d, smoothing, kernel, epsilon, powers):
""" """
Build then solve the RBF linear system. Build and solve the RBF linear system.
:param torch.Tensor y: (n, d) tensor of data points. :param torch.Tensor y: The tensor of data points.
:param torch.Tensor d: (n, m) tensor of data values. :param torch.Tensor d: The tensor of data values.
:param torch.Tensor smoothing: (n,) tensor of smoothing parameters. :param torch.Tensor smoothing: The tensor of smoothing parameters.
:param str kernel: Radial basis function to use.
:param float epsilon: Shape parameter that scaled the input to the RBF.
:param torch.Tensor powers: (r, d) tensor of powers for each monomial.
:param str kernel: The radial basis function to use.
:param float epsilon: The shape parameter that scales the input to the
RBF.
:param torch.Tensor powers: The tensor of powers for each monomial.
:raises ValueError: If the linear system is singular. :raises ValueError: If the linear system is singular.
:return: The shift and scale parameters, and the coefficients of the
:rtype: (shift, scale, coeffs) where `shift` and `scale` are the interpolator.
shift and scale parameters, and `coeffs` are the coefficients :rtype: tuple[torch.Tensor, torch.Tensor, torch.Tensor]
of the interpolator
""" """
lhs, rhs, shift, scale = RBFBlock.build( lhs, rhs, shift, scale = RBFBlock.build(

View File

@@ -1,5 +1,5 @@
""" """
TODO: Add title. Module for residual blocks and enhanced linear layers.
""" """
import torch import torch
@@ -8,16 +8,16 @@ from ...utils import check_consistency
class ResidualBlock(nn.Module): class ResidualBlock(nn.Module):
"""Residual block base class. Implementation of a residual block. """
Residual block class.
.. seealso:: .. seealso::
**Original reference**: He, Kaiming, et al. **Original reference**: He, Kaiming, et al.
*Deep residual learning for image recognition.* *Deep residual learning for image recognition.*
Proceedings of the IEEE conference on computer vision Proceedings of the IEEE conference on computer vision and pattern
and pattern recognition. 2016.. recognition. 2016.
DOI: `<https://arxiv.org/pdf/1512.03385.pdf>`_. DOI: `<https://arxiv.org/pdf/1512.03385.pdf>`_.
""" """
def __init__( def __init__(
@@ -29,18 +29,15 @@ class ResidualBlock(nn.Module):
activation=torch.nn.ReLU(), activation=torch.nn.ReLU(),
): ):
""" """
Initializes the ResidualBlock module. Initialization of the :class:`ResidualBlock` class.
:param int input_dim: Dimension of the input to pass to the :param int input_dim: The input dimension.
feedforward linear layer. :param int output_dim: The output dimension.
:param int output_dim: Dimension of the output from the :param int hidden_dim: The hidden dimension.
residual layer. :param bool spectral_norm: If ``True``, the spectral normalization is
:param int hidden_dim: Hidden dimension for mapping the input applied to the feedforward layers. Default is ``False``.
(first block). :param torch.nn.Module activation: The activation function.
:param bool spectral_norm: Apply spectral normalization to feedforward Default is :class:`torch.nn.ReLU`.
layers, defaults to False.
:param torch.nn.Module activation: Cctivation function after first
block.
""" """
super().__init__() super().__init__()
@@ -64,10 +61,11 @@ class ResidualBlock(nn.Module):
self._l3 = self._spect_norm(nn.Linear(input_dim, output_dim)) self._l3 = self._spect_norm(nn.Linear(input_dim, output_dim))
def forward(self, x): def forward(self, x):
"""Forward pass for residual block layer. """
Forward pass.
:param torch.Tensor x: Input tensor for the residual layer. :param torch.Tensor x: The input tensor.
:return: Output tensor for the residual layer. :return: The output tensor.
:rtype: torch.Tensor :rtype: torch.Tensor
""" """
y = self._activation(self._l1(x)) y = self._activation(self._l1(x))
@@ -76,10 +74,10 @@ class ResidualBlock(nn.Module):
return y + x return y + x
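The pattern at work: a two-layer transform plus a learned linear shortcut, so input and output dimensions may differ. A minimal self-contained sketch of the same pattern (``TinyResidual`` is a hypothetical name, not the class above):

import torch
from torch import nn

class TinyResidual(nn.Module):
    def __init__(self, input_dim, output_dim, hidden_dim):
        super().__init__()
        self.l1 = nn.Linear(input_dim, hidden_dim)
        self.l2 = nn.Linear(hidden_dim, output_dim)
        self.shortcut = nn.Linear(input_dim, output_dim)  # learned skip path
        self.act = nn.ReLU()

    def forward(self, x):
        return self.l2(self.act(self.l1(x))) + self.shortcut(x)

block = TinyResidual(10, 20, 32)
print(block(torch.randn(4, 10)).shape)  # torch.Size([4, 20])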
def _spect_norm(self, x): def _spect_norm(self, x):
"""Perform spectral norm on the layers. """
Perform spectral normalization on a network layer.
:param x: A torch.nn.Module Linear layer :param torch.nn.Module x: A :class:`torch.nn.Linear` layer.
:type x: torch.nn.Module
:return: The spectral norm of the layer :return: The layer with spectral normalization applied.
:rtype: torch.nn.Module :rtype: torch.nn.Module
""" """
@@ -88,37 +86,31 @@ class ResidualBlock(nn.Module):
class EnhancedLinear(torch.nn.Module): class EnhancedLinear(torch.nn.Module):
""" """
A wrapper class for enhancing a linear layer with activation and/or dropout. Enhanced Linear layer class.
:param layer: The linear layer to be enhanced. This class is a wrapper for enhancing a linear layer with activation and/or
:type layer: torch.nn.Module dropout.
:param activation: The activation function to be applied after the linear
layer.
:type activation: torch.nn.Module
:param dropout: The dropout probability to be applied after the activation
(if provided).
:type dropout: float
:Example:
>>> linear_layer = torch.nn.Linear(10, 20)
>>> activation = torch.nn.ReLU()
>>> dropout_prob = 0.5
>>> enhanced_linear = EnhancedLinear(linear_layer, activation, dropout_prob)
""" """
def __init__(self, layer, activation=None, dropout=None): def __init__(self, layer, activation=None, dropout=None):
""" """
Initializes the EnhancedLinear module. Initialization of the :class:`EnhancedLinear` class.
:param layer: The linear layer to be enhanced. :param torch.nn.Module layer: The linear layer to be enhanced.
:type layer: torch.nn.Module :param torch.nn.Module activation: The activation function. Default is
:param activation: The activation function to be applied after the ``None``.
linear layer. :param float dropout: The dropout probability. Default is ``None``.
:type activation: torch.nn.Module
:param dropout: The dropout probability to be applied after the :Example:
activation (if provided).
:type dropout: float >>> linear_layer = torch.nn.Linear(10, 20)
>>> activation = torch.nn.ReLU()
>>> dropout_prob = 0.5
>>> enhanced_linear = EnhancedLinear(
... linear_layer,
... activation,
... dropout_prob
... )
""" """
super().__init__() super().__init__()
@@ -146,23 +138,19 @@ class EnhancedLinear(torch.nn.Module):
def forward(self, x): def forward(self, x):
""" """
Forward pass through the enhanced linear module. Forward pass.
:param x: Input tensor. :param torch.Tensor x: The input tensor.
:type x: torch.Tensor :return: The output tensor.
:return: Output tensor after passing through the enhanced linear module.
:rtype: torch.Tensor :rtype: torch.Tensor
""" """
return self._model(x) return self._model(x)
def _drop(self, p): def _drop(self, p):
""" """
Applies dropout with probability p. Apply dropout with probability p.
:param p: Dropout probability.
:type p: float
:param float p: Dropout probability.
:return: Dropout layer with the specified probability. :return: Dropout layer with the specified probability.
:rtype: torch.nn.Dropout :rtype: torch.nn.Dropout
""" """

View File

@@ -1,5 +1,5 @@
""" """
TODO: Add title. Module for spectral convolution blocks.
""" """
import torch import torch
@@ -10,24 +10,23 @@ from ...utils import check_consistency
######## 1D Spectral Convolution ########### ######## 1D Spectral Convolution ###########
class SpectralConvBlock1D(nn.Module): class SpectralConvBlock1D(nn.Module):
""" """
PINA implementation of Spectral Convolution Block for one Spectral Convolution Block for one-dimensional tensors.
dimensional tensors.
This class computes the spectral convolution of the input with a linear
kernel in the Fourier space, and then it maps the input back to the physical
space.
The block expects an input of size [``batch``, ``input_numb_fields``, ``N``]
and returns an output of size [``batch``, ``output_numb_fields``, ``N``].
""" """
def __init__(self, input_numb_fields, output_numb_fields, n_modes): def __init__(self, input_numb_fields, output_numb_fields, n_modes):
""" r"""
The module computes the spectral convolution of the input with a linear Initialization of the :class:`SpectralConvBlock1D` class.
kernel in the
fourier space, and then it maps the input back to the physical
space.
The block expects an input of size ``[batch, input_numb_fields, N]``
and returns an output of size ``[batch, output_numb_fields, N]``.
:param int input_numb_fields: The number of channels for the input. :param int input_numb_fields: The number of channels for the input.
:param int output_numb_fields: The number of channels for the output. :param int output_numb_fields: The number of channels for the output.
:param int n_modes: Number of modes to select, it must be at most equal :param int n_modes: The number of modes to select for each dimension.
to the ``floor(N/2)+1``. It must be at most equal to :math:`\lfloor N/2 \rfloor + 1`.
""" """
super().__init__() super().__init__()
@@ -54,30 +53,26 @@ class SpectralConvBlock1D(nn.Module):
def _compute_mult1d(self, input, weights): def _compute_mult1d(self, input, weights):
""" """
Compute the matrix multiplication of the input Compute the matrix multiplication of the input and the linear kernel
with the linear kernel weights. weights.
:param input: The input tensor, expect of size :param torch.Tensor input: The input tensor. Expected of size
``[batch, input_numb_fields, x]``. [``batch``, ``input_numb_fields``, ``N``].
:type input: torch.Tensor :param torch.Tensor weights: The kernel weights. Expected of size
:param weights: The kernel weights, expect of [``input_numb_fields``, ``output_numb_fields``, ``N``].
size ``[input_numb_fields, output_numb_fields, x]``. :return: The result of the matrix multiplication.
:type weights: torch.Tensor
:return: The matrix multiplication of the input
with the linear kernel weights.
:rtype: torch.Tensor :rtype: torch.Tensor
""" """
return torch.einsum("bix,iox->box", input, weights) return torch.einsum("bix,iox->box", input, weights)
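In context, this contraction is applied to the retained Fourier modes between an rfft and an irfft. A hedged end-to-end sketch of the 1D case (weight initialization and scaling omitted):

import torch

batch, in_f, out_f, N, n_modes = 2, 3, 5, 64, 16
x = torch.randn(batch, in_f, N)
weights = torch.randn(in_f, out_f, n_modes, dtype=torch.cfloat)

x_ft = torch.fft.rfft(x)  # (batch, in_f, N//2 + 1) complex coefficients
out_ft = torch.zeros(batch, out_f, N // 2 + 1, dtype=torch.cfloat)
out_ft[..., :n_modes] = torch.einsum(
    "bix,iox->box", x_ft[..., :n_modes], weights
)                         # contract only the first n_modes
out = torch.fft.irfft(out_ft, n=N)  # back to physical space
print(out.shape)  # torch.Size([2, 5, 64])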
def forward(self, x): def forward(self, x):
""" """
Forward computation for Spectral Convolution. Forward pass.
:param x: The input tensor, expect of size :param torch.Tensor x: The input tensor. Expected of size
``[batch, input_numb_fields, x]``. [``batch``, ``input_numb_fields``, ``N``].
:type x: torch.Tensor :return: The output tensor of size
:return: The output tensor obtained from the [``batch``, ``output_numb_fields``, ``N``].
spectral convolution of size ``[batch, output_numb_fields, x]``.
:rtype: torch.Tensor :rtype: torch.Tensor
""" """
batch_size = x.shape[0] batch_size = x.shape[0]
@@ -104,26 +99,29 @@ class SpectralConvBlock1D(nn.Module):
######## 2D Spectral Convolution ########### ######## 2D Spectral Convolution ###########
class SpectralConvBlock2D(nn.Module): class SpectralConvBlock2D(nn.Module):
""" """
PINA implementation of spectral convolution block for two Spectral Convolution Block for two-dimensional tensors.
dimensional tensors.
This class computes the spectral convolution of the input with a linear
kernel in the Fourier space, and then it maps the input back to the physical
space.
The block expects an input of size
[``batch``, ``input_numb_fields``, ``Nx``, ``Ny``]
and returns an output of size
[``batch``, ``output_numb_fields``, ``Nx``, ``Ny``].
""" """
def __init__(self, input_numb_fields, output_numb_fields, n_modes): def __init__(self, input_numb_fields, output_numb_fields, n_modes):
""" r"""
The module computes the spectral convolution of the input with a linear Initialization of the :class:`SpectralConvBlock2D` class.
kernel in the
fourier space, and then it maps the input back to the physical
space.
The block expects an input of size
``[batch, input_numb_fields, Nx, Ny]``
and returns an output of size ``[batch, output_numb_fields, Nx, Ny]``.
:param int input_numb_fields: The number of channels for the input. :param int input_numb_fields: The number of channels for the input.
:param int output_numb_fields: The number of channels for the output. :param int output_numb_fields: The number of channels for the output.
:param list | tuple n_modes: Number of modes to select for each :param n_modes: The number of modes to select for each dimension.
dimension. It must be at most equal to the ``floor(Nx/2)+1`` and It must be at most equal to :math:`\lfloor Nx/2 \rfloor + 1`,
``floor(Ny/2)+1``. :math:`\lfloor Ny/2 \rfloor + 1`.
:type n_modes: list[int] | tuple[int]
:raises ValueError: If the number of modes is not consistent.
:raises ValueError: If the number of modes is not a list or tuple.
""" """
super().__init__() super().__init__()
@@ -178,30 +176,26 @@ class SpectralConvBlock2D(nn.Module):
def _compute_mult2d(self, input, weights): def _compute_mult2d(self, input, weights):
""" """
Compute the matrix multiplication of the input Compute the matrix multiplication of the input and the linear kernel
with the linear kernel weights. weights.
:param input: The input tensor, expect of size :param torch.Tensor input: The input tensor. Expected of size
``[batch, input_numb_fields, x, y]``. [``batch``, ``input_numb_fields``, ``Nx``, ``Ny``].
:type input: torch.Tensor :param torch.Tensor weights: The kernel weights. Expected of size
:param weights: The kernel weights, expect of [``input_numb_fields``, ``output_numb_fields``, ``Nx``, ``Ny``].
size ``[input_numb_fields, output_numb_fields, x, y]``. :return: The result of the matrix multiplication.
:type weights: torch.Tensor
:return: The matrix multiplication of the input
with the linear kernel weights.
:rtype: torch.Tensor :rtype: torch.Tensor
""" """
return torch.einsum("bixy,ioxy->boxy", input, weights) return torch.einsum("bixy,ioxy->boxy", input, weights)
def forward(self, x): def forward(self, x):
""" """
Forward computation for Spectral Convolution. Forward pass.
:param x: The input tensor, expect of size :param torch.Tensor x: The input tensor. Expected of size
``[batch, input_numb_fields, x, y]``. [``batch``, ``input_numb_fields``, ``Nx``, ``Ny``].
:type x: torch.Tensor :return: The output tensor of size
:return: The output tensor obtained from the [``batch``, ``output_numb_fields``, ``Nx``, ``Ny``].
spectral convolution of size ``[batch, output_numb_fields, x, y]``.
:rtype: torch.Tensor :rtype: torch.Tensor
""" """
@@ -235,27 +229,29 @@ class SpectralConvBlock2D(nn.Module):
######## 3D Spectral Convolution ########### ######## 3D Spectral Convolution ###########
class SpectralConvBlock3D(nn.Module): class SpectralConvBlock3D(nn.Module):
""" """
PINA implementation of spectral convolution block for three Spectral Convolution Block for three-dimensional tensors.
dimensional tensors.
This class computes the spectral convolution of the input with a linear
kernel in the Fourier space, and then it maps the input back to the physical
space.
The block expects an input of size
[``batch``, ``input_numb_fields``, ``Nx``, ``Ny``, ``Nz``]
and returns an output of size
[``batch``, ``output_numb_fields``, ``Nx``, ``Ny``, ``Nz``].
""" """
def __init__(self, input_numb_fields, output_numb_fields, n_modes): def __init__(self, input_numb_fields, output_numb_fields, n_modes):
""" r"""
The module computes the spectral convolution of the input with a Initialization of the :class:`SpectralConvBlock3D` class.
linear kernel in the
fourier space, and then it maps the input back to the physical
space.
The block expects an input of size
``[batch, input_numb_fields, Nx, Ny, Nz]``
and returns an output of size
``[batch, output_numb_fields, Nx, Ny, Nz]``.
:param int input_numb_fields: The number of channels for the input. :param int input_numb_fields: The number of channels for the input.
:param int output_numb_fields: The number of channels for the output. :param int output_numb_fields: The number of channels for the output.
:param list | tuple n_modes: Number of modes to select for each :param n_modes: The number of modes to select for each dimension.
dimension. It must be at most equal to the ``floor(Nx/2)+1``, It must be at most equal to :math:`\lfloor Nx/2 \rfloor + 1`,
``floor(Ny/2)+1`` and ``floor(Nz/2)+1``. :math:`\lfloor Ny/2 \rfloor + 1`, :math:`\lfloor Nz/2 \rfloor + 1`.
:type n_modes: list[int] | tuple[int]
:raises ValueError: If the number of modes is not consistent.
:raises ValueError: If the number of modes is not a list or tuple.
""" """
super().__init__() super().__init__()
@@ -334,31 +330,27 @@ class SpectralConvBlock3D(nn.Module):
def _compute_mult3d(self, input, weights): def _compute_mult3d(self, input, weights):
""" """
Compute the matrix multiplication of the input Compute the matrix multiplication of the input and the linear kernel
with the linear kernel weights. weights.
:param input: The input tensor, expect of size :param torch.Tensor input: The input tensor. Expected of size
``[batch, input_numb_fields, x, y, z]``. [``batch``, ``input_numb_fields``, ``Nx``, ``Ny``, ``Nz``].
:type input: torch.Tensor :param torch.Tensor weights: The kernel weights. Expected of size
:param weights: The kernel weights, expect of [``input_numb_fields``, ``output_numb_fields``, ``Nx``, ``Ny``,
size ``[input_numb_fields, output_numb_fields, x, y, z]``. ``Nz``].
:type weights: torch.Tensor :return: The result of the matrix multiplication.
:return: The matrix multiplication of the input
with the linear kernel weights.
:rtype: torch.Tensor :rtype: torch.Tensor
""" """
return torch.einsum("bixyz,ioxyz->boxyz", input, weights) return torch.einsum("bixyz,ioxyz->boxyz", input, weights)
def forward(self, x): def forward(self, x):
""" """
Forward computation for Spectral Convolution. Forward pass.
:param x: The input tensor, expect of size :param torch.Tensor x: The input tensor. Expected of size
``[batch, input_numb_fields, x, y, z]``. [``batch``, ``input_numb_fields``, ``Nx``, ``Ny``, ``Nz``].
:type x: torch.Tensor :return: The output tensor of size
:return: The output tensor obtained from the [``batch``, ``output_numb_fields``, ``Nx``, ``Ny``, ``Nz``].
spectral convolution of size
``[batch, output_numb_fields, x, y, z]``.
:rtype: torch.Tensor :rtype: torch.Tensor
""" """

View File

@@ -1,5 +1,5 @@
""" """
TODO: Add description Module for the Stride class.
""" """
import torch import torch
@@ -7,14 +7,16 @@ import torch
class Stride: class Stride:
""" """
TODO Stride class for continuous convolution.
""" """
def __init__(self, dict_): def __init__(self, dict_):
"""Stride class for continous convolution """
Initialization of the :class:`Stride` class.
:param param: type of continuous convolution :param dict dict_: Dictionary having as keys the domain size ``domain``,
:type param: string the starting position of the filter ``start``, the jump size for the
filter ``jump``, and the direction of the filter ``direction``.
""" """
self._dict_stride = dict_ self._dict_stride = dict_
@@ -22,52 +24,50 @@ class Stride:
self._stride_discrete = self._create_stride_discrete(dict_) self._stride_discrete = self._create_stride_discrete(dict_)
def _create_stride_discrete(self, my_dict): def _create_stride_discrete(self, my_dict):
"""Creating the list for applying the filter """
Create a tensor of positions at which to apply the filter.
:param dict my_dict: Dictionary having as keys the domain size
``domain``, the starting position of the filter ``start``, the jump
size for the filter ``jump``, and the direction of the filter
``direction``.
:raises IndexError: If the values in the dict do not all have the same length.
:raises ValueError: If the domain values are not greater than 0.
:raises ValueError: If the direction is not equal to ``1``, ``-1``, or
``0``.
:raises IndexError: If direction and jumps are not zero at the same index.
:return: The positions for the filter.
:rtype: torch.Tensor
:param my_dict: Dictionary with the following arguments:
domain size, starting position of the filter, jump size
for the filter and direction of the filter
:type my_dict: dict
:raises IndexError: Values in the dict must have all same length
:raises ValueError: Domain values must be greater than 0
:raises ValueError: Direction must be either equal to 1, -1 or 0
:raises IndexError: Direction and jumps must have zero in the same
index
:return: list of positions for the filter
:rtype: list
:Example: :Example:
>>> stride_dict = {
>>> stride = {"domain": [4, 4], ... "domain": [4, 4],
"start": [-4, 2], ... "start": [-4, 2],
"jump": [2, 2], ... "jump": [2, 2],
"direction": [1, 1], ... "direction": [1, 1],
} ... }
>>> create_stride(stride) >>> Stride(stride_dict)
[[-4.0, 2.0], [-4.0, 4.0], [-2.0, 2.0], [-2.0, 4.0]]
""" """
# we must check boundaries of the input as well # we must check boundaries of the input as well
domain, start, jumps, direction = my_dict.values() domain, start, jumps, direction = my_dict.values()
# checking # checking
if not all(len(s) == len(domain) for s in my_dict.values()): if not all(len(s) == len(domain) for s in my_dict.values()):
raise IndexError("values in the dict must have all same length") raise IndexError("Values in the dict must have all same length")
if not all(v >= 0 for v in domain): if not all(v >= 0 for v in domain):
raise ValueError("domain values must be greater than 0") raise ValueError("Domain values must be greater than 0")
if not all(v in (0, -1, 1) for v in direction): if not all(v in (0, -1, 1) for v in direction):
raise ValueError("direction must be either equal to 1, -1 or 0") raise ValueError("Direction must be either equal to 1, -1 or 0")
seq_jumps = [i for i, e in enumerate(jumps) if e == 0] seq_jumps = [i for i, e in enumerate(jumps) if e == 0]
seq_direction = [i for i, e in enumerate(direction) if e == 0] seq_direction = [i for i, e in enumerate(direction) if e == 0]
if seq_direction != seq_jumps: if seq_direction != seq_jumps:
raise IndexError( raise IndexError(
"direction and jumps must have zero in the same index" "Direction and jumps must have zero in the same index"
) )
if seq_jumps: if seq_jumps:
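The docstring example can be reproduced with per-axis ranges and a cartesian product. A hedged sketch of that construction (direction handling omitted):

import torch

domain, start, jump = [4, 4], [-4, 2], [2, 2]
axes = [
    torch.arange(s, s + d, j, dtype=torch.float32)  # positions along one axis
    for s, d, j in zip(start, domain, jump)
]
positions = torch.cartesian_prod(*axes)
print(positions.tolist())
# [[-4.0, 2.0], [-4.0, 4.0], [-2.0, 2.0], [-2.0, 4.0]]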

View File

@@ -1,5 +1,5 @@
""" """
TODO Module for utility functions for the convolutional layer.
""" """
import torch import torch
@@ -7,7 +7,13 @@ import torch
def check_point(x, current_stride, dim): def check_point(x, current_stride, dim):
""" """
TODO Check if the point is in the current stride.
:param torch.Tensor x: The input data.
:param int current_stride: The current stride.
:param int dim: The shape of the filter.
:return: The indices of the points in the current stride.
:rtype: torch.Tensor
""" """
max_stride = current_stride + dim max_stride = current_stride + dim
indeces = torch.logical_and( indeces = torch.logical_and(
@@ -17,13 +23,12 @@ def check_point(x, current_stride, dim):
def map_points_(x, filter_position): def map_points_(x, filter_position):
"""Mapping function n dimensional case """
The mapping function for the n-dimensional case.
:param x: input data of two dimension :param torch.Tensor x: The two-dimensional input data.
:type x: torch.tensor :param list[int] filter_position: The position of the filter.
:param filter_position: position of the filter :return: The data mapped in-place.
:type dim: list[numeric]
:return: data mapped inplace
:rtype: torch.tensor :rtype: torch.Tensor
""" """
x.add_(-filter_position) x.add_(-filter_position)
@@ -32,14 +37,20 @@ def map_points_(x, filter_position):
def optimizing(f): def optimizing(f):
"""Decorator for calling a function just once """
Decorator to call the function only once.
:param f: python function :param f: The Python function to wrap.
:type f: function :type f: Callable
""" """
def wrapper(*args, **kwargs): def wrapper(*args, **kwargs):
"""
Wrapper function.
:param args: The arguments of the function.
:param kwargs: The keyword arguments of the function.
"""
if kwargs["type_"] == "forward": if kwargs["type_"] == "forward":
if not wrapper.has_run_inverse: if not wrapper.has_run_inverse:
wrapper.has_run_inverse = True wrapper.has_run_inverse = True
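The decorator keeps run flags as attributes on the wrapper so each branch executes only once. A simplified, hedged sketch of the same pattern with a single flag (``run_once`` is a hypothetical name):

def run_once(f):
    def wrapper(*args, **kwargs):
        if not wrapper.has_run:
            wrapper.has_run = True
            return f(*args, **kwargs)
        return None  # skip repeated calls
    wrapper.has_run = False  # state lives on the wrapper itself
    return wrapper

@run_once
def setup():
    print("expensive setup")

setup()  # prints "expensive setup"
setup()  # does nothing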

View File

@@ -49,10 +49,10 @@ class GraphNeuralKernel(torch.nn.Module):
:type internal_layers: list[int] | tuple[int] :type internal_layers: list[int] | tuple[int]
:param torch.nn.Module internal_func: The activation function used :param torch.nn.Module internal_func: The activation function used
inside each kernel layer. If ``None``, it uses the inside each kernel layer. If ``None``, it uses the
:class:`torch.nn.Tanh`. activation. Default is ``None``. :class:`torch.nn.Tanh` activation. Default is ``None``.
:param torch.nn.Module external_func: The activation function applied to :param torch.nn.Module external_func: The activation function applied to
the output of the each kernel layer. If ``None``, it uses the the output of the each kernel layer. If ``None``, it uses the
:class:`torch.nn.Tanh`. activation. Default is ``None``. :class:`torch.nn.Tanh` activation. Default is ``None``.
:param bool shared_weights: If ``True``, the weights of each kernel :param bool shared_weights: If ``True``, the weights of each kernel
layer are shared. Default is ``False``. layer are shared. Default is ``False``.
""" """

View File

@@ -64,6 +64,7 @@ class LowRankNeuralOperator(KernelNeuralOperator):
the same length as ``n_layers``. If a single function is passed, it the same length as ``n_layers``. If a single function is passed, it
is used for all layers, except for the last one. is used for all layers, except for the last one.
Default is :class:`torch.nn.Tanh`. Default is :class:`torch.nn.Tanh`.
:type func: torch.nn.Module | list[torch.nn.Module]
:param bool bias: If ``True`` bias is considered for the basis function :param bool bias: If ``True`` bias is considered for the basis function
neural network. Default is ``True``. neural network. Default is ``True``.
:raises ValueError: If the input dimension does not match with the :raises ValueError: If the input dimension does not match with the