fix doc model part 2

This commit is contained in:
giovanni
2025-03-14 16:07:08 +01:00
committed by Nicola Demo
parent 001d1fc9cf
commit f9881a79b5
18 changed files with 887 additions and 851 deletions

View File

@@ -1,5 +1,5 @@
"""
Module containing the building blocks for models.
Module for the building blocks of the neural models.
"""
__all__ = [

View File

@@ -1,4 +1,4 @@
"""Module for Averaging Neural Operator Layer class."""
"""Module for the Averaging Neural Operator Block class."""
import torch
from torch import nn
@@ -7,12 +7,12 @@ from ...utils import check_consistency
class AVNOBlock(nn.Module):
r"""
The PINA implementation of the inner layer of the Averaging Neural Operator.
The inner block of the Averaging Neural Operator.
The operator layer performs an affine transformation where the convolution
is approximated with a local average. Given the input function
:math:`v(x)\in\mathbb{R}^{\rm{emb}}` the layer computes
the operator update :math:`K(v)` as:
:math:`v(x)\in\mathbb{R}^{\rm{emb}}` the layer computes the operator update
:math:`K(v)` as:
.. math::
K(v) = \sigma\left(Wv(x) + b + \frac{1}{|\mathcal{A}|}\int v(y)dy\right)
@@ -28,18 +28,20 @@ class AVNOBlock(nn.Module):
.. seealso::
**Original reference**: Lanthaler S. Li, Z., Kovachki,
Stuart, A. (2020). *The Nonlocal Neural Operator: Universal
Approximation*.
**Original reference**: Lanthaler, S., Li, Z., Stuart, A. (2023).
*The Nonlocal Neural Operator: Universal Approximation*.
DOI: `arXiv preprint arXiv:2304.13221.
<https://arxiv.org/abs/2304.13221>`_
"""
def __init__(self, hidden_size=100, func=nn.GELU):
"""
:param int hidden_size: Size of the hidden layer, defaults to 100.
:param func: The activation function, default to nn.GELU.
Initialization of the :class:`AVNOBlock` class.
:param int hidden_size: The size of the hidden layer.
Default is ``100``.
:param func: The activation function.
Default is :class:`torch.nn.GELU`.
"""
super().__init__()
@@ -52,17 +54,11 @@ class AVNOBlock(nn.Module):
def forward(self, x):
r"""
Forward pass of the layer, it performs a sum of local average
and an affine transformation of the field.
Forward pass of the block. It performs a sum of local average and an
affine transformation of the field.
:param torch.Tensor x: The input tensor for performing the
computation. It expects a tensor :math:`B \times N \times D`,
where :math:`B` is the batch_size, :math:`N` the number of points
in the mesh, :math:`D` the dimension of the problem. In particular
:math:`D` is the codomain of the function :math:`v`. For example
a scalar function has :math:`D=1`, a 4-dimensional vector function
:math:`D=4`.
:return: The output tensor obtained from Average Neural Operator Block.
:param torch.Tensor x: The input tensor for performing the computation.
:return: The output tensor.
:rtype: torch.Tensor
"""
return self._func(self._nn(x) + torch.mean(x, dim=1, keepdim=True))
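For reference, the update :math:`K(v)` described above can be reproduced with plain PyTorch; the sketch below mirrors the forward pass in this hunk, with illustrative shapes and variable names (it is not part of the PINA API).

import torch
from torch import nn

hidden_size = 100
W = nn.Linear(hidden_size, hidden_size)             # affine map W v(x) + b
sigma = nn.GELU()                                   # default activation

v = torch.rand(8, 50, hidden_size)                  # [batch, points, embedding]
local_average = torch.mean(v, dim=1, keepdim=True)  # approximates the integral term
K_v = sigma(W(v) + local_average)                   # same shape as the input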

View File

@@ -1,4 +1,4 @@
"""Module for Base Continuous Convolution class."""
"""Module for the Base Continuous Convolution class."""
from abc import ABCMeta, abstractmethod
import torch
@@ -7,8 +7,31 @@ from .utils_convolution import optimizing
class BaseContinuousConv(torch.nn.Module, metaclass=ABCMeta):
"""
Abstract class
r"""
Base Class for Continuous Convolution.
The class expects the input to be in the form:
:math:`[B \times N_{in} \times N \times D]`, where :math:`B` is the
batch_size, :math:`N_{in}` is the number of input fields, :math:`N`
the number of points in the mesh, :math:`D` the dimension of the problem.
In particular:
* :math:`D` is the number of spatial variables + 1. The last column must
contain the field value.
* :math:`N_{in}` represents the number of function components.
For instance, a vectorial function :math:`f = [f_1, f_2]` has
:math:`N_{in}=2`.
:Note
A 2-dimensional vector-valued function defined on a 3-dimensional input,
evaluated on an input mesh of 100 points with a batch size of 8, is
represented as a tensor of shape ``[8, 2, 100, 4]``, where the columns
``[:, 0, :, -1]`` and ``[:, 1, :, -1]`` represent the first and second
components of the function, respectively.
The algorithm returns a tensor of shape:
:math:`[B \times N_{out} \times N \times D]`, where :math:`B` is the
batch_size, :math:`N_{out}` is the number of output fields, :math:`N`
the number of points in the mesh, :math:`D` the dimension of the problem.
"""
def __init__(
@@ -22,56 +45,30 @@ class BaseContinuousConv(torch.nn.Module, metaclass=ABCMeta):
no_overlap=False,
):
"""
Base Class for Continuous Convolution.
Initialization of the :class:`BaseContinuousConv` class.
The algorithm expects input to be in the form:
$$[B \times N_{in} \times N \times D]$$
where $B$ is the batch_size, $N_{in}$ is the number of input
fields, $N$ the number of points in the mesh, $D$ the dimension
of the problem. In particular:
* $D$ is the number of spatial variables + 1. The last column must
contain the field value. For example for 2D problems $D=3$ and
the tensor will be something like `[first coordinate, second
coordinate, field value]`.
* $N_{in}$ represents the number of vectorial function presented.
For example a vectorial function $f = [f_1, f_2]$ will have
$N_{in}=2$.
:Note
A 2-dimensional vectorial function $N_{in}=2$ of 3-dimensional
input $D=3+1=4$ with 100 points input mesh and batch size of 8
is represented as a tensor `[8, 2, 100, 4]`, where the columns
`[:, 0, :, -1]` and `[:, 1, :, -1]` represent the first and
second filed value respectively
The algorithm returns a tensor of shape:
$$[B \times N_{out} \times N' \times D]$$
where $B$ is the batch_size, $N_{out}$ is the number of output
fields, $N'$ the number of points in the mesh, $D$ the dimension
of the problem.
:param input_numb_field: number of fields in the input
:type input_numb_field: int
:param output_numb_field: number of fields in the output
:type output_numb_field: int
:param filter_dim: dimension of the filter
:type filter_dim: tuple/ list
:param stride: stride for the filter
:type stride: dict
:param model: neural network for inner parametrization,
defaults to None.
:type model: torch.nn.Module, optional
:param optimize: flag for performing optimization on the continuous
filter, defaults to False. The flag `optimize=True` should be
used only when the scatter datapoints are fixed through the
training. If torch model is in `.eval()` mode, the flag is
automatically set to False always.
:type optimize: bool, optional
:param no_overlap: flag for performing optimization on the transpose
continuous filter, defaults to False. The flag set to `True` should
be used only when the filter positions do not overlap for different
strides. RuntimeError will raise in case of non-compatible strides.
:type no_overlap: bool, optional
:param int input_numb_field: The number of input fields.
:param int output_numb_field: The number of output fields.
:param filter_dim: The shape of the filter.
:type filter_dim: list[int] | tuple[int]
:param dict stride: The stride of the filter.
:param torch.nn.Module model: The neural network for inner
parametrization. Default is ``None``.
:param bool optimize: If ``True``, optimization is performed on the
continuous filter. It should be used only when the training points
are fixed. If ``model`` is in ``eval`` mode, it is reset to
``False``. Default is ``False``.
:param bool no_overlap: If ``True``, optimization is performed on the
transposed continuous filter. It should be used only when the filter
positions do not overlap for different strides.
Default is ``False``.
:raises ValueError: If ``input_numb_field`` is not an integer.
:raises ValueError: If ``output_numb_field`` is not an integer.
:raises ValueError: If ``filter_dim`` is not a list or tuple.
:raises ValueError: If ``stride`` is not a dictionary.
:raises ValueError: If ``optimize`` is not a boolean.
:raises ValueError: If ``no_overlap`` is not a boolean.
:raises NotImplementedError: If ``no_overlap`` is ``True``.
"""
super().__init__()
@@ -119,12 +116,17 @@ class BaseContinuousConv(torch.nn.Module, metaclass=ABCMeta):
class DefaultKernel(torch.nn.Module):
"""
TODO
The default kernel.
"""
def __init__(self, input_dim, output_dim):
"""
TODO
Initialization of the :class:`DefaultKernel` class.
:param int input_dim: The input dimension.
:param int output_dim: The output dimension.
:raises ValueError: If ``input_dim`` is not an integer.
:raises ValueError: If ``output_dim`` is not an integer.
"""
super().__init__()
assert isinstance(input_dim, int)
@@ -139,65 +141,93 @@ class BaseContinuousConv(torch.nn.Module, metaclass=ABCMeta):
def forward(self, x):
"""
TODO
Forward pass.
:param torch.Tensor x: The input data.
:return: The output data.
:rtype: torch.Tensor
"""
return self._model(x)
@property
def net(self):
"""
TODO
The neural network for inner parametrization.
:return: The neural network.
:rtype: torch.nn.Module
"""
return self._net
@property
def stride(self):
"""
TODO
The stride of the filter.
:return: The stride of the filter.
:rtype: dict
"""
return self._stride
@property
def filter_dim(self):
"""
TODO
The shape of the filter.
:return: The shape of the filter.
:rtype: torch.Tensor
"""
return self._dim
@property
def input_numb_field(self):
"""
TODO
The number of input fields.
:return: The number of input fields.
:rtype: int
"""
return self._input_numb_field
@property
def output_numb_field(self):
"""
TODO
The number of output fields.
:return: The number of output fields.
:rtype: int
"""
return self._output_numb_field
@abstractmethod
def forward(self, X):
"""
TODO
Forward pass.
:param torch.Tensor X: The input data.
"""
@abstractmethod
def transpose_overlap(self, X):
"""
TODO
Transpose the convolution with overlap.
:param torch.Tensor X: The input data.
"""
@abstractmethod
def transpose_no_overlap(self, X):
"""
TODO
Transpose the convolution without overlap.
:param torch.Tensor X: The input data.
"""
@abstractmethod
def _initialize_convolution(self, X, type_):
"""
TODO
Initialize the convolution.
:param torch.Tensor X: The input data.
:param str type_: The type of initialization.
"""

View File

@@ -7,30 +7,27 @@ from .integral import Integral
class ContinuousConvBlock(BaseContinuousConv):
"""
Implementation of Continuous Convolutional operator.
The algorithm expects input to be in the form:
:math:`[B, N_{in}, N, D]`
where :math:`B` is the batch_size, :math:`N_{in}` is the number of input
fields, :math:`N` the number of points in the mesh, :math:`D` the dimension
of the problem. In particular:
r"""
Continuous Convolutional block.
The class expects the input to be in the form:
:math:`[B \times N_{in} \times N \times D]`, where :math:`B` is the
batch_size, :math:`N_{in}` is the number of input fields, :math:`N`
the number of points in the mesh, :math:`D` the dimension of the problem.
In particular:
* :math:`D` is the number of spatial variables + 1. The last column must
contain the field value. For example for 2D problems :math:`D=3` and
the tensor will be something like ``[first coordinate, second
coordinate, field value]``.
* :math:`N_{in}` represents the number of vectorial function presented.
For example a vectorial function :math:`f = [f_1, f_2]` will have
contain the field value.
* :math:`N_{in}` represents the number of function components.
For instance, a vectorial function :math:`f = [f_1, f_2]` has
:math:`N_{in}=2`.
.. seealso::
**Original reference**: Coscia, D., Meneghetti, L., Demo, N. et al.
*A continuous convolutional trainable filter for modelling
unstructured data*. Comput Mech 72, 253–265 (2023).
**Original reference**:
Coscia, D., Meneghetti, L., Demo, N. et al.
*A continuous convolutional trainable filter for modelling unstructured
data*. Comput Mech 72, 253-265 (2023).
DOI `<https://doi.org/10.1007/s00466-023-02291-1>`_
"""
def __init__(
@@ -44,53 +41,48 @@ class ContinuousConvBlock(BaseContinuousConv):
no_overlap=False,
):
"""
:param input_numb_field: Number of fields :math:`N_{in}` in the input.
:type input_numb_field: int
:param output_numb_field: Number of fields :math:`N_{out}` in the
output.
:type output_numb_field: int
:param filter_dim: Dimension of the filter.
:type filter_dim: tuple(int) | list(int)
:param stride: Stride for the filter.
:type stride: dict
:param model: Neural network for inner parametrization,
defaults to ``None``. If None, a default multilayer perceptron
of width three and size twenty with ReLU activation is used.
:type model: torch.nn.Module
:param optimize: Flag for performing optimization on the continuous
filter, defaults to False. The flag `optimize=True` should be
used only when the scatter datapoints are fixed through the
training. If torch model is in ``.eval()`` mode, the flag is
automatically set to False always.
:type optimize: bool
:param no_overlap: Flag for performing optimization on the transpose
continuous filter, defaults to False. The flag set to `True` should
be used only when the filter positions do not overlap for different
strides. RuntimeError will raise in case of non-compatible strides.
:type no_overlap: bool
Initialization of the :class:`ContinuousConvBlock` class.
:param int input_numb_field: The number of input fields.
:param int output_numb_field: The number of output fields.
:param filter_dim: The shape of the filter.
:type filter_dim: list[int] | tuple[int]
:param dict stride: The stride of the filter.
:param torch.nn.Module model: The neural network for inner
parametrization. Default is ``None``.
:param bool optimize: If ``True``, optimization is performed on the
continuous filter. It should be used only when the training points
are fixed. If ``model`` is in ``eval`` mode, it is reset to
``False``. Default is ``False``.
:param bool no_overlap: If ``True``, optimization is performed on the
transposed continuous filter. It should be used only when the filter
positions do not overlap for different strides.
Default is ``False``.
.. note::
Using `optimize=True` the filter can be use either in `forward`
or in `transpose` mode, not both. If `optimize=False` the same
filter can be used for both `transpose` and `forward` modes.
If ``optimize=True``, the filter can be used either in ``forward``
or in ``transpose`` mode, not both.
:Example:
>>> class MLP(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self. model = torch.nn.Sequential(
torch.nn.Linear(2, 8),
torch.nn.ReLU(),
torch.nn.Linear(8, 8),
torch.nn.ReLU(),
torch.nn.Linear(8, 1))
def forward(self, x):
return self.model(x)
... def __init__(self) -> None:
... super().__init__()
... self.model = torch.nn.Sequential(
... torch.nn.Linear(2, 8),
... torch.nn.ReLU(),
... torch.nn.Linear(8, 8),
... torch.nn.ReLU(),
... torch.nn.Linear(8, 1)
... )
... def forward(self, x):
... return self.model(x)
>>> dim = [3, 3]
>>> stride = {"domain": [10, 10],
"start": [0, 0],
"jumps": [3, 3],
"direction": [1, 1.]}
>>> stride = {
... "domain": [10, 10],
... "start": [0, 0],
... "jumps": [3, 3],
... "direction": [1, 1.]
... }
>>> conv = ContinuousConv2D(1, 2, dim, stride, MLP)
>>> conv
ContinuousConv2D(
@@ -116,7 +108,6 @@ class ContinuousConvBlock(BaseContinuousConv):
)
)
"""
super().__init__(
input_numb_field=input_numb_field,
output_numb_field=output_numb_field,
@@ -143,13 +134,13 @@ class ContinuousConvBlock(BaseContinuousConv):
def _spawn_networks(self, model):
"""
Private method to create a collection of kernels
Create a collection of kernels.
:param model: A :class:`torch.nn.Module` model in form of Object class.
:type model: torch.nn.Module
:return: List of :class:`torch.nn.Module` models.
:param torch.nn.Module model: A neural network model.
:raises ValueError: If the model is not a subclass of
``torch.nn.Module``.
:return: A list of models.
:rtype: torch.nn.ModuleList
"""
nets = []
if self._net is None:
@@ -176,13 +167,11 @@ class ContinuousConvBlock(BaseContinuousConv):
def _extract_mapped_points(self, batch_idx, index, x):
"""
Priviate method to extract mapped points in the filter
Extract mapped points in the filter.
:param x: Input tensor of shape ``[channel, N, dim]``
:type x: torch.Tensor
:param torch.Tensor x: The input tensor of shape ``[channel, N, dim]``.
:return: The mapped points and indices for each channel.
:rtype: torch.Tensor, list
:rtype: tuple
"""
mapped_points = []
indeces_channels = []
@@ -218,11 +207,9 @@ class ContinuousConvBlock(BaseContinuousConv):
def _find_index(self, X):
"""
Private method to extract indeces for convolution.
:param X: Input tensor, as in ContinuousConvBlock ``__init__``.
:type X: torch.Tensor
Extract indices for convolution.
:param torch.Tensor X: The input tensor.
"""
# append the index for each stride
index = []
@@ -236,11 +223,9 @@ class ContinuousConvBlock(BaseContinuousConv):
def _make_grid_forward(self, X):
"""
Private method to create forward convolution grid.
:param X: Input tensor, as in ContinuousConvBlock docstring.
:type X: torch.Tensor
Create forward convolution grid.
:param torch.Tensor X: The input tensor.
"""
# filter dimension + number of points in output grid
filter_dim = len(self._dim)
@@ -264,12 +249,9 @@ class ContinuousConvBlock(BaseContinuousConv):
def _make_grid_transpose(self, X):
"""
Private method to create transpose convolution grid.
:param X: Input tensor, as in ContinuousConvBlock docstring.
:type X: torch.Tensor
Create transpose convolution grid.
:param torch.Tensor X: The input tensor.
"""
# initialize to all zeros
tmp = torch.zeros_like(X).as_subclass(torch.Tensor)
@@ -280,14 +262,12 @@ class ContinuousConvBlock(BaseContinuousConv):
def _make_grid(self, X, type_):
"""
Private method to create convolution grid.
:param X: Input tensor, as in ContinuousConvBlock docstring.
:type X: torch.Tensor
:param type: Type of convolution, ``['forward', 'inverse']`` the
possibilities.
:type type: str
Create convolution grid.
:param torch.Tensor X: The input tensor.
:param str type_: The type of convolution.
Available options are: ``forward`` and ``inverse``.
:raises TypeError: If the type is not in the available options.
"""
# choose the type of convolution
if type_ == "forward":
@@ -300,15 +280,12 @@ class ContinuousConvBlock(BaseContinuousConv):
def _initialize_convolution(self, X, type_="forward"):
"""
Private method to intialize the convolution.
The convolution is initialized by setting a grid and
calculate the index for finding the points inside the
filter.
Initialize the convolution by setting a grid and computing the index to
find the points inside the filter.
:param X: Input tensor, as in ContinuousConvBlock docstring.
:type X: torch.Tensor
:param str type: type of convolution, ``['forward', 'inverse'] ``the
possibilities.
:param torch.Tensor X: The input tensor.
:param str type_: The type of convolution. Available options are:
``forward`` and ``inverse``. Default is ``forward``.
"""
# variable for the convolution
@@ -319,11 +296,10 @@ class ContinuousConvBlock(BaseContinuousConv):
def forward(self, X):
"""
Forward pass in the convolutional layer.
Forward pass.
:param x: Input data for the convolution :math:`[B, N_{in}, N, D]`.
:type x: torch.Tensor
:return: Convolution output :math:`[B, N_{out}, N, D]`.
:param torch.Tensor X: The input tensor.
:return: The output tensor.
:rtype: torch.Tensor
"""
@@ -381,25 +357,14 @@ class ContinuousConvBlock(BaseContinuousConv):
def transpose_no_overlap(self, integrals, X):
"""
Transpose pass in the layer for no-overlapping filters
Transpose pass in the layer for no-overlapping filters.
:param integrals: Weights for the transpose convolution. Shape
:math:`[B, N_{in}, N]`
where B is the batch_size, :math`N_{in}` is the number of input
fields, :math:`N` the number of points in the mesh, D the dimension
of the problem.
:type integral: torch.tensor
:param X: Input data. Expect tensor of shape
:math:`[B, N_{in}, M, D]` where :math:`B` is the batch_size,
:math`N_{in}`is the number of input fields, :math:`M` the number of
points
in the mesh, :math:`D` the dimension of the problem.
:type X: torch.Tensor
:return: Feed forward transpose convolution. Tensor of shape
:math:`[B, N_{out}, M, D]` where :math:`B` is the batch_size,
:math`N_{out}`is the number of input fields, :math:`M` the number of
points
in the mesh, :math:`D` the dimension of the problem.
:param torch.Tensor integrals: The weights for the transpose convolution.
Expected shape :math:`[B, N_{in}, N]`.
:param torch.Tensor X: The input data.
Expected shape :math:`[B, N_{in}, M, D]`.
:return: Feed forward transpose convolution.
Expected shape: :math:`[B, N_{out}, M, D]`.
:rtype: torch.Tensor
.. note::
@@ -466,25 +431,14 @@ class ContinuousConvBlock(BaseContinuousConv):
def transpose_overlap(self, integrals, X):
"""
Transpose pass in the layer for overlapping filters
Transpose pass in the layer for overlapping filters.
:param integrals: Weights for the transpose convolution. Shape
:math:`[B, N_{in}, N]`
where B is the batch_size, :math`N_{in}` is the number of input
fields, :math:`N` the number of points in the mesh, D the dimension
of the problem.
:type integral: torch.tensor
:param X: Input data. Expect tensor of shape
:math:`[B, N_{in}, M, D]` where :math:`B` is the batch_size,
:math`N_{in}`is the number of input fields, :math:`M` the number of
points
in the mesh, :math:`D` the dimension of the problem.
:type X: torch.Tensor
:return: Feed forward transpose convolution. Tensor of shape
:math:`[B, N_{out}, M, D]` where :math:`B` is the batch_size,
:math`N_{out}`is the number of input fields, :math:`M` the number of
points
in the mesh, :math:`D` the dimension of the problem.
:param torch.Tensor integrals: The weights for the transpose convolution.
Expected shape :math:`[B, N_{in}, N]`.
:param torch.Tensor X: The input data.
Expected shape :math:`[B, N_{in}, M, D]`.
:return: Feed forward transpose convolution.
Expected shape: :math:`[B, N_{out}, M, D]`.
:rtype: torch.Tensor
.. note:: This function is automatically called when ``.transpose()``

View File

@@ -1,4 +1,4 @@
"""Embedding modulus."""
"""Modules for the Embedding blocks."""
import torch
from pina.utils import check_consistency
@@ -6,20 +6,18 @@ from pina.utils import check_consistency
class PeriodicBoundaryEmbedding(torch.nn.Module):
r"""
Imposing hard constraint periodic boundary conditions by embedding the
Enforcing hard-constrained periodic boundary conditions by embedding the
input.
A periodic function :math:`u:\mathbb{R}^{\rm{in}}
\rightarrow\mathbb{R}^{\rm{out}}` periodic in the spatial
coordinates :math:`\mathbf{x}` with periods :math:`\mathbf{L}` is such that:
A function :math:`u:\mathbb{R}^{\rm{in}} \rightarrow\mathbb{R}^{\rm{out}}`
is periodic with respect to the spatial coordinates :math:`\mathbf{x}`
with period :math:`\mathbf{L}` if:
.. math::
u(\mathbf{x}) = u(\mathbf{x} + n \mathbf{L})\;\;
\forall n\in\mathbb{N}.
The :meth:`PeriodicBoundaryEmbedding` augments the input such that the
periodic conditonsis guarantee. The input is augmented by the following
formula:
The :class:`PeriodicBoundaryEmbedding` augments the input as follows:
.. math::
\mathbf{x} \rightarrow \tilde{\mathbf{x}} = \left[1,
@@ -32,44 +30,48 @@ class PeriodicBoundaryEmbedding(torch.nn.Module):
.. seealso::
**Original reference**:
1. Dong, Suchuan, and Naxian Ni (2021). *A method for representing
periodic functions and enforcing exactly periodic boundary
conditions with deep neural networks*. Journal of Computational
Physics 435, 110242.
1. Dong, Suchuan, and Naxian Ni (2021).
*A method for representing periodic functions and enforcing
exactly periodic boundary conditions with deep neural networks*.
Journal of Computational Physics 435, 110242.
DOI: `10.1016/j.jcp.2021.110242.
<https://doi.org/10.1016/j.jcp.2021.110242>`_
2. Wang, S., Sankaran, S., Wang, H., & Perdikaris, P. (2023). *An
expert's guide to training physics-informed neural networks*.
2. Wang, S., Sankaran, S., Wang, H., & Perdikaris, P. (2023).
*An expert's guide to training physics-informed neural
networks*.
DOI: `arXiv preprint arXiv:2308.08468.
<https://arxiv.org/abs/2308.08468>`_
.. warning::
The embedding is a truncated fourier expansion, and only ensures
function PBC and not for its derivatives. Ensuring approximate
periodicity in
the derivatives of :math:`u` can be done, and extensive
tests have shown (also in the reference papers) that this implementation
can correctly compute the PBC on the derivatives up to the order
:math:`\sim 2,3`, while it is not guarantee the periodicity for
:math:`>3`. The PINA code is tested only for function PBC and not for
its derivatives.
The embedding is a truncated Fourier expansion, and enforces periodic
boundary conditions only for the function, not for its derivatives.
Enforcement of the approximate periodicity in the derivatives can be
performed. Extensive tests have shown (see referenced papers) that this
implementation can correctly enforce the periodic boundary conditions on
the derivatives up to the order :math:`\sim 2,3`. This is not guaranteed
for orders :math:`>3`. The PINA module is tested only for periodic
boundary conditions on the function itself.
"""
def __init__(self, input_dimension, periods, output_dimension=None):
"""
:param int input_dimension: The dimension of the input tensor, it can
be checked with `tensor.ndim` method.
:param float | int | dict periods: The periodicity in each dimension for
the input data. If ``float`` or ``int`` is passed,
the period is assumed constant for all the dimensions of the data.
If a ``dict`` is passed the `dict.values` represent periods,
while the ``dict.keys`` represent the dimension where the
periodicity is applied. The `dict.keys` can either be `int`
if working with ``torch.Tensor`` or ``str`` if
working with ``LabelTensor``.
Initialization of the :class:`PeriodicBoundaryEmbedding` block.
:param int input_dimension: The dimension of the input tensor.
:param periods: The periodicity with respect to each dimension for the
input data. If ``float`` or ``int`` is passed, the period is assumed
to be constant over all the dimensions of the data. If a ``dict`` is
passed, the ``dict.values`` represent the periods, while the ``dict.keys``
represent the dimensions where the periodicity is enforced.
The ``dict.keys`` can either be ``int`` if working with
:class:`torch.Tensor`, or ``str`` if working with
:class:`pina.label_tensor.LabelTensor`.
:type periods: float | int | dict
:param int output_dimension: The dimension of the output after the
fourier embedding. If not ``None`` a ``torch.nn.Linear`` layer
fourier embedding. If not ``None``, a :class:`torch.nn.Linear` layer
is applied to the fourier embedding output to match the desired
dimensionality, default ``None``.
dimensionality. Default is ``None``.
:raises TypeError: If the periods dict is not consistent.
"""
super().__init__()
@@ -98,9 +100,10 @@ class PeriodicBoundaryEmbedding(torch.nn.Module):
def forward(self, x):
"""
Forward pass to compute the periodic boundary conditions embedding.
Forward pass.
:param torch.Tensor x: Input tensor.
:param x: The input tensor.
:type x: torch.Tensor | LabelTensor
:return: Periodic embedding of the input.
:rtype: torch.Tensor
"""
@@ -125,12 +128,16 @@ class PeriodicBoundaryEmbedding(torch.nn.Module):
def _get_vars(self, x, indeces):
"""
Get variables from input tensor ordered by specific indeces.
Get the variables from the input tensor, ordered by specific indices.
:param torch.Tensor x: The input tensor to extract.
:param list[int] | list[str] indeces: List of indeces to extract.
:return: The extracted tensor given the indeces.
:rtype: torch.Tensor
:param x: The input tensor from which to extract.
:type x: torch.Tensor | LabelTensor
:param indeces: The indices to extract.
:type indeces: list[int] | list[str]
:raises RuntimeError: If the indices are not consistent.
:raises RuntimeError: If the extraction is not possible.
:return: The extracted tensor.
:rtype: torch.Tensor | LabelTensor
"""
if isinstance(indeces[0], str):
try:
@@ -146,75 +153,79 @@ class PeriodicBoundaryEmbedding(torch.nn.Module):
return x[..., indeces]
else:
raise RuntimeError(
"Not able to extract right indeces for tensor."
"Not able to extract correct indeces for tensor."
" For more information refer to warning in the documentation."
)
@property
def period(self):
"""
The period of the periodic function to approximate.
The period of the function.
:return: The period of the function.
:rtype: dict | float | int
"""
return self._period
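The mechanism behind the hard constraint can be illustrated with a minimal sketch: a network fed only with 1, cos(2*pi*x/L) and sin(2*pi*x/L) is L-periodic in x by construction. This is a generic illustration of the coordinate-embedding idea from Dong and Ni (2021), not the exact augmentation used by :class:`PeriodicBoundaryEmbedding` (whose full formula is truncated in this hunk); the network and sizes are made up.

import math
import torch

L = 2.0                                            # period along x
x = torch.linspace(0.0, 4.0, 200).unsqueeze(-1)    # [200, 1]

# periodic coordinate embedding: any network fed with these features
# is automatically L-periodic in x
features = torch.cat(
    (
        torch.ones_like(x),
        torch.cos(2 * math.pi * x / L),
        torch.sin(2 * math.pi * x / L),
    ),
    dim=-1,
)

net = torch.nn.Sequential(
    torch.nn.Linear(3, 16), torch.nn.Tanh(), torch.nn.Linear(16, 1)
)
u = net(features)                                  # u(x) == u(x + L) up to round-off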
class FourierFeatureEmbedding(torch.nn.Module):
"""
Fourier Feature Embedding class for encoding input features
using random Fourier features.
r"""
Fourier Feature Embedding class to encode the input features using random
Fourier features.
This class applies a Fourier transformation to the input features, which can
help in learning high-frequency variations in data. The class supports
multiscale feature embedding, creating embeddings for each scale specified
by the ``sigma`` parameter.
The Fourier Feature Embedding augments the input features as follows
(3.10 of original paper):
.. math::
\mathbf{x} \rightarrow \tilde{\mathbf{x}} = \left[
\cos\left( \mathbf{B} \mathbf{x} \right),
\sin\left( \mathbf{B} \mathbf{x} \right)\right],
where :math:`\mathbf{B}_{ij} \sim \mathcal{N}(0, \sigma^2)`.
If multiple ``sigma`` are passed, the resulting embeddings are concatenated:
.. math::
\mathbf{x} \rightarrow \tilde{\mathbf{x}} = \left[
\cos\left( \mathbf{B}^1 \mathbf{x} \right),
\sin\left( \mathbf{B}^1 \mathbf{x} \right),
\cos\left( \mathbf{B}^2 \mathbf{x} \right),
\sin\left( \mathbf{B}^2 \mathbf{x} \right),
\dots,
\cos\left( \mathbf{B}^M \mathbf{x} \right),
\sin\left( \mathbf{B}^M \mathbf{x} \right)\right],
where :math:`\mathbf{B}^k_{ij} \sim \mathcal{N}(0, \sigma_k^2) \quad k \in
(1, \dots, M)`.
.. seealso::
**Original reference**:
Wang, S., Wang, H., and Perdikaris, P. (2021).
*On the eigenvector bias of Fourier feature networks: From regression to
solving multi-scale PDEs with physics-informed neural networks.*
Computer Methods in Applied Mechanics and Engineering 384 (2021):
113938.
DOI: `10.1016/j.cma.2021.113938.
<https://doi.org/10.1016/j.cma.2021.113938>`_
"""
def __init__(self, input_dimension, output_dimension, sigma):
r"""
This class applies a Fourier transformation to the input features,
which can help in learning high-frequency variations in data.
If multiple sigma are provided, the class
supports multiscale feature embedding, creating embeddings for
each scale specified by the sigma.
"""
Initialization of the :class:`FourierFeatureEmbedding` block.
The :obj:`FourierFeatureEmbedding` augments the input
by the following formula (3.10 of original paper):
.. math::
\mathbf{x} \rightarrow \tilde{\mathbf{x}} = \left[
\cos\left( \mathbf{B} \mathbf{x} \right),
\sin\left( \mathbf{B} \mathbf{x} \right)\right],
where :math:`\mathbf{B}_{ij} \sim \mathcal{N}(0, \sigma^2)`.
In case multiple ``sigma`` are passed, the resulting embeddings
are concateneted:
.. math::
\mathbf{x} \rightarrow \tilde{\mathbf{x}} = \left[
\cos\left( \mathbf{B}^1 \mathbf{x} \right),
\sin\left( \mathbf{B}^1 \mathbf{x} \right),
\cos\left( \mathbf{B}^2 \mathbf{x} \right),
\sin\left( \mathbf{B}^3 \mathbf{x} \right),
\dots,
\cos\left( \mathbf{B}^M \mathbf{x} \right),
\sin\left( \mathbf{B}^M \mathbf{x} \right)\right],
where :math:`\mathbf{B}^k_{ij} \sim \mathcal{N}(0, \sigma_k^2) \quad
k \in (1, \dots, M)`.
.. seealso::
**Original reference**:
Wang, Sifan, Hanwen Wang, and Paris Perdikaris. *On the eigenvector
bias of Fourier feature networks: From regression to solving
multi-scale PDEs with physics-informed neural networks.*
Computer Methods in Applied Mechanics and
Engineering 384 (2021): 113938.
DOI: `10.1016/j.cma.2021.113938.
<https://doi.org/10.1016/j.cma.2021.113938>`_
:param int input_dimension: The input vector dimension of the layer.
:param int output_dimension: The output dimension of the layer. The
output is obtained as a concatenation of the cosine and sine
embedding, hence it must be a multiple of two (even number).
:param int | float sigma: The standard deviation used for the
Fourier Embedding. This value must reflect the granularity of the
scale in the differential equation solution.
:param int input_dimension: The dimension of the input tensor.
:param int output_dimension: The dimension of the output tensor. The
output is obtained as a concatenation of cosine and sine embeddings.
:param sigma: The standard deviation used for the Fourier Embedding.
This value must reflect the granularity of the scale in the
differential equation solution.
:type sigma: float | int
:raises RuntimeError: If the output dimension is not an even number.
"""
super().__init__()
@@ -242,10 +253,11 @@ class FourierFeatureEmbedding(torch.nn.Module):
def forward(self, x):
"""
Forward pass to compute the fourier embedding.
Forward pass.
:param torch.Tensor x: Input tensor.
:return: Fourier embeddings of the input.
:param x: The input tensor.
:type x: torch.Tensor | LabelTensor
:return: Fourier embedding of the input.
:rtype: torch.Tensor
"""
# compute random matrix multiplication
@@ -259,6 +271,9 @@ class FourierFeatureEmbedding(torch.nn.Module):
@property
def sigma(self):
"""
Returning the variance of the sampled matrix for Fourier Embedding.
The standard deviation used for the Fourier Embedding.
:return: The standard deviation used for the Fourier Embedding.
:rtype: float | int
"""
return self._sigma
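The embedding formula above can be reproduced directly: sample :math:`\mathbf{B}` with entries drawn from :math:`\mathcal{N}(0, \sigma^2)` and concatenate the cosine and sine projections. The sketch below is a plain-PyTorch illustration with made-up sizes, not the internals of :class:`FourierFeatureEmbedding`.

import torch

input_dimension, output_dimension, sigma = 2, 64, 10.0
assert output_dimension % 2 == 0                   # cosine and sine halves

# B_ij ~ N(0, sigma^2), sampled once and kept fixed (not trainable)
B = sigma * torch.randn(output_dimension // 2, input_dimension)

x = torch.rand(128, input_dimension)               # batch of input points
proj = x @ B.T                                     # [128, output_dimension // 2]
embedding = torch.cat((torch.cos(proj), torch.sin(proj)), dim=-1)   # [128, 64]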

View File

@@ -1,5 +1,5 @@
"""
Module for Fourier Block implementation.
Module for the Fourier Neural Operator Block class.
"""
import torch
@@ -15,15 +15,19 @@ from .spectral import (
class FourierBlock1D(nn.Module):
"""
Fourier block implementation for three dimensional
input tensor. The combination of Fourier blocks
make up the Fourier Neural Operator
The inner block of the Fourier Neural Operator for 1-dimensional input
tensors.
The module computes the spectral convolution of the input with a linear
kernel in the Fourier space, and then maps the input back to the physical
space. The output is then added to a linear transformation of the input in
the physical space. Finally, an activation function is applied to the output.
.. seealso::
**Original reference**: Li, Z., Kovachki, N., Azizzadenesheli, K.,
Liu, B., Bhattacharya, K., Stuart, A., & Anandkumar, A. (2020). *Fourier
neural operator for parametric partial differential equations*.
Liu, B., Bhattacharya, K., Stuart, A., & Anandkumar, A. (2020).
*Fourier neural operator for parametric partial differential equations*.
DOI: `arXiv preprint arXiv:2010.08895.
<https://arxiv.org/abs/2010.08895>`_
@@ -36,22 +40,16 @@ class FourierBlock1D(nn.Module):
n_modes,
activation=torch.nn.Tanh,
):
"""
PINA implementation of Fourier block one dimension. The module computes
the spectral convolution of the input with a linear kernel in the
fourier space, and then it maps the input back to the physical
space. The output is then added to a Linear tranformation of the
input in the physical space. Finally an activation function is
applied to the output.
The block expects an input of size ``[batch, input_numb_fields, N]``
and returns an output of size ``[batch, output_numb_fields, N]``.
r"""
Initialization of the :class:`FourierBlock1D` class.
:param int input_numb_fields: The number of channels for the input.
:param int output_numb_fields: The number of channels for the output.
:param list | tuple n_modes: Number of modes to select for each
dimension. It must be at most equal to the ``floor(N/2)+1``.
:param n_modes: The number of modes to select for each dimension.
It must be at most equal to :math:`\lfloor N_x/2 \rfloor + 1`.
:type n_modes: list[int] | tuple[int]
:param torch.nn.Module activation: The activation function.
Default is :class:`torch.nn.Tanh`.
"""
super().__init__()
@@ -70,15 +68,11 @@ class FourierBlock1D(nn.Module):
def forward(self, x):
"""
Forward computation for Fourier Block. It performs a spectral
convolution and a linear transformation of the input and sum the
results.
Forward pass of the block. It performs a spectral convolution and a
linear transformation of the input. Then, it sums the results.
:param x: The input tensor for fourier block, expect of size
``[batch, input_numb_fields, x]``.
:type x: torch.Tensor
:return: The output tensor obtained from the
fourier block of size ``[batch, output_numb_fields, x]``.
:param torch.Tensor x: The input tensor for performing the computation.
:return: The output tensor.
:rtype: torch.Tensor
"""
return self._activation(self._spectral_conv(x) + self._linear(x))
@@ -86,18 +80,21 @@ class FourierBlock1D(nn.Module):
class FourierBlock2D(nn.Module):
"""
Fourier block implementation for two dimensional
input tensor. The combination of Fourier blocks
make up the Fourier Neural Operator
The inner block of the Fourier Neural Operator for 2-dimensional input
tensors.
The module computes the spectral convolution of the input with a linear
kernel in the Fourier space, and then maps the input back to the physical
space. The output is then added to a linear transformation of the input in
the physical space. Finally, an activation function is applied to the output.
.. seealso::
**Original reference**: Li, Zongyi, et al.
*Fourier neural operator for parametric partial
differential equations*. arXiv preprint
arXiv:2010.08895 (2020)
<https://arxiv.org/abs/2010.08895.pdf>`_.
**Original reference**: Li, Z., Kovachki, N., Azizzadenesheli, K.,
Liu, B., Bhattacharya, K., Stuart, A., & Anandkumar, A. (2020).
*Fourier neural operator for parametric partial differential equations*.
DOI: `arXiv preprint arXiv:2010.08895.
<https://arxiv.org/abs/2010.08895>`_
"""
def __init__(
@@ -107,24 +104,17 @@ class FourierBlock2D(nn.Module):
n_modes,
activation=torch.nn.Tanh,
):
"""
PINA implementation of Fourier block two dimensions. The module computes
the spectral convolution of the input with a linear kernel in the
fourier space, and then it maps the input back to the physical
space. The output is then added to a Linear tranformation of the
input in the physical space. Finally an activation function is
applied to the output.
The block expects an input of size
``[batch, input_numb_fields, Nx, Ny]`` and returns an output of size
``[batch, output_numb_fields, Nx, Ny]``.
r"""
Initialization of the :class:`FourierBlock2D` class.
:param int input_numb_fields: The number of channels for the input.
:param int output_numb_fields: The number of channels for the output.
:param list | tuple n_modes: Number of modes to select for each
dimension. It must be at most equal to the ``floor(Nx/2)+1``
and ``floor(Ny/2)+1``.
:param n_modes: The number of modes to select for each dimension.
It must be at most equal to :math:`\lfloor N_x/2 \rfloor + 1` and
:math:`\lfloor N_y/2 \rfloor + 1`.
:type n_modes: list[int] | tuple[int]
:param torch.nn.Module activation: The activation function.
Default is :class:`torch.nn.Tanh`.
"""
super().__init__()
@@ -142,15 +132,11 @@ class FourierBlock2D(nn.Module):
def forward(self, x):
"""
Forward computation for Fourier Block. It performs a spectral
convolution and a linear transformation of the input and sum the
results.
Forward pass of the block. It performs a spectral convolution and a
linear transformation of the input. Then, it sums the results.
:param x: The input tensor for fourier block, expect of size
``[batch, input_numb_fields, x, y]``.
:type x: torch.Tensor
:return: The output tensor obtained from the
fourier block of size ``[batch, output_numb_fields, x, y, z]``.
:param torch.Tensor x: The input tensor for performing the computation.
:return: The output tensor.
:rtype: torch.Tensor
"""
return self._activation(self._spectral_conv(x) + self._linear(x))
@@ -158,18 +144,21 @@ class FourierBlock2D(nn.Module):
class FourierBlock3D(nn.Module):
"""
Fourier block implementation for three dimensional
input tensor. The combination of Fourier blocks
make up the Fourier Neural Operator
The inner block of the Fourier Neural Operator for 3-dimensional input
tensors.
The module computes the spectral convolution of the input with a linear
kernel in the Fourier space, and then maps the input back to the physical
space. The output is then added to a linear transformation of the input in
the physical space. Finally, an activation function is applied to the output.
.. seealso::
**Original reference**: Li, Zongyi, et al.
*Fourier neural operator for parametric partial
differential equations*. arXiv preprint
arXiv:2010.08895 (2020)
<https://arxiv.org/abs/2010.08895.pdf>`_.
**Original reference**: Li, Z., Kovachki, N., Azizzadenesheli, K.,
Liu, B., Bhattacharya, K., Stuart, A., & Anandkumar, A. (2020).
*Fourier neural operator for parametric partial differential equations*.
DOI: `arXiv preprint arXiv:2010.08895.
<https://arxiv.org/abs/2010.08895>`_
"""
def __init__(
@@ -179,24 +168,17 @@ class FourierBlock3D(nn.Module):
n_modes,
activation=torch.nn.Tanh,
):
"""
PINA implementation of Fourier block three dimensions. The module
computes the spectral convolution of the input with a linear kernel in
the fourier space, and then it maps the input back to the physical
space. The output is then added to a Linear tranformation of the
input in the physical space. Finally an activation function is
applied to the output.
The block expects an input of size
``[batch, input_numb_fields, Nx, Ny, Nz]`` and returns an output of size
``[batch, output_numb_fields, Nx, Ny, Nz]``.
r"""
Initialization of the :class:`FourierBlock3D` class.
:param int input_numb_fields: The number of channels for the input.
:param int output_numb_fields: The number of channels for the output.
:param list | tuple n_modes: Number of modes to select for each
dimension. It must be at most equal to the ``floor(Nx/2)+1``,
``floor(Ny/2)+1`` and ``floor(Nz/2)+1``.
:param n_modes: The number of modes to select for each dimension.
It must be at most equal to :math:`\lfloor N_x/2 \rfloor + 1`,
:math:`\lfloor N_y/2 \rfloor + 1`, and :math:`\lfloor N_z/2 \rfloor + 1`.
:type n_modes: list[int] | tuple[int]
:param torch.nn.Module activation: The activation function.
Default is :class:`torch.nn.Tanh`.
"""
super().__init__()
@@ -214,15 +196,11 @@ class FourierBlock3D(nn.Module):
def forward(self, x):
"""
Forward computation for Fourier Block. It performs a spectral
convolution and a linear transformation of the input and sum the
results.
Forward pass of the block. It performs a spectral convolution and a
linear transformation of the input. Then, it sums the results.
:param x: The input tensor for fourier block, expect of size
``[batch, input_numb_fields, x, y, z]``.
:type x: torch.Tensor
:return: The output tensor obtained from the
fourier block of size ``[batch, output_numb_fields, x, y, z]``.
:param torch.Tensor x: The input tensor for performing the computation.
:return: The output tensor.
:rtype: torch.Tensor
"""
return self._activation(self._spectral_conv(x) + self._linear(x))
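The structure shared by the three blocks (a spectral convolution in Fourier space plus a pointwise linear path, summed and passed through the activation) can be sketched in 1D as follows. This is a generic FNO-style sketch under the stated shape conventions, not PINA's spectral convolution implementation; the class name, mode handling, and initialization are illustrative.

import torch
from torch import nn

class TinyFourierBlock1D(nn.Module):
    # minimal sketch: y = act( irfft( R * rfft(x)[..., :modes] ) + W x )
    def __init__(self, in_channels, out_channels, n_modes, activation=nn.Tanh):
        super().__init__()
        self.n_modes = n_modes                     # must be <= floor(N/2) + 1
        scale = 1.0 / (in_channels * out_channels)
        self.weights = nn.Parameter(
            scale * torch.randn(in_channels, out_channels, n_modes, dtype=torch.cfloat)
        )
        self.linear = nn.Conv1d(in_channels, out_channels, kernel_size=1)
        self.activation = activation()

    def forward(self, x):                          # x: [batch, in_channels, N]
        x_ft = torch.fft.rfft(x)                   # [batch, in_channels, N // 2 + 1]
        out_ft = torch.zeros(
            x.shape[0], self.weights.shape[1], x_ft.shape[-1],
            dtype=torch.cfloat, device=x.device,
        )
        out_ft[..., : self.n_modes] = torch.einsum(
            "bim,iom->bom", x_ft[..., : self.n_modes], self.weights
        )
        spectral = torch.fft.irfft(out_ft, n=x.shape[-1])
        return self.activation(spectral + self.linear(x))

block = TinyFourierBlock1D(in_channels=3, out_channels=5, n_modes=16)
y = block(torch.rand(8, 3, 64))                    # -> [8, 5, 64]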

View File

@@ -1,5 +1,5 @@
"""
Module containing the Graph Integral Layer class.
Module for the Graph Neural Operator Block class.
"""
import torch
@@ -8,7 +8,7 @@ from torch_geometric.nn import MessagePassing
class GNOBlock(MessagePassing):
"""
Graph Neural Operator (GNO) Block using PyG MessagePassing.
The inner block of the Graph Neural Operator, based on Message Passing.
"""
def __init__(
@@ -22,11 +22,22 @@ class GNOBlock(MessagePassing):
external_func=None,
):
"""
Initialize the GNOBlock.
Initialization of the :class:`GNOBlock` class.
:param width: Hidden dimension of node features.
:param edges_features: Number of edge features.
:param n_layers: Number of layers in edge transformation MLP.
:param int width: The width of the kernel.
:param int edge_features: The number of edge features.
:param int n_layers: The number of kernel layers. Default is ``2``.
:param layers: A list specifying the number of neurons for each layer
of the neural network. If not ``None``, it overrides the
``inner_size`` and ``n_layers`` parameters. Default is ``None``.
:type layers: list[int] | tuple[int]
:param int inner_size: The size of the inner layer. Default is ``None``.
:param torch.nn.Module internal_func: The activation function applied to
the output of each layer. If ``None``, it uses the
:class:`torch.nn.Tanh` activation. Default is ``None``.
:param torch.nn.Module external_func: The activation function applied to
the output of the block. If ``None``, it uses the
:class:`torch.nn.Tanh` activation. Default is ``None``.
"""
from ...model.feed_forward import FeedForward
@@ -51,12 +62,13 @@ class GNOBlock(MessagePassing):
def message_and_aggregate(self, edge_index, x, edge_attr):
"""
Combines message and aggregation.
Combine messages and perform aggregation.
:param edge_index: COO format edge indices.
:param x: Node feature matrix [num_nodes, width].
:param edge_attr: Edge features [num_edges, edge_dim].
:return: Aggregated messages.
:param torch.Tensor edge_index: The edge index.
:param torch.Tensor x: The node feature matrix.
:param torch.Tensor edge_attr: The edge features.
:return: The aggregated messages.
:rtype: torch.Tensor
"""
# Edge features are transformed into a matrix of shape
# [num_edges, width, width]
@@ -68,27 +80,33 @@ class GNOBlock(MessagePassing):
def edge_update(self, edge_attr):
"""
Updates edge features.
Update edge features.
:param torch.Tensor edge_attr: The edge features.
:return: The updated edge features.
:rtype: torch.Tensor
"""
return edge_attr
def update(self, aggr_out, x):
"""
Updates node features.
Update node features.
:param aggr_out: Aggregated messages.
:param x: Node feature matrix.
:return: Updated node features.
:param torch.Tensor aggr_out: The aggregated messages.
:param torch.Tensor x: The node feature matrix.
:return: The updated node features.
:rtype: torch.Tensor
"""
return aggr_out + self.W(x)
def forward(self, x, edge_index, edge_attr):
"""
Forward pass of the GNOBlock.
Forward pass of the block.
:param x: Node features.
:param edge_index: Edge indices.
:param edge_attr: Edge features.
:return: Updated node features.
:param torch.Tensor x: The node features.
:param torch.Tensor edge_index: The edge indices.
:param torch.Tensor edge_attr: The edge features.
:return: The updated node features.
:rtype: torch.Tensor
"""
return self.func(self.propagate(edge_index, x=x, edge_attr=edge_attr))
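The documented update (aggregated kernel messages plus ``W(x)``, followed by the activation) can be written out for a small graph without ``torch_geometric``. The dense kernel network, the sum aggregation, and the sizes below are assumptions for illustration; the actual aggregation scheme and kernel architecture of :class:`GNOBlock` are not shown in this hunk.

import torch
from torch import nn

width, edge_features, num_nodes = 8, 3, 5
kernel = nn.Sequential(                            # edge features -> [width, width] matrix
    nn.Linear(edge_features, 32), nn.Tanh(), nn.Linear(32, width * width)
)
W = nn.Linear(width, width)

x = torch.rand(num_nodes, width)                   # node features
edge_index = torch.tensor([[0, 1, 2, 3],           # source nodes
                           [1, 2, 3, 4]])          # target nodes
edge_attr = torch.rand(edge_index.shape[1], edge_features)

# message: kappa(e_ij) @ x_j, summed over the incoming edges of each target node
msg = torch.einsum(
    "eab,eb->ea", kernel(edge_attr).view(-1, width, width), x[edge_index[0]]
)
aggr = torch.zeros_like(x).index_add_(0, edge_index[1], msg)
updated = torch.tanh(aggr + W(x))                  # node update, as in GNOBlock.update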

View File

@@ -1,5 +1,5 @@
"""
Module for performing integral for continuous convolution
Module to perform integration for continuous convolution.
"""
import torch
@@ -7,17 +7,18 @@ import torch
class Integral:
"""
Integral class for continous convolution
Class allowing integration for continuous convolution.
"""
def __init__(self, param):
"""
Initialize the integral class
Initialization of the :class:`Integral` class.
:param param: type of continuous convolution
:param param: The type of continuous convolution.
:type param: str
:raises TypeError: If the parameter is neither ``discrete``
nor ``continuous``.
"""
if param == "discrete":
self.make_integral = self.integral_param_disc
elif param == "continuous":
@@ -26,46 +27,47 @@ class Integral:
raise TypeError
def __call__(self, *args, **kwds):
"""
Call the integral function
:param list args: Arguments for the integral function.
:param dict kwds: Keyword arguments for the integral function.
:return: The integral of the input.
:rtype: torch.Tensor
"""
return self.make_integral(*args, **kwds)
def _prepend_zero(self, x):
"""Create bins for performing integral
"""
Create bins to perform integration.
:param x: input tensor
:type x: torch.tensor
:return: bins for integrals
:rtype: torch.tensor
:param torch.Tensor x: The input tensor.
:return: The bins for the integral.
:rtype: torch.Tensor
"""
return torch.cat((torch.zeros(1, dtype=x.dtype, device=x.device), x))
def integral_param_disc(self, x, y, idx):
"""Perform discretize integral
with discrete parameters
"""
Perform discrete integration with discrete parameters.
:param x: input vector
:type x: torch.tensor
:param y: input vector
:type y: torch.tensor
:param idx: indeces for different strides
:type idx: list
:return: integral
:rtype: torch.tensor
:param torch.Tensor x: The first input tensor.
:param torch.Tensor y: The second input tensor.
:param list[int] idx: The indices for different strides.
:return: The discrete integral.
:rtype: torch.Tensor
"""
cs_idxes = self._prepend_zero(torch.cumsum(torch.tensor(idx), 0))
cs = self._prepend_zero(torch.cumsum(x.flatten() * y.flatten(), 0))
return cs[cs_idxes[1:]] - cs[cs_idxes[:-1]]
def integral_param_cont(self, x, y, idx):
"""Perform discretize integral for continuous convolution
with continuous parameters
"""
Perform continuous integration with continuous parameters.
:param x: input vector
:type x: torch.tensor
:param y: input vector
:type y: torch.tensor
:param idx: indeces for different strides
:type idx: list
:return: integral
:rtype: torch.tensor
:param torch.Tensor x: The first input tensor.
:param torch.Tensor y: The second input tensor.
:param list[int] idx: The indices for different strides.
:raises NotImplementedError: The method is not implemented.
"""
raise NotImplementedError
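The cumulative-sum trick used by ``integral_param_disc`` above computes one dot product per stride segment in a single pass. A small worked example (the segment sizes are made up):

import torch

def prepend_zero(x):
    return torch.cat((torch.zeros(1, dtype=x.dtype, device=x.device), x))

x = torch.tensor([1.0, 2.0, 3.0, 4.0, 5.0])
y = torch.tensor([1.0, 1.0, 1.0, 2.0, 2.0])
idx = [2, 3]                                       # 2 points in the first stride, 3 in the second

cs_idxes = prepend_zero(torch.cumsum(torch.tensor(idx), 0))   # tensor([0, 2, 5])
cs = prepend_zero(torch.cumsum(x * y, 0))                     # running sum of x * y
integrals = cs[cs_idxes[1:]] - cs[cs_idxes[:-1]]              # tensor([ 3., 21.])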

View File

@@ -1,4 +1,4 @@
"""Module for Averaging Neural Operator Layer class."""
"""Module for the Low Rank Neural Operator Block class."""
import torch
@@ -6,30 +6,8 @@ from ...utils import check_consistency
class LowRankBlock(torch.nn.Module):
r"""
The PINA implementation of the inner layer of the Averaging Neural Operator.
The operator layer performs an affine transformation where the convolution
is approximated with a local average. Given the input function
:math:`v(x)\in\mathbb{R}^{\rm{emb}}` the layer computes
the operator update :math:`K(v)` as:
.. math::
K(v) = \sigma\left(Wv(x) + b + \sum_{i=1}^r \langle
\psi^{(i)} , v(x) \rangle \phi^{(i)} \right)
where:
* :math:`\mathbb{R}^{\rm{emb}}` is the embedding (hidden) size
corresponding to the ``hidden_size`` object
* :math:`\sigma` is a non-linear activation, corresponding to the
``func`` object
* :math:`W\in\mathbb{R}^{\rm{emb}\times\rm{emb}}` is a tunable matrix.
* :math:`b\in\mathbb{R}^{\rm{emb}}` is a tunable bias.
* :math:`\psi^{(i)}\in\mathbb{R}^{\rm{emb}}` and
:math:`\phi^{(i)}\in\mathbb{R}^{\rm{emb}}` are :math:`r` a low rank
basis functions mapping.
* :math:`b\in\mathbb{R}^{\rm{emb}}` is a tunable bias.
"""
The inner block of the Low Rank Neural Operator.
.. seealso::
@@ -38,7 +16,6 @@ class LowRankBlock(torch.nn.Module):
(2023). *Neural operator: Learning maps between function
spaces with applications to PDEs*. Journal of Machine Learning
Research, 24(89), 1-97.
"""
def __init__(
@@ -51,30 +28,25 @@ class LowRankBlock(torch.nn.Module):
func=torch.nn.Tanh,
bias=True,
):
"""
:param int input_dimensions: The number of input components of the
model.
Expected tensor shape of the form :math:`(*, d)`, where *
means any number of dimensions including none,
and :math:`d` the ``input_dimensions``.
:param int embedding_dimenion: Size of the embedding dimension of the
field.
:param int rank: The rank number of the basis approximation components
of the model. Expected tensor shape of the form :math:`(*, 2d)`,
where * means any number of dimensions including none,
and :math:`2d` the ``rank`` for both basis functions.
:param int inner_size: Number of neurons in the hidden layer(s) for the
basis function network. Default is 20.
:param int n_layers: Number of hidden layers. for the
basis function network. Default is 2.
:param func: The activation function to use for the
basis function network. If a single
:class:`torch.nn.Module` is passed, this is used as
activation function after any layers, except the last one.
If a list of Modules is passed,
they are used as activation functions at any layers, in order.
:param bool bias: If ``True`` the MLP will consider some bias for the
basis function network.
r"""
Initialization of the :class:`LowRankBlock` class.
:param int input_dimensions: The input dimension of the field.
:param int embedding_dimenion: The embedding dimension of the field.
:param int rank: The rank of the low rank approximation. The expected
value is :math:`2d`, where :math:`d` is the rank of each basis
function.
:param int inner_size: The number of neurons for each hidden layer in
the basis function neural network. Default is ``20``.
:param int n_layers: The number of hidden layers in the basis function
neural network. Default is ``2``.
:param func: The activation function. If a list is passed, it must have
the same length as ``n_layers``. If a single function is passed, it
is used for all layers, except for the last one.
Default is :class:`torch.nn.Tanh`.
:type func: torch.nn.Module | list[torch.nn.Module]
:param bool bias: If ``True`` bias is considered for the basis function
neural network. Default is ``True``.
"""
super().__init__()
from ..feed_forward import FeedForward
@@ -96,26 +68,16 @@ class LowRankBlock(torch.nn.Module):
def forward(self, x, coords):
r"""
Forward pass of the layer, it performs an affine transformation of
the field, and a low rank approximation by
doing a dot product of the basis
:math:`\psi^{(i)}` with the filed vector :math:`v`, and use this
coefficients to expand :math:`\phi^{(i)}` evaluated in the
spatial input :math:`x`.
Forward pass of the block. It performs an affine transformation of the
field, followed by a low rank approximation. The latter is performed by
means of a dot product of the basis :math:`\psi^{(i)}` with the vector
field :math:`v` to compute coefficients used to expand
:math:`\phi^{(i)}`, evaluated in the spatial input :math:`x`.
:param torch.Tensor x: The input tensor for performing the
computation. It expects a tensor :math:`B \times N \times D`,
where :math:`B` is the batch_size, :math:`N` the number of points
in the mesh, :math:`D` the dimension of the problem. In particular
:math:`D` is the codomain of the function :math:`v`. For example
a scalar function has :math:`D=1`, a 4-dimensional vector function
:math:`D=4`.
:param torch.Tensor coords: The coordinates in which the field is
evaluated for performing the computation. It expects a
tensor :math:`B \times N \times d`,
where :math:`B` is the batch_size, :math:`N` the number of points
in the mesh, :math:`D` the dimension of the domain.
:return: The output tensor obtained from Average Neural Operator Block.
:param torch.Tensor x: The input tensor for performing the computation.
:param torch.Tensor coords: The coordinates for which the field is
evaluated to perform the computation.
:return: The output tensor.
:rtype: torch.Tensor
"""
# extract basis
@@ -138,5 +100,8 @@ class LowRankBlock(torch.nn.Module):
def rank(self):
"""
The basis rank.
:return: The basis rank.
:rtype: int
"""
return self._rank
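The low-rank update removed from the docstring in this hunk, :math:`K(v) = \sigma(Wv + b + \sum_i \langle \psi^{(i)}, v \rangle \phi^{(i)})`, can be sketched with plain PyTorch. The basis parameterization, the averaging over mesh points, and all sizes below are assumptions following the referenced paper, not the exact :class:`LowRankBlock` code.

import torch
from torch import nn

emb, rank, n_points, batch = 16, 4, 100, 8
W = nn.Linear(emb, emb)                            # affine part W v(x) + b
# basis network: coordinates -> r psi and r phi functions, each R^emb valued
basis_net = nn.Sequential(nn.Linear(2, 20), nn.Tanh(), nn.Linear(20, 2 * rank * emb))
sigma = nn.Tanh()

v = torch.rand(batch, n_points, emb)               # field values
coords = torch.rand(batch, n_points, 2)            # evaluation coordinates

basis = basis_net(coords).reshape(batch, n_points, 2 * rank, emb)
psi, phi = basis[:, :, :rank, :], basis[:, :, rank:, :]
coeff = torch.einsum("bnre,bne->br", psi, v) / n_points   # <psi^(i), v>, one per rank
low_rank = torch.einsum("br,bnre->bne", coeff, phi)       # expand on phi^(i)(x)
out = sigma(W(v) + low_rank)                              # [batch, points, emb]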

View File

@@ -1,4 +1,4 @@
"""Module for OrthogonalBlock."""
"""Module for the Orthogonal Block class."""
import torch
from ...utils import check_consistency
@@ -6,21 +6,24 @@ from ...utils import check_consistency
class OrthogonalBlock(torch.nn.Module):
"""
Module to make the input orthonormal.
The module takes a tensor of size :math:`[N, M]` and returns a tensor of
size :math:`[N, M]` where the columns are orthonormal. The block performs a
Gram Schmidt orthogonalization process for the input, see
Orthogonal Block.
This block transforms an input tensor of shape :math:`[N, M]` into a tensor
of the same shape whose columns are orthonormal. The block performs the
Gram-Schmidt orthogonalization; see
`here <https://en.wikipedia.org/wiki/Gram%E2%80%93Schmidt_process>`_ for
details.
"""
def __init__(self, dim=-1, requires_grad=True):
"""
Initialize the OrthogonalBlock module.
Initialization of the :class:`OrthogonalBlock` class.
:param int dim: The dimension where to orthogonalize.
:param bool requires_grad: If autograd should record operations on
the returned tensor, defaults to True.
:param int dim: The dimension on which orthogonalization is performed.
If ``-1``, the orthogonalization is performed on the last dimension.
Default is ``-1``.
:param bool requires_grad: If ``True``, the gradients are computed
during the backward pass. Default is ``True``.
"""
super().__init__()
# store dim
@@ -31,14 +34,13 @@ class OrthogonalBlock(torch.nn.Module):
def forward(self, X):
"""
Forward pass of the OrthogonalBlock module using a Gram-Schmidt
algorithm.
Forward pass.
:raises Warning: If the dimension is greater than the other dimensions.
:param torch.Tensor X: The input tensor to orthogonalize. The input must
be of dimensions :math:`[N, M]`.
:param torch.Tensor X: The input tensor to orthogonalize.
:raises Warning: If the chosen dimension is greater than the other
dimensions in the input.
:return: The orthonormal tensor.
:rtype: torch.Tensor
"""
# check dim is less than all the other dimensions
if X.shape[self.dim] > min(X.shape):
@@ -65,13 +67,12 @@ class OrthogonalBlock(torch.nn.Module):
def _differentiable_copy(self, result, idx, value):
"""
Perform a differentiable copy operation on a tensor.
Perform a differentiable copy operation.
:param torch.Tensor result: The tensor where values will be copied to.
:param torch.Tensor result: The tensor to which the values are copied.
:param int idx: The index along the specified dimension where the
value will be copied.
:param torch.Tensor value: The tensor value to copy into the
result tensor.
values are copied.
:param torch.Tensor value: The tensor value to copy into ``result``.
:return: A new tensor with the copied values.
:rtype: torch.Tensor
"""
@@ -82,7 +83,7 @@ class OrthogonalBlock(torch.nn.Module):
@property
def dim(self):
"""
Get the dimension along which operations are performed.
The dimension along which operations are performed.
:return: The current dimension value.
:rtype: int
@@ -94,10 +95,11 @@ class OrthogonalBlock(torch.nn.Module):
"""
Set the dimension along which operations are performed.
:param value: The dimension to be set, which must be 0, 1, or -1.
:param value: The dimension to be set. Must be either ``0``, ``1``, or
``-1``.
:type value: int
:raises IndexError: If the provided dimension is not in the
range [-1, 1].
:raises IndexError: If the provided dimension is not ``0``, ``1``, or
``-1``.
"""
# check consistency
check_consistency(value, int)
@@ -115,7 +117,7 @@ class OrthogonalBlock(torch.nn.Module):
Indicates whether gradient computation is required for operations
on the tensors.
:return: True if gradients are required, False otherwise.
:return: ``True`` if gradients are required, ``False`` otherwise.
:rtype: bool
"""
return self._requires_grad
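A minimal usage sketch of the orthogonalization block documented above; the import path is an assumption and may differ from the actual package layout.

import torch
from pina.model.block import OrthogonalBlock  # import path assumed

torch.manual_seed(0)
X = torch.rand(10, 3)                # 10 rows, 3 columns to orthonormalize
block = OrthogonalBlock(dim=-1)      # orthogonalize along the last dimension
Q = block(X)                         # same shape as X, with orthonormal columns
# the Gram matrix of the columns should be close to the identity
print(torch.allclose(Q.T @ Q, torch.eye(3), atol=1e-5))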

View File

@@ -5,23 +5,26 @@ import torch
class PODBlock(torch.nn.Module):
"""
POD layer: it projects the input field on the proper orthogonal
decomposition basis. It needs to be fitted to the data before being used
with the method :meth:`fit`, which invokes the singular value decomposition.
The layer is not trainable.
Proper Orthogonal Decomposition block.
This block projects the input field on the proper orthogonal decomposition
basis. Before being used, it must be fitted to the data with the ``fit``
method, which invokes the singular value decomposition. This block is not
trainable.
.. note::
All the POD modes are stored in memory, avoiding to recompute them when
the rank changes but increasing the memory usage.
the rank changes, leading to increased memory usage.
"""
def __init__(self, rank, scale_coefficients=True):
"""
Build the POD layer with the given rank.
Initialization of the :class:`PODBlock` class.
:param int rank: The rank of the POD layer.
:param bool scale_coefficients: If True, the coefficients are scaled
:param bool scale_coefficients: If ``True``, the coefficients are scaled
after the projection to have zero mean and unit variance.
Default is ``True``.
"""
super().__init__()
self.__scale_coefficients = scale_coefficients
@@ -34,12 +37,19 @@ class PODBlock(torch.nn.Module):
"""
The rank of the POD layer.
:return: The rank of the POD layer.
:rtype: int
"""
return self._rank
@rank.setter
def rank(self, value):
"""
Set the rank of the POD layer.
:param int value: The new rank of the POD layer.
:raises ValueError: If the rank is not a positive integer.
"""
if not isinstance(value, int) or value < 1:
raise ValueError("The rank must be a positive integer")
@@ -48,9 +58,10 @@ class PODBlock(torch.nn.Module):
@property
def basis(self):
"""
The POD basis. It is a matrix whose columns are the first `self.rank`
POD modes.
The POD basis. It is a matrix whose columns are the first ``rank`` POD
modes.
:return: The POD basis.
:rtype: torch.Tensor
"""
if self._basis is None:
@@ -61,9 +72,11 @@ class PODBlock(torch.nn.Module):
@property
def scaler(self):
"""
The scaler. It is a dictionary with the keys `'mean'` and `'std'` that
store the mean and the standard deviation of the coefficients.
Return the scaler dictionary, having keys ``mean`` and ``std``
corresponding to the mean and the standard deviation of the
coefficients, respectively.
:return: The scaler dictionary.
:rtype: dict
"""
if self._scaler is None:
@@ -77,9 +90,9 @@ class PODBlock(torch.nn.Module):
@property
def scale_coefficients(self):
"""
If True, the coefficients are scaled after the projection to have zero
mean and unit variance.
The flag indicating if the coefficients are scaled after the projection.
:return: The flag indicating if the coefficients are scaled.
:rtype: bool
"""
return self.__scale_coefficients
@@ -87,10 +100,10 @@ class PODBlock(torch.nn.Module):
def fit(self, X, randomized=True):
"""
Set the POD basis by performing the singular value decomposition of the
given tensor. If `self.scale_coefficients` is True, the coefficients
given tensor. If ``self.scale_coefficients`` is ``True``, the coefficients
are scaled after the projection to have zero mean and unit variance.
:param torch.Tensor X: The tensor to be reduced.
:param torch.Tensor X: The input tensor to be reduced.
"""
self._fit_pod(X, randomized)
@@ -99,10 +112,8 @@ class PODBlock(torch.nn.Module):
def _fit_scaler(self, coeffs):
"""
Private merhod that computes the mean and the standard deviation of the
given coefficients, allowing to scale them to have zero mean and unit
variance. Mean and standard deviation are stored in the private member
`_scaler`.
Compute the mean and the standard deviation of the given coefficients,
which are then stored in ``self._scaler``.
:param torch.Tensor coeffs: The coefficients to be scaled.
"""
@@ -113,8 +124,8 @@ class PODBlock(torch.nn.Module):
def _fit_pod(self, X, randomized):
"""
Private method that computes the POD basis of the given tensor and
stores it in the private member `_basis`.
Compute the POD basis of the given tensor, which is then stored in
``self._basis``.
:param torch.Tensor X: The tensor to be reduced.
"""
@@ -136,9 +147,7 @@ class PODBlock(torch.nn.Module):
def forward(self, X):
"""
The forward pass of the POD layer. By default it executes the
:meth:`reduce` method, reducing the input tensor to its POD
representation. The POD layer needs to be fitted before being used.
The forward pass of the POD layer.
:param torch.Tensor X: The input tensor to be reduced.
:return: The reduced tensor.
@@ -148,10 +157,11 @@ class PODBlock(torch.nn.Module):
def reduce(self, X):
"""
Reduce the input tensor to its POD representation. The POD layer needs
to be fitted before being used.
Reduce the input tensor to its POD representation. The POD layer must
be fitted before being used.
:param torch.Tensor X: The input tensor to be reduced.
:raises RuntimeError: If the POD layer is not fitted.
:return: The reduced tensor.
:rtype: torch.Tensor
"""
@@ -176,6 +186,7 @@ class PODBlock(torch.nn.Module):
to be fitted before being used.
:param torch.Tensor coeff: The coefficients to be expanded.
:raises RuntimeError: If the POD layer is not fitted.
:return: The expanded tensor.
:rtype: torch.Tensor
"""

View File

@@ -1,4 +1,4 @@
"""Module for Radial Basis Function Interpolation layer."""
"""Module for the Radial Basis Function Interpolation layer."""
import math
import warnings
@@ -10,6 +10,10 @@ from ...utils import check_consistency
def linear(r):
"""
Linear radial basis function.
:param torch.Tensor r: Distance between points.
:return: The linear radial basis function.
:rtype: torch.Tensor
"""
return -r
@@ -17,6 +21,11 @@ def linear(r):
def thin_plate_spline(r, eps=1e-7):
"""
Thin plate spline radial basis function.
:param torch.Tensor r: Distance between points.
:param float eps: Small value to avoid log(0).
:return: The thin plate spline radial basis function.
:rtype: torch.Tensor
"""
r = torch.clamp(r, min=eps)
return r**2 * torch.log(r)
@@ -25,6 +34,10 @@ def thin_plate_spline(r, eps=1e-7):
def cubic(r):
"""
Cubic radial basis function.
:param torch.Tensor r: Distance between points.
:return: The cubic radial basis function.
:rtype: torch.Tensor
"""
return r**3
@@ -32,6 +45,10 @@ def cubic(r):
def quintic(r):
"""
Quintic radial basis function.
:param torch.Tensor r: Distance between points.
:return: The quintic radial basis function.
:rtype: torch.Tensor
"""
return -(r**5)
@@ -39,6 +56,10 @@ def quintic(r):
def multiquadric(r):
"""
Multiquadric radial basis function.
:param torch.Tensor r: Distance between points.
:return: The multiquadric radial basis function.
:rtype: torch.Tensor
"""
return -torch.sqrt(r**2 + 1)
@@ -46,6 +67,10 @@ def multiquadric(r):
def inverse_multiquadric(r):
"""
Inverse multiquadric radial basis function.
:param torch.Tensor r: Distance between points.
:return: The inverse multiquadric radial basis function.
:rtype: torch.Tensor
"""
return 1 / torch.sqrt(r**2 + 1)
@@ -53,6 +78,10 @@ def inverse_multiquadric(r):
def inverse_quadratic(r):
"""
Inverse quadratic radial basis function.
:param torch.Tensor r: Distance between points.
:return: The inverse quadratic radial basis function.
:rtype: torch.Tensor
"""
return 1 / (r**2 + 1)
@@ -60,6 +89,10 @@ def inverse_quadratic(r):
def gaussian(r):
"""
Gaussian radial basis function.
:param torch.Tensor r: Distance between points.
:return: The gaussian radial basis function.
:rtype: torch.Tensor
"""
return torch.exp(-(r**2))
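The kernels above are plain functions of the pairwise distance; a short sketch of how they can be evaluated on a tensor of distances (the module path in the import is an assumption).

import torch
from pina.model.block.rbf_block import linear, gaussian  # module path assumed

r = torch.linspace(0.0, 2.0, 5)  # sample distances
print(linear(r))                 # equals -r
print(gaussian(r))               # equals exp(-r**2)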
@@ -88,13 +121,14 @@ min_degree_funcs = {
class RBFBlock(torch.nn.Module):
"""
Radial Basis Function (RBF) interpolation layer. It need to be fitted with
the data with the method :meth:`fit`, before it can be used to interpolate
new points. The layer is not trainable.
Radial Basis Function (RBF) interpolation layer.
The block must be fitted to the data before being used to interpolate
new points. The layer is not trainable.
.. note::
It reproduces the implementation of ``scipy.interpolate.RBFBlock`` and
it is inspired from the implementation in `torchrbf.
It reproduces the implementation of :class:`scipy.interpolate.RBFInterpolator`
and it is inspired by the implementation in `torchrbf
<https://github.com/ArmanMaesumi/torchrbf>`_
"""
@@ -107,24 +141,25 @@ class RBFBlock(torch.nn.Module):
degree=None,
):
"""
:param int neighbors: Number of neighbors to use for the
interpolation.
If ``None``, use all data points.
:param float smoothing: Smoothing parameter for the interpolation.
if 0.0, the interpolation is exact and no smoothing is applied.
:param str kernel: Radial basis function to use. Must be one of
``linear``, ``thin_plate_spline``, ``cubic``, ``quintic``,
``multiquadric``, ``inverse_multiquadric``, ``inverse_quadratic``,
or ``gaussian``.
:param float epsilon: Shape parameter that scaled the input to
the RBF. This defaults to 1 for kernels in ``scale_invariant``
dictionary, and must be specified for other kernels.
:param int degree: Degree of the added polynomial.
For some kernels, there exists a minimum degree of the polynomial
such that the RBF is well-posed. Those minimum degrees are specified
in the `min_degree_funcs` dictionary above. If `degree` is less than
the minimum degree, a warning is raised and the degree is set to the
minimum value.
Initialization of the :class:`RBFBlock` class.
:param int neighbors: The number of neighbors used for interpolation.
If ``None``, all data are used.
:param float smoothing: The smoothing parameter for the interpolation.
If ``0.0``, the interpolation is exact and no smoothing is applied.
:param str kernel: The radial basis function to use.
The available kernels are: ``linear``, ``thin_plate_spline``,
``cubic``, ``quintic``, ``multiquadric``, ``inverse_multiquadric``,
``inverse_quadratic``, or ``gaussian``.
:param float epsilon: The shape parameter that scales the input to the
RBF. Default is ``1`` for kernels in the ``scale_invariant``
dictionary, while it must be specified for other kernels.
:param int degree: The degree of the polynomial. Some kernels require a
minimum degree of the polynomial to ensure that the RBF is well
defined. These minimum degrees are specified in the
``min_degree_funcs`` dictionary. If ``degree`` is less than the
minimum degree required, a warning is raised and the degree is set
to the minimum value.
"""
super().__init__()
@@ -151,27 +186,39 @@ class RBFBlock(torch.nn.Module):
@property
def smoothing(self):
"""
Smoothing parameter for the interpolation.
The smoothing parameter for the interpolation.
:return: The smoothing parameter.
:rtype: float
"""
return self._smoothing
@smoothing.setter
def smoothing(self, value):
"""
Set the smoothing parameter for the interpolation.
:param float value: The smoothing parameter.
"""
self._smoothing = value
@property
def kernel(self):
"""
Radial basis function to use.
The radial basis function.
:return: The radial basis function.
:rtype: str
"""
return self._kernel
@kernel.setter
def kernel(self, value):
"""
Set the radial basis function.
:param str value: The radial basis function.
"""
if value not in radial_functions:
raise ValueError(f"Unknown kernel: {value}")
self._kernel = value.lower()
@@ -179,14 +226,22 @@ class RBFBlock(torch.nn.Module):
@property
def epsilon(self):
"""
Shape parameter that scaled the input to the RBF.
The shape parameter that scales the input to the RBF.
:return: The shape parameter.
:rtype: float
"""
return self._epsilon
@epsilon.setter
def epsilon(self, value):
"""
Set the shape parameter.
:param float value: The shape parameter.
:raises ValueError: If the kernel requires an epsilon and it is not
specified.
"""
if value is None:
if self.kernel in scale_invariant:
value = 1.0
@@ -199,14 +254,23 @@ class RBFBlock(torch.nn.Module):
@property
def degree(self):
"""
Degree of the added polynomial.
The degree of the polynomial.
:return: The degree of the polynomial.
:rtype: int
"""
return self._degree
@degree.setter
def degree(self, value):
"""
Set the degree of the polynomial.
:param int value: The degree of the polynomial.
:raises UserWarning: If the degree is less than the minimum required
for the kernel.
:raises ValueError: If the degree is less than -1.
"""
min_degree = min_degree_funcs.get(self.kernel, -1)
if value is None:
value = max(min_degree, 0)
@@ -223,6 +287,13 @@ class RBFBlock(torch.nn.Module):
self._degree = value
def _check_data(self, y, d):
"""
Check the data consistency.
:param torch.Tensor y: The tensor of data points.
:param torch.Tensor d: The tensor of data values.
:raises ValueError: If the data is not consistent.
"""
if y.ndim != 2:
raise ValueError("y must be a 2-dimensional tensor.")
@@ -241,8 +312,11 @@ class RBFBlock(torch.nn.Module):
"""
Fit the RBF interpolator to the data.
:param torch.Tensor y: (n, d) tensor of data points.
:param torch.Tensor d: (n, m) tensor of data values.
:param torch.Tensor y: The tensor of data points.
:param torch.Tensor d: The tensor of data values.
:raises NotImplementedError: If the neighbors are not ``None``.
:raises ValueError: If the data is not compatible with the requested
degree.
"""
self._check_data(y, d)
@@ -252,7 +326,7 @@ class RBFBlock(torch.nn.Module):
if self.neighbors is None:
nobs = self.y.shape[0]
else:
raise NotImplementedError("neighbors currently not supported")
raise NotImplementedError("Neighbors currently not supported")
powers = RBFBlock.monomial_powers(self.y.shape[1], self.degree).to(
y.device
@@ -276,12 +350,14 @@ class RBFBlock(torch.nn.Module):
def forward(self, x):
"""
Returns the interpolated data at the given points `x`.
Forward pass.
:param torch.Tensor x: `(n, d)` tensor of points at which
to query the interpolator
:rtype: `(n, m)` torch.Tensor of interpolated data.
:param torch.Tensor x: The tensor of points to interpolate.
:raises ValueError: If the input is not a 2-dimensional tensor.
:raises ValueError: If the second dimension of the input is not the same
as the second dimension of the data.
:return: The interpolated data.
:rtype: torch.Tensor
"""
if x.ndim != 2:
raise ValueError("`x` must be a 2-dimensional tensor.")
@@ -309,25 +385,25 @@ class RBFBlock(torch.nn.Module):
@staticmethod
def kernel_vector(x, y, kernel_func):
"""
Evaluate radial functions with centers `y` for all points in `x`.
Evaluate the radial functions with centers ``y`` for all points in ``x``.
:param torch.Tensor x: `(n, d)` tensor of points.
:param torch.Tensor y: `(m, d)` tensor of centers.
:param torch.Tensor x: The tensor of points.
:param torch.Tensor y: The tensor of centers.
:param str kernel_func: Radial basis function to use.
:rtype: `(n, m)` torch.Tensor of radial function values.
:return: The radial function values.
:rtype: torch.Tensor
"""
return kernel_func(torch.cdist(x, y))
@staticmethod
def polynomial_matrix(x, powers):
"""
Evaluate monomials at `x` with given `powers`.
Evaluate the monomials with the given ``powers`` at the points ``x``.
:param torch.Tensor x: `(n, d)` tensor of points.
:param torch.Tensor powers: `(r, d)` tensor of powers for each monomial.
:rtype: `(n, r)` torch.Tensor of monomial values.
:param torch.Tensor x: The tensor of points.
:param torch.Tensor powers: The tensor of powers for each monomial.
:return: The monomial values.
:rtype: torch.Tensor
"""
x_ = torch.repeat_interleave(x, repeats=powers.shape[0], dim=0)
powers_ = powers.repeat(x.shape[0], 1)
@@ -336,12 +412,12 @@ class RBFBlock(torch.nn.Module):
@staticmethod
def kernel_matrix(x, kernel_func):
"""
Returns radial function values for all pairs of points in `x`.
Return the radial function values for all pairs of points in ``x``.
:param torch.Tensor x: `(n, d`) tensor of points.
:param str kernel_func: Radial basis function to use.
:rtype: `(n, n`) torch.Tensor of radial function values.
:param torch.Tensor x: The tensor of points.
:param str kernel_func: The radial basis function to use.
:return: The radial function values.
:rtype: torch.Tensor
"""
return kernel_func(torch.cdist(x, x))
@@ -350,12 +426,10 @@ class RBFBlock(torch.nn.Module):
"""
Return the powers for each monomial in a polynomial.
:param int ndim: Number of variables in the polynomial.
:param int degree: Degree of the polynomial.
:rtype: `(nmonos, ndim)` torch.Tensor where each row contains the powers
for each variable in a monomial.
:param int ndim: The number of variables in the polynomial.
:param int degree: The degree of the polynomial.
:return: The powers for each monomial.
:rtype: torch.Tensor
"""
nmonos = math.comb(degree + ndim, ndim)
out = torch.zeros((nmonos, ndim), dtype=torch.int32)
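A small illustration of the static helper above; the exact ordering of the rows is an implementation detail, so only the shape is checked (import path assumed).

import math
from pina.model.block import RBFBlock  # import path assumed

powers = RBFBlock.monomial_powers(2, 1)  # monomials of degree <= 1 in 2 variables: 1, x, y
print(powers.shape)                      # torch.Size([3, 2])
print(math.comb(1 + 2, 2))               # 3 monomials, matching the first dimension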
@@ -372,16 +446,16 @@ class RBFBlock(torch.nn.Module):
"""
Build the RBF linear system.
:param torch.Tensor y: (n, d) tensor of data points.
:param torch.Tensor d: (n, m) tensor of data values.
:param torch.Tensor smoothing: (n,) tensor of smoothing parameters.
:param str kernel: Radial basis function to use.
:param float epsilon: Shape parameter that scaled the input to the RBF.
:param torch.Tensor powers: (r, d) tensor of powers for each monomial.
:rtype: (lhs, rhs, shift, scale) where `lhs` and `rhs` are the
left-hand side and right-hand side of the linear system, and
`shift` and `scale` are the shift and scale parameters.
:param torch.Tensor y: The tensor of data points.
:param torch.Tensor d: The tensor of data values.
:param torch.Tensor smoothing: The tensor of smoothing parameters.
:param str kernel: The radial basis function to use.
:param float epsilon: The shape parameter that scales the input to the
RBF.
:param torch.Tensor powers: The tensor of powers for each monomial.
:return: The left-hand side and right-hand side of the linear system,
and the shift and scale parameters.
:rtype: tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]
"""
p = d.shape[0]
s = d.shape[1]
@@ -413,21 +487,20 @@ class RBFBlock(torch.nn.Module):
@staticmethod
def solve(y, d, smoothing, kernel, epsilon, powers):
"""
Build then solve the RBF linear system.
Build and solve the RBF linear system.
:param torch.Tensor y: (n, d) tensor of data points.
:param torch.Tensor d: (n, m) tensor of data values.
:param torch.Tensor smoothing: (n,) tensor of smoothing parameters.
:param str kernel: Radial basis function to use.
:param float epsilon: Shape parameter that scaled the input to the RBF.
:param torch.Tensor powers: (r, d) tensor of powers for each monomial.
:param torch.Tensor y: The tensor of data points.
:param torch.Tensor d: The tensor of data values.
:param torch.Tensor smoothing: The tensor of smoothing parameters.
:param str kernel: The radial basis function to use.
:param float epsilon: The shape parameter that scales the input to the
RBF.
:param torch.Tensor powers: The tensor of powers for each monomial.
:raises ValueError: If the linear system is singular.
:rtype: (shift, scale, coeffs) where `shift` and `scale` are the
shift and scale parameters, and `coeffs` are the coefficients
of the interpolator
:return: The shift and scale parameters, and the coefficients of the
interpolator.
:rtype: tuple[torch.Tensor, torch.Tensor, torch.Tensor]
"""
lhs, rhs, shift, scale = RBFBlock.build(
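A hedged end-to-end sketch of the interpolation block documented in this file; the import path and the constructor defaults are assumptions.

import torch
from pina.model.block import RBFBlock  # import path assumed

y = torch.rand(50, 2)                        # 50 data points in two dimensions
d = torch.sin(y.sum(dim=1, keepdim=True))    # scalar data values, shape [50, 1]

rbf = RBFBlock(kernel="thin_plate_spline", smoothing=0.0)
rbf.fit(y, d)                                # builds and solves the RBF linear system

x_new = torch.rand(10, 2)                    # query points
print(rbf(x_new).shape)                      # torch.Size([10, 1])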

View File

@@ -1,5 +1,5 @@
"""
TODO: Add title.
Module for residual blocks and enhanced linear layers.
"""
import torch
@@ -8,16 +8,16 @@ from ...utils import check_consistency
class ResidualBlock(nn.Module):
"""Residual block base class. Implementation of a residual block.
"""
Residual block class.
.. seealso::
**Original reference**: He, Kaiming, et al.
*Deep residual learning for image recognition.*
Proceedings of the IEEE conference on computer vision
and pattern recognition. 2016..
Proceedings of the IEEE conference on computer vision and pattern
recognition. 2016.
DOI: `<https://arxiv.org/pdf/1512.03385.pdf>`_.
"""
def __init__(
@@ -29,18 +29,15 @@ class ResidualBlock(nn.Module):
activation=torch.nn.ReLU(),
):
"""
Initializes the ResidualBlock module.
Initialization of the :class:`ResidualBlock` class.
:param int input_dim: Dimension of the input to pass to the
feedforward linear layer.
:param int output_dim: Dimension of the output from the
residual layer.
:param int hidden_dim: Hidden dimension for mapping the input
(first block).
:param bool spectral_norm: Apply spectral normalization to feedforward
layers, defaults to False.
:param torch.nn.Module activation: Cctivation function after first
block.
:param int input_dim: The input dimension.
:param int output_dim: The output dimension.
:param int hidden_dim: The hidden dimension.
:param bool spectral_norm: If ``True``, the spectral normalization is
applied to the feedforward layers. Default is ``False``.
:param torch.nn.Module activation: The activation function.
Default is :class:`torch.nn.ReLU`.
"""
super().__init__()
@@ -64,10 +61,11 @@ class ResidualBlock(nn.Module):
self._l3 = self._spect_norm(nn.Linear(input_dim, output_dim))
def forward(self, x):
"""Forward pass for residual block layer.
"""
Forward pass.
:param torch.Tensor x: Input tensor for the residual layer.
:return: Output tensor for the residual layer.
:param torch.Tensor x: The input tensor.
:return: The output tensor.
:rtype: torch.Tensor
"""
y = self._activation(self._l1(x))
@@ -76,10 +74,10 @@ class ResidualBlock(nn.Module):
return y + x
def _spect_norm(self, x):
"""Perform spectral norm on the layers.
"""
Perform spectral normalization on the network layers.
:param x: A torch.nn.Module Linear layer
:type x: torch.nn.Module
:param torch.nn.Module x: A :class:`torch.nn.Linear` layer.
:return: The layer, with spectral normalization applied if enabled.
:rtype: torch.nn.Module
"""
@@ -88,37 +86,31 @@ class ResidualBlock(nn.Module):
class EnhancedLinear(torch.nn.Module):
"""
A wrapper class for enhancing a linear layer with activation and/or dropout.
Enhanced Linear layer class.
:param layer: The linear layer to be enhanced.
:type layer: torch.nn.Module
:param activation: The activation function to be applied after the linear
layer.
:type activation: torch.nn.Module
:param dropout: The dropout probability to be applied after the activation
(if provided).
:type dropout: float
:Example:
>>> linear_layer = torch.nn.Linear(10, 20)
>>> activation = torch.nn.ReLU()
>>> dropout_prob = 0.5
>>> enhanced_linear = EnhancedLinear(linear_layer, activation, dropout_prob)
This class is a wrapper for enhancing a linear layer with activation and/or
dropout.
"""
def __init__(self, layer, activation=None, dropout=None):
"""
Initializes the EnhancedLinear module.
Initialization of the :class:`EnhancedLinear` class.
:param layer: The linear layer to be enhanced.
:type layer: torch.nn.Module
:param activation: The activation function to be applied after the
linear layer.
:type activation: torch.nn.Module
:param dropout: The dropout probability to be applied after the
activation (if provided).
:type dropout: float
:param torch.nn.Module layer: The linear layer to be enhanced.
:param torch.nn.Module activation: The activation function. Default is
``None``.
:param float dropout: The dropout probability. Default is ``None``.
:Example:
>>> linear_layer = torch.nn.Linear(10, 20)
>>> activation = torch.nn.ReLU()
>>> dropout_prob = 0.5
>>> enhanced_linear = EnhancedLinear(
... linear_layer,
... activation,
... dropout_prob
... )
"""
super().__init__()
@@ -146,23 +138,19 @@ class EnhancedLinear(torch.nn.Module):
def forward(self, x):
"""
Forward pass through the enhanced linear module.
Forward pass.
:param x: Input tensor.
:type x: torch.Tensor
:return: Output tensor after passing through the enhanced linear module.
:param torch.Tensor x: The input tensor.
:return: The output tensor.
:rtype: torch.Tensor
"""
return self._model(x)
def _drop(self, p):
"""
Applies dropout with probability p.
:param p: Dropout probability.
:type p: float
Apply dropout with probability p.
:param float p: Dropout probability.
:return: Dropout layer with the specified probability.
:rtype: torch.nn.Dropout
"""

View File

@@ -1,5 +1,5 @@
"""
TODO: Add title.
Module for spectral convolution blocks.
"""
import torch
@@ -10,24 +10,23 @@ from ...utils import check_consistency
######## 1D Spectral Convolution ###########
class SpectralConvBlock1D(nn.Module):
"""
PINA implementation of Spectral Convolution Block for one
dimensional tensors.
Spectral Convolution Block for one-dimensional tensors.
This class computes the spectral convolution of the input with a linear
kernel in the Fourier space, and then it maps the input back to the physical
space.
The block expects an input of size [``batch``, ``input_numb_fields``, ``N``]
and returns an output of size [``batch``, ``output_numb_fields``, ``N``].
"""
def __init__(self, input_numb_fields, output_numb_fields, n_modes):
"""
The module computes the spectral convolution of the input with a linear
kernel in the
fourier space, and then it maps the input back to the physical
space.
The block expects an input of size ``[batch, input_numb_fields, N]``
and returns an output of size ``[batch, output_numb_fields, N]``.
r"""
Initialization of the :class:`SpectralConvBlock1D` class.
:param int input_numb_fields: The number of channels for the input.
:param int output_numb_fields: The number of channels for the output.
:param int n_modes: Number of modes to select, it must be at most equal
to the ``floor(N/2)+1``.
:param int n_modes: The number of modes to select. It must be at most
equal to :math:`\lfloor N/2 \rfloor + 1`.
"""
super().__init__()
@@ -54,30 +53,26 @@ class SpectralConvBlock1D(nn.Module):
def _compute_mult1d(self, input, weights):
"""
Compute the matrix multiplication of the input
with the linear kernel weights.
Compute the matrix multiplication of the input and the linear kernel
weights.
:param input: The input tensor, expect of size
``[batch, input_numb_fields, x]``.
:type input: torch.Tensor
:param weights: The kernel weights, expect of
size ``[input_numb_fields, output_numb_fields, x]``.
:type weights: torch.Tensor
:return: The matrix multiplication of the input
with the linear kernel weights.
:param torch.Tensor input: The input tensor. Expected of size
[``batch``, ``input_numb_fields``, ``N``].
:param torch.Tensor weights: The kernel weights. Expected of size
[``input_numb_fields``, ``output_numb_fields``, ``N``].
:return: The result of the matrix multiplication.
:rtype: torch.Tensor
"""
return torch.einsum("bix,iox->box", input, weights)
def forward(self, x):
"""
Forward computation for Spectral Convolution.
Forward pass.
:param x: The input tensor, expect of size
``[batch, input_numb_fields, x]``.
:type x: torch.Tensor
:return: The output tensor obtained from the
spectral convolution of size ``[batch, output_numb_fields, x]``.
:param torch.Tensor x: The input tensor. Expected of size
[``batch``, ``input_numb_fields``, ``N``].
:return: The output tensor of size
[``batch``, ``output_numb_fields``, ``N``].
:rtype: torch.Tensor
"""
batch_size = x.shape[0]
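A short usage sketch of the one-dimensional block (import path assumed); ``n_modes`` must not exceed ``N // 2 + 1`` for an input discretized on ``N`` grid points.

import torch
from pina.model.block import SpectralConvBlock1D  # import path assumed

block = SpectralConvBlock1D(input_numb_fields=3, output_numb_fields=5, n_modes=16)
x = torch.rand(8, 3, 64)  # [batch, input_numb_fields, N], with 16 <= 64 // 2 + 1
print(block(x).shape)     # torch.Size([8, 5, 64])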
@@ -104,26 +99,29 @@ class SpectralConvBlock1D(nn.Module):
######## 2D Spectral Convolution ###########
class SpectralConvBlock2D(nn.Module):
"""
PINA implementation of spectral convolution block for two
dimensional tensors.
Spectral Convolution Block for two-dimensional tensors.
This class computes the spectral convolution of the input with a linear
kernel in the Fourier space, and then it maps the input back to the physical
space.
The block expects an input of size
[``batch``, ``input_numb_fields``, ``Nx``, ``Ny``]
and returns an output of size
[``batch``, ``output_numb_fields``, ``Nx``, ``Ny``].
"""
def __init__(self, input_numb_fields, output_numb_fields, n_modes):
"""
The module computes the spectral convolution of the input with a linear
kernel in the
fourier space, and then it maps the input back to the physical
space.
The block expects an input of size
``[batch, input_numb_fields, Nx, Ny]``
and returns an output of size ``[batch, output_numb_fields, Nx, Ny]``.
r"""
Initialization of the :class:`SpectralConvBlock2D` class.
:param int input_numb_fields: The number of channels for the input.
:param int output_numb_fields: The number of channels for the output.
:param list | tuple n_modes: Number of modes to select for each
dimension. It must be at most equal to the ``floor(Nx/2)+1`` and
``floor(Ny/2)+1``.
:param n_modes: The number of modes to select for each dimension.
It must be at most equal to :math:`\lfloor Nx/2 \rfloor + 1` and
:math:`\lfloor Ny/2 \rfloor + 1`.
:type n_modes: list[int] | tuple[int]
:raises ValueError: If the number of modes is not consistent.
:raises ValueError: If the number of modes is not a list or tuple.
"""
super().__init__()
@@ -178,30 +176,26 @@ class SpectralConvBlock2D(nn.Module):
def _compute_mult2d(self, input, weights):
"""
Compute the matrix multiplication of the input
with the linear kernel weights.
Compute the matrix multiplication of the input and the linear kernel
weights.
:param input: The input tensor, expect of size
``[batch, input_numb_fields, x, y]``.
:type input: torch.Tensor
:param weights: The kernel weights, expect of
size ``[input_numb_fields, output_numb_fields, x, y]``.
:type weights: torch.Tensor
:return: The matrix multiplication of the input
with the linear kernel weights.
:param torch.Tensor input: The input tensor. Expected of size
[``batch``, ``input_numb_fields``, ``Nx``, ``Ny``].
:param torch.Tensor weights: The kernel weights. Expected of size
[``input_numb_fields``, ``output_numb_fields``, ``Nx``, ``Ny``].
:return: The result of the matrix multiplication.
:rtype: torch.Tensor
"""
return torch.einsum("bixy,ioxy->boxy", input, weights)
def forward(self, x):
"""
Forward computation for Spectral Convolution.
Forward pass.
:param x: The input tensor, expect of size
``[batch, input_numb_fields, x, y]``.
:type x: torch.Tensor
:return: The output tensor obtained from the
spectral convolution of size ``[batch, output_numb_fields, x, y]``.
:param torch.Tensor x: The input tensor. Expected of size
[``batch``, ``input_numb_fields``, ``Nx``, ``Ny``].
:return: The output tensor of size
[``batch``, ``output_numb_fields``, ``Nx``, ``Ny``].
:rtype: torch.Tensor
"""
@@ -235,27 +229,29 @@ class SpectralConvBlock2D(nn.Module):
######## 3D Spectral Convolution ###########
class SpectralConvBlock3D(nn.Module):
"""
PINA implementation of spectral convolution block for three
dimensional tensors.
Spectral Convolution Block for three-dimensional tensors.
This class computes the spectral convolution of the input with a linear
kernel in the Fourier space, and then it maps the input back to the physical
space.
The block expects an input of size
[``batch``, ``input_numb_fields``, ``Nx``, ``Ny``, ``Nz``]
and returns an output of size
[``batch``, ``output_numb_fields``, ``Nx``, ``Ny``, ``Nz``].
"""
def __init__(self, input_numb_fields, output_numb_fields, n_modes):
"""
The module computes the spectral convolution of the input with a
linear kernel in the
fourier space, and then it maps the input back to the physical
space.
The block expects an input of size
``[batch, input_numb_fields, Nx, Ny, Nz]``
and returns an output of size
``[batch, output_numb_fields, Nx, Ny, Nz]``.
r"""
Initialization of the :class:`SpectralConvBlock3D` class.
:param int input_numb_fields: The number of channels for the input.
:param int output_numb_fields: The number of channels for the output.
:param list | tuple n_modes: Number of modes to select for each
dimension. It must be at most equal to the ``floor(Nx/2)+1``,
``floor(Ny/2)+1`` and ``floor(Nz/2)+1``.
:param n_modes: The number of modes to select for each dimension.
It must be at most equal to :math:`\lfloor Nx/2 \rfloor + 1`,
:math:`\lfloor Ny/2 \rfloor + 1` and :math:`\lfloor Nz/2 \rfloor + 1`.
:type n_modes: list[int] | tuple[int]
:raises ValueError: If the number of modes is not consistent.
:raises ValueError: If the number of modes is not a list or tuple.
"""
super().__init__()
@@ -334,31 +330,27 @@ class SpectralConvBlock3D(nn.Module):
def _compute_mult3d(self, input, weights):
"""
Compute the matrix multiplication of the input
with the linear kernel weights.
Compute the matrix multiplication of the input and the linear kernel
weights.
:param input: The input tensor, expect of size
``[batch, input_numb_fields, x, y, z]``.
:type input: torch.Tensor
:param weights: The kernel weights, expect of
size ``[input_numb_fields, output_numb_fields, x, y, z]``.
:type weights: torch.Tensor
:return: The matrix multiplication of the input
with the linear kernel weights.
:param torch.Tensor input: The input tensor. Expected of size
[``batch``, ``input_numb_fields``, ``Nx``, ``Ny``, ``Nz``].
:param torch.Tensor weights: The kernel weights. Expected of size
[``input_numb_fields``, ``output_numb_fields``, ``Nx``, ``Ny``,
``Nz``].
:return: The result of the matrix multiplication.
:rtype: torch.Tensor
"""
return torch.einsum("bixyz,ioxyz->boxyz", input, weights)
def forward(self, x):
"""
Forward computation for Spectral Convolution.
Forward pass.
:param x: The input tensor, expect of size
``[batch, input_numb_fields, x, y, z]``.
:type x: torch.Tensor
:return: The output tensor obtained from the
spectral convolution of size
``[batch, output_numb_fields, x, y, z]``.
:param torch.Tensor x: The input tensor. Expected of size
[``batch``, ``input_numb_fields``, ``Nx``, ``Ny``, ``Nz``].
:return: The output tensor of size
[``batch``, ``output_numb_fields``, ``Nx``, ``Ny``, ``Nz``].
:rtype: torch.Tensor
"""

View File

@@ -1,5 +1,5 @@
"""
TODO: Add description
Module for the Stride class.
"""
import torch
@@ -7,14 +7,16 @@ import torch
class Stride:
"""
TODO
Stride class for continuous convolution.
"""
def __init__(self, dict_):
"""Stride class for continous convolution
"""
Initialization of the :class:`Stride` class.
:param param: type of continuous convolution
:type param: string
:param dict dict_: Dictionary having as keys the domain size ``domain``,
the starting position of the filter ``start``, the jump size for the
filter ``jump``, and the direction of the filter ``direction``.
"""
self._dict_stride = dict_
@@ -22,52 +24,50 @@ class Stride:
self._stride_discrete = self._create_stride_discrete(dict_)
def _create_stride_discrete(self, my_dict):
"""Creating the list for applying the filter
"""
Create a tensor of positions where to apply the filter.
:param dict my_dict: Dictionary having as keys the domain size
``domain``, the starting position of the filter ``start``, the jump
size for the filter ``jump``, and the direction of the filter
``direction``.
:raises IndexError: If the dict values do not all have the same length.
:raises ValueError: Domain values must be greater than 0.
:raises ValueError: Direction must be either equal to ``1``, ``-1`` or
``0``.
:raises IndexError: If ``direction`` and ``jump`` do not have zeros at the
same indices.
:return: The positions where the filter is applied.
:rtype: torch.Tensor
:param my_dict: Dictionary with the following arguments:
domain size, starting position of the filter, jump size
for the filter and direction of the filter
:type my_dict: dict
:raises IndexError: Values in the dict must have all same length
:raises ValueError: Domain values must be greater than 0
:raises ValueError: Direction must be either equal to 1, -1 or 0
:raises IndexError: Direction and jumps must have zero in the same
index
:return: list of positions for the filter
:rtype: list
:Example:
>>> stride = {"domain": [4, 4],
"start": [-4, 2],
"jump": [2, 2],
"direction": [1, 1],
}
>>> create_stride(stride)
[[-4.0, 2.0], [-4.0, 4.0], [-2.0, 2.0], [-2.0, 4.0]]
>>> stride_dict = {
... "domain": [4, 4],
... "start": [-4, 2],
... "jump": [2, 2],
... "direction": [1, 1],
... }
>>> Stride(stride_dict)
"""
# we must check boundaries of the input as well
domain, start, jumps, direction = my_dict.values()
# checking
if not all(len(s) == len(domain) for s in my_dict.values()):
raise IndexError("values in the dict must have all same length")
raise IndexError("Values in the dict must have all same length")
if not all(v >= 0 for v in domain):
raise ValueError("domain values must be greater than 0")
raise ValueError("Domain values must be greater than 0")
if not all(v in (0, -1, 1) for v in direction):
raise ValueError("direction must be either equal to 1, -1 or 0")
raise ValueError("Direction must be either equal to 1, -1 or 0")
seq_jumps = [i for i, e in enumerate(jumps) if e == 0]
seq_direction = [i for i, e in enumerate(direction) if e == 0]
if seq_direction != seq_jumps:
raise IndexError(
"direction and jumps must have zero in the same index"
"Direction and jumps must have zero in the same index"
)
if seq_jumps:

View File

@@ -1,5 +1,5 @@
"""
TODO
Module for utility functions for the convolutional layer.
"""
import torch
@@ -7,7 +7,13 @@ import torch
def check_point(x, current_stride, dim):
"""
TODO
Check if the point is in the current stride.
:param torch.Tensor x: The input data.
:param int current_stride: The current stride.
:param int dim: The shape of the filter.
:return: The indices of the points in the current stride.
:rtype: torch.Tensor
"""
max_stride = current_stride + dim
indeces = torch.logical_and(
@@ -17,13 +23,12 @@ def check_point(x, current_stride, dim):
def map_points_(x, filter_position):
"""Mapping function n dimensional case
"""
The mapping function for the n-dimensional case.
:param x: input data of two dimension
:type x: torch.tensor
:param filter_position: position of the filter
:type dim: list[numeric]
:return: data mapped inplace
:param torch.Tensor x: The two-dimensional input data.
:param list[int] filter_position: The position of the filter.
:return: The data mapped in-place.
:rtype: torch.Tensor
"""
x.add_(-filter_position)
@@ -32,14 +37,20 @@ def map_points_(x, filter_position):
def optimizing(f):
"""Decorator for calling a function just once
"""
Decorator to call the function only once.
:param f: The function to wrap.
:type f: function
:type f: Callable
"""
def wrapper(*args, **kwargs):
"""
Wrapper function.
:param args: The arguments of the function.
:param kwargs: The keyword arguments of the function.
"""
if kwargs["type_"] == "forward":
if not wrapper.has_run_inverse:
wrapper.has_run_inverse = True