update doc

This commit is contained in:
Dario Coscia
2025-03-17 12:23:26 +01:00
committed by FilippoOlivo
parent ae1fd2680f
commit 480140dd31
33 changed files with 265 additions and 196 deletions

View File

@@ -7,9 +7,7 @@ from ..utils import check_consistency, is_function
class AdaptiveActivationFunctionInterface(torch.nn.Module, metaclass=ABCMeta):
r"""
The :class:`AdaptiveActivationFunctionInterface`
class makes a :class:`torch.nn.Module` activation function into an adaptive
trainable activation function. If one wants to create an adaptive activation
function, this class must be used as the base class.
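For illustration, a minimal sketch of such a subclass (the import path follows the docstring above; the ``_func`` attribute and the constructor signature are assumptions, not confirmed by this diff):

    import torch
    from pina.adaptive_function import AdaptiveActivationFunctionInterface

    class AdaptiveSiLU(AdaptiveActivationFunctionInterface):
        # Hypothetical subclass: the interface is assumed to manage the
        # trainable parameters; the subclass only assigns the wrapped callable.
        def __init__(self, alpha=None, beta=None, gamma=None, fixed=None):
            super().__init__(alpha, beta, gamma, fixed)
            self._func = torch.nn.functional.silu  # assumed attribute name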

View File

@@ -16,29 +16,23 @@ from ..collector import Collector
class DummyDataloader:
"""
Dataloader used when batch size is ``None``. It returns the entire dataset
in a single batch.
"""
def __init__(self, dataset):
"""
Prepare a dataloader object that returns the entire dataset in a single
batch. Depending on the number of GPUs, the dataset is managed
as follows:
- **Distributed Environment** (multiple GPUs): Divides the dataset across
processes using the rank and world size. Fetches only the portion of
data corresponding to the current process.
- **Non-Distributed Environment** (single GPU): Fetches the entire
dataset.
:param PinaDataset dataset: The dataset object to be processed.
.. note::
This dataloader is used when the batch size is ``None``.
"""
if (
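A hedged sketch of the splitting logic described above (the contiguous-shard arithmetic is an assumption for illustration; only the ``torch.distributed`` calls are standard):

    import torch.distributed as dist

    if dist.is_available() and dist.is_initialized():
        # Distributed: each process keeps its own contiguous shard.
        rank, world_size = dist.get_rank(), dist.get_world_size()
        shard = len(dataset) // world_size
        indices = list(range(rank * shard, (rank + 1) * shard))
    else:
        # Non-distributed: a single process sees the whole dataset.
        indices = list(range(len(dataset)))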
@@ -84,8 +78,10 @@ class Collator:
Initialize the object, setting the collate function based on whether
automatic batching is enabled or not.
:param dict max_conditions_lengths: ``dict`` containing the maximum
number of data points to consider in a single batch for
each condition.
:param bool automatic_batching: Whether to enable automatic batching.
:param PinaDataset dataset: The dataset where the data is stored.
"""

View File

@@ -276,7 +276,8 @@ class PinaGraphDataset(PinaDataset):
:param data: List of items to collate in a single batch.
:type data: list[Data] | list[Graph]
:return: Batch object.
:rtype: :class:`~torch_geometric.data.Batch`
| :class:`~pina.graph.LabelBatch`
"""
if isinstance(data[0], Data):

View File

@@ -399,8 +399,9 @@ class LabelBatch(Batch):
:param data_list: List of :class:`~torch_geometric.data.Data` or
:class:`~pina.graph.Graph` objects.
:type data_list: list[Data] | list[Graph]
:return: A :class:`~torch_geometric.data.Batch` object containing
the input data.
:rtype: :class:`~torch_geometric.data.Batch`
"""
# Store the labels of Data/Graph objects (all data have the same labels)
# If the data do not contain labels, labels is an empty dictionary,

View File

@@ -389,14 +389,15 @@ class LabelTensor(torch.Tensor):
def requires_grad_(self, mode=True):
"""
Override the :meth:`~torch.Tensor.requires_grad_` method to handle
the labels in the new tensor.
For more details, see :meth:`~torch.Tensor.requires_grad_`.
:param bool mode: A boolean value indicating whether the tensor should
track gradients. If ``True``, the tensor will track gradients;
if ``False``, it will not.
:return: The :class:`~pina.label_tensor.LabelTensor` itself with the
updated ``requires_grad`` state and retained labels.
:rtype: LabelTensor
"""

View File

@@ -15,6 +15,7 @@ class BaseContinuousConv(torch.nn.Module, metaclass=ABCMeta):
batch_size, :math:`N_{in}` is the number of input fields, :math:`N`
the number of points in the mesh, :math:`D` the dimension of the problem.
In particular:
* :math:`D` is the number of spatial variables + 1. The last column must
contain the field value.
* :math:`N_{in}` represents the number of function components.
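For concreteness, a hedged shape example: a batch of 8 samples of a scalar field (:math:`N_{in}=1`) on a 2D mesh of 100 points has :math:`D=3` columns, i.e. ``[x, y, field value]``:

>>> x = torch.rand(8, 1, 100, 3)  # [B, N_in, N, D]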

View File

@@ -15,10 +15,13 @@ class ContinuousConvBlock(BaseContinuousConv):
batch_size, :math:`N_{in}` is the number of input fields, :math:`N`
the number of points in the mesh, :math:`D` the dimension of the problem.
In particular:
* :math:`D` is the number of spatial variables + 1. The last column must
contain the field value. For example, for a 2D problem :math:`D=3`,
and the tensor columns read ``[first coordinate, second coordinate,
field value]``.
* :math:`N_{in}` represents the number of function components. For
example, a vectorial function :math:`f = [f_1, f_2]` has
:math:`N_{in}=2`.
.. seealso::

View File

@@ -412,7 +412,8 @@ class DeepONet(MIONet):
Differently, for a :class:`torch.Tensor` only a list of integers can
be passed for ``input_indeces_branch_net`` and
``input_indeces_trunk_net``.
.. warning::
No checks are performed in the forward pass to verify if the input
is instance of either :class:`~pina.label_tensor.LabelTensor` or
:class:`torch.Tensor`. In general, in case of a

View File

@@ -36,7 +36,7 @@ class FeedForward(torch.nn.Module):
:param int inner_size: The number of neurons for each hidden layer.
Default is ``20``.
:param int n_layers: The number of hidden layers. Default is ``2``.
:param func: The activation function. If a list is passed, it must have
the same length as ``n_layers``. If a single function is passed, it
is used for all layers, except for the last one.
Default is :class:`torch.nn.Tanh`.
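A hedged construction example (``input_dimensions`` and ``output_dimensions`` are assumed parameter names; the remaining arguments and defaults follow the docstring above):

>>> net = FeedForward(input_dimensions=2, output_dimensions=1,
...                   inner_size=20, n_layers=2, func=torch.nn.Tanh)
>>> net(torch.rand(10, 2)).shape
torch.Size([10, 1])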
@@ -144,7 +144,7 @@ class ResidualFeedForward(torch.nn.Module):
:param int inner_size: The number of neurons for each hidden layer.
Default is ``20``.
:param int n_layers: The number of hidden layers. Default is ``2``.
:param func: The activation function. If a list is passed, it must have
the same length as ``n_layers``. If a single function is passed, it
is used for all layers, except for the last one.
Default is :class:`torch.nn.Tanh`.

View File

@@ -274,7 +274,7 @@ class FNO(KernelNeuralOperator):
layers=None,
):
"""
:param torch.nn.Module lifting_net: The lifting neural network mapping
the input to its hidden dimension.
:param torch.nn.Module projecting_net: The projection neural network
mapping the hidden representation to the output function.
@@ -318,22 +318,24 @@ class FNO(KernelNeuralOperator):
def forward(self, x):
"""
Forward pass for the :class:`FourierNeuralOperator` model.
The ``lifting_net`` maps the input to the hidden dimension.
Then, several layers of Fourier blocks are applied. Finally, the
``projection_net`` maps the hidden representation to the output
function.
:param x: The input tensor for performing the computation. Depending
on the ``dimensions`` in the initialization, it expects a tensor
with the following shapes:
* 1D tensors: ``[batch, X, channels]``
* 2D tensors: ``[batch, X, Y, channels]``
* 3D tensors: ``[batch, X, Y, Z, channels]``
:type x: torch.Tensor | LabelTensor
:return: The output tensor.
:rtype: torch.Tensor
"""
if isinstance(x, LabelTensor):
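For instance, under the documented 2D shape convention (``fno`` being an already-constructed :class:`FNO`; sizes are hypothetical):

>>> x = torch.rand(8, 32, 32, 3)  # [batch, X, Y, channels]
>>> out = fno(x)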

View File

@@ -8,9 +8,9 @@ from .kernel_neural_operator import KernelNeuralOperator
class GraphNeuralKernel(torch.nn.Module):
"""
Graph Neural Operator kernel model class.
This class implements the Graph Neural Operator kernel network.
.. seealso::
@@ -18,8 +18,7 @@ class GraphNeuralKernel(torch.nn.Module):
Liu, B., Bhattacharya, K., Stuart, A., Anandkumar, A. (2020).
*Neural Operator: Graph Kernel Network for Partial Differential
Equations*.
DOI: `arXiv preprint arXiv:2003.03485 <https://arxiv.org/abs/2003.03485>`_
"""
def __init__(
@@ -171,7 +170,7 @@ class GraphNeuralOperator(KernelNeuralOperator):
"""
Initialization of the :class:`GraphNeuralOperator` class.
:param torch.nn.Module lifting_operator: The lifting neural network
mapping the input to its hidden dimension.
:param torch.nn.Module projection_operator: The projection neural
network mapping the hidden representation to the output function.

View File

@@ -17,8 +17,9 @@ class TorchOptimizer(Optimizer):
:param torch.optim.Optimizer optimizer_class: A
:class:`torch.optim.Optimizer` class.
:param dict kwargs: Additional parameters passed to ``optimizer_class``,
see more
`here <https://pytorch.org/docs/stable/optim.html#algorithms>`_.
"""
check_consistency(optimizer_class, torch.optim.Optimizer, subclass=True)
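A hedged usage sketch (``kwargs`` are assumed to be forwarded verbatim to the wrapped class, per the docstring above):

>>> optimizer = TorchOptimizer(torch.optim.Adam, lr=1e-3)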

View File

@@ -23,8 +23,9 @@ class TorchScheduler(Scheduler):
:param torch.optim.LRScheduler scheduler_class: A
:class:`torch.optim.LRScheduler` class.
:param dict kwargs: Additional parameters passed to ``scheduler_class``,
see more
`here <https://pytorch.org/docs/stable/optim.html#algorithms>`_.
"""
check_consistency(scheduler_class, LRScheduler, subclass=True)
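Analogously, a hedged sketch for the scheduler wrapper:

>>> scheduler = TorchScheduler(torch.optim.lr_scheduler.StepLR, step_size=100)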

View File

@@ -47,7 +47,7 @@ class AbstractProblem(metaclass=ABCMeta):
Get batching dimension.
:return: The batching dimension.
:rtype: int
"""
return self._batching_dimension
@@ -85,7 +85,7 @@ class AbstractProblem(metaclass=ABCMeta):
points.
:return: The discretised domains.
:rtype: dict
"""
return self._discretised_domains
@@ -178,13 +178,28 @@ class AbstractProblem(metaclass=ABCMeta):
chebyshev sampling, ``chebyshev``; grid sampling ``grid``.
:param domains: The domains from which to sample. Default is ``all``.
:type domains: str | list[str]
:param dict sample_rules: A dictionary defining custom sampling rules
for input variables. If provided, it must contain a dictionary
specifying the sampling rule for each variable, overriding the
``n`` and ``mode`` arguments. Each key must correspond to an
input variable from
:meth:`~pina.problem.AbstractProblem.input_variables`, and its
value must be another dictionary with two keys: ``n`` (number of
points to sample) and ``mode`` (sampling method). Default is
``None``.
:raises RuntimeError: If both ``n`` and ``sample_rules`` are specified.
:raises RuntimeError: If neither ``n`` nor ``sample_rules`` are set.
:Example:
>>> problem.discretise_domain(n=10, mode='grid')
>>> problem.discretise_domain(n=10, mode='grid', domains=['gamma1'])
>>> problem.discretise_domain(
... sample_rules={
... 'x': {'n': 10, 'mode': 'grid'},
... 'y': {'n': 100, 'mode': 'grid'}
... },
... domains=['D']
... )
.. warning::
``random`` is currently the only implemented ``mode`` for all
@@ -197,6 +212,11 @@ class AbstractProblem(metaclass=ABCMeta):
:class:`~pina.domain.intersection_domain.Intersection`.
The modes ``latin`` or ``lh``, ``chebyshev``, ``grid`` are only
implemented for :class:`~pina.domain.cartesian.CartesianDomain`.
.. warning::
If a custom discretisation is applied by setting ``sample_rules`` to
a value other than ``None``, the discretised domain must be of class
:class:`~pina.domain.cartesian.CartesianDomain`.
"""
# check consistency of n, mode, variables, locations

View File

@@ -82,7 +82,7 @@ class RBAPINN(PINN):
:param AbstractProblem problem: The problem to be solved.
:param torch.nn.Module model: The neural network model to be used.
:param Optimizer optimizer: The optimizer to be used.
If ``None``, the :class:`torch.optim.Adam` optimizer is used.
Default is ``None``.
:param Scheduler scheduler: Learning rate scheduler.
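A hedged construction example (``problem`` and ``model`` as defined by the user; optimizer and scheduler left to the documented defaults):

>>> solver = RBAPINN(problem=problem, model=model)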

View File

@@ -13,8 +13,9 @@ class Trainer(lightning.pytorch.Trainer):
PINA custom Trainer class to extend the standard Lightning functionality.
This class enables specific features or behaviors required by the PINA
framework. It modifies the standard
:class:`lightning.pytorch.Trainer <lightning.pytorch.trainer.trainer.Trainer>`
class to better support the training process in PINA.
"""
def __init__(
@@ -207,7 +208,9 @@ class Trainer(lightning.pytorch.Trainer):
"""
Manage the training process of the solver.
:param dict kwargs: Additional keyword arguments. See `pytorch-lightning
Trainer API <https://lightning.ai/docs/pytorch/stable/common/trainer.html#trainer-class-api>`_
for details.
"""
return super().fit(self.solver, datamodule=self.data_module, **kwargs)
@@ -215,7 +218,9 @@ class Trainer(lightning.pytorch.Trainer):
"""
Manage the test process of the solver.
:param dict kwargs: Additional keyword arguments. See `pytorch-lightning
Trainer API <https://lightning.ai/docs/pytorch/stable/common/trainer.html#trainer-class-api>`_
for details.
"""
return super().test(self.solver, datamodule=self.data_module, **kwargs)
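A hedged end-to-end sketch (the ``train``/``test`` method names are inferred from the two docstrings above; ``solver`` as defined by the user):

>>> trainer = Trainer(solver=solver, max_epochs=1000, accelerator='cpu')
>>> trainer.train()
>>> trainer.test()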