diff --git a/docs/source/_rst/_code.rst b/docs/source/_rst/_code.rst index fb0674e..c7cf79c 100644 --- a/docs/source/_rst/_code.rst +++ b/docs/source/_rst/_code.rst @@ -11,11 +11,11 @@ The high-level structure of the package is depicted in our API. The pipeline to solve differential equations with PINA follows just five steps: - 1. Define the `Problem`_ the user aim to solve - 2. Generate data using built in `Domains`_, or load high level simulation results as :doc:`LabelTensor ` + 1. Define the `Problems`_ the user aims to solve + 2. Generate data using built-in `Geometrical Domains`_, or load high-level simulation results as :doc:`LabelTensor ` 3. Choose or build one or more `Models`_ to solve the problem 4. Choose a solver across PINA available `Solvers`_, or build one using the :doc:`SolverInterface ` - 5. Train the model with the PINA :doc:`Trainer `, enhance the train with `Callback`_ + 5. Train the model with the PINA :doc:`Trainer `, enhance the training with `Callbacks`_ Trainer, Dataset and Datamodule @@ -34,6 +34,7 @@ Data Types LabelTensor Graph + LabelBatch Graphs Structures @@ -41,7 +42,6 @@ Graphs Structures .. toctree:: :titlesonly: - Graph GraphBuilder RadiusGraph KNNGraph @@ -98,7 +98,8 @@ Models FNO AveragingNeuralOperator LowRankNeuralOperator - GraphNeuralOperator + GraphNeuralOperator + GraphNeuralKernel Blocks ------------- @@ -112,7 +113,10 @@ Blocks Fourier Block Averaging Block Low Rank Block + Graph Neural Operator Block + Continuous Convolution Interface Continuous Convolution Block + Orthogonal Block Reduction and Embeddings @@ -144,7 +148,7 @@ Adaptive Activation Functions .. toctree:: :titlesonly: - Adaptive Function Interface + Adaptive Function Interface Adaptive ReLU Adaptive Sigmoid Adaptive Tanh @@ -165,10 +169,10 @@ Equations and Differential Operators .. toctree:: :titlesonly: - EquationInterface - Equation - SystemEquation - Equation Factory + EquationInterface + Equation + SystemEquation + Equation Factory Differential Operators @@ -200,7 +204,7 @@ Problems Zoo Geometrical Domains ---------------------- +-------------------- .. toctree:: :titlesonly: @@ -222,8 +226,8 @@ Domain Operations Difference Exclusion -Callback -------------------- +Callbacks +----------- .. toctree:: :titlesonly: diff --git a/docs/source/_rst/adaptive_function/AdaptiveActivationFunctionInterface.rst b/docs/source/_rst/adaptive_function/AdaptiveActivationFunctionInterface.rst index d8c9251..cf8b655 100644 --- a/docs/source/_rst/adaptive_function/AdaptiveActivationFunctionInterface.rst +++ b/docs/source/_rst/adaptive_function/AdaptiveActivationFunctionInterface.rst @@ -3,6 +3,6 @@ AdaptiveActivationFunctionInterface .. currentmodule:: pina.adaptive_function.adaptive_function_interface -.. automodule:: pina.adaptive_function.adaptive_functiontion_interface +.. 
automodule:: pina.adaptive_function.adaptive_function_interface :members: :show-inheritance: diff --git a/docs/source/_rst/callback/adaptive_refinment_callbacks.rst b/docs/source/_rst/callback/adaptive_refinment_callback.rst similarity index 100% rename from docs/source/_rst/callback/adaptive_refinment_callbacks.rst rename to docs/source/_rst/callback/adaptive_refinment_callback.rst diff --git a/docs/source/_rst/callback/optimizer_callbacks.rst b/docs/source/_rst/callback/optimizer_callback.rst similarity index 100% rename from docs/source/_rst/callback/optimizer_callbacks.rst rename to docs/source/_rst/callback/optimizer_callback.rst diff --git a/docs/source/_rst/callback/processing_callbacks.rst b/docs/source/_rst/callback/processing_callback.rst similarity index 76% rename from docs/source/_rst/callback/processing_callbacks.rst rename to docs/source/_rst/callback/processing_callback.rst index 71f3d1f..a06bb8b 100644 --- a/docs/source/_rst/callback/processing_callbacks.rst +++ b/docs/source/_rst/callback/processing_callback.rst @@ -1,7 +1,7 @@ Processing callbacks ======================= -.. currentmodule:: pina.callbacks.processing_callback +.. currentmodule:: pina.callback.processing_callback .. autoclass:: MetricTracker :members: :show-inheritance: diff --git a/docs/source/_rst/domain/domain.rst b/docs/source/_rst/domain/domain.rst index 654bb48..27adcf0 100644 --- a/docs/source/_rst/domain/domain.rst +++ b/docs/source/_rst/domain/domain.rst @@ -4,6 +4,6 @@ Domain .. automodule:: pina.domain.domain_interface -.. autoclass:: Domain +.. autoclass:: DomainInterface :members: :show-inheritance: \ No newline at end of file diff --git a/docs/source/_rst/graph/label_batch.rst b/docs/source/_rst/graph/label_batch.rst new file mode 100644 index 0000000..7cd4d26 --- /dev/null +++ b/docs/source/_rst/graph/label_batch.rst @@ -0,0 +1,9 @@ +LabelBatch +=========== +.. currentmodule:: pina.graph + + +.. autoclass:: LabelBatch + :members: + :private-members: + :show-inheritance: \ No newline at end of file diff --git a/docs/source/_rst/model/block/average_neural_operator_block.py.rst b/docs/source/_rst/model/block/average_neural_operator_block.rst similarity index 100% rename from docs/source/_rst/model/block/average_neural_operator_block.py.rst rename to docs/source/_rst/model/block/average_neural_operator_block.rst diff --git a/docs/source/_rst/model/block/convolution_interface.rst b/docs/source/_rst/model/block/convolution_interface.rst new file mode 100644 index 0000000..f8e61c1 --- /dev/null +++ b/docs/source/_rst/model/block/convolution_interface.rst @@ -0,0 +1,8 @@ +Continuous Convolution Interface +================================== +.. currentmodule:: pina.model.block.convolution + +.. autoclass:: BaseContinuousConv + :members: + :show-inheritance: + :noindex: diff --git a/docs/source/_rst/model/fourier_integral_kernel.rst b/docs/source/_rst/model/fourier_integral_kernel.rst index e45ba17..b1fb484 100644 --- a/docs/source/_rst/model/fourier_integral_kernel.rst +++ b/docs/source/_rst/model/fourier_integral_kernel.rst @@ -1,6 +1,6 @@ FourierIntegralKernel ========================= -.. currentmodule:: pina.model.fno +.. currentmodule:: pina.model.fourier_neural_operator .. 
autoclass:: FourierIntegralKernel :members: diff --git a/docs/source/_rst/model/graph_neural_operator_integral_kernel.rst b/docs/source/_rst/model/graph_neural_operator_integral_kernel.rst new file mode 100644 index 0000000..cf15a31 --- /dev/null +++ b/docs/source/_rst/model/graph_neural_operator_integral_kernel.rst @@ -0,0 +1,7 @@ +GraphNeuralKernel +======================= +.. currentmodule:: pina.model.graph_neural_operator + +.. autoclass:: GraphNeuralKernel + :members: + :show-inheritance: \ No newline at end of file diff --git a/docs/source/_rst/solver/physic_informed_solver/causal_pinn.rst b/docs/source/_rst/solver/physic_informed_solver/causal_pinn.rst index a418776..4899001 100644 --- a/docs/source/_rst/solver/physic_informed_solver/causal_pinn.rst +++ b/docs/source/_rst/solver/physic_informed_solver/causal_pinn.rst @@ -1,6 +1,6 @@ CausalPINN ============== -.. currentmodule:: pina.solver.physic_informed_solver.causalpinn +.. currentmodule:: pina.solver.physic_informed_solver.causal_pinn .. autoclass:: CausalPINN :members: diff --git a/docs/source/_rst/solver/physic_informed_solver/rba_pinn.rst b/docs/source/_rst/solver/physic_informed_solver/rba_pinn.rst index 18899bd..63d72f2 100644 --- a/docs/source/_rst/solver/physic_informed_solver/rba_pinn.rst +++ b/docs/source/_rst/solver/physic_informed_solver/rba_pinn.rst @@ -1,6 +1,6 @@ RBAPINN ======== -.. currentmodule:: pina.solver.physic_informed_solver.rbapinn +.. currentmodule:: pina.solver.physic_informed_solver.rba_pinn .. autoclass:: RBAPINN :members: diff --git a/docs/source/_tutorial.rst b/docs/source/_tutorial.rst index c12bfa6..745e575 100644 --- a/docs/source/_tutorial.rst +++ b/docs/source/_tutorial.rst @@ -1,46 +1,35 @@ PINA Tutorials -============== +====================== + In this folder we collect useful tutorials in order to understand the principles and the potential of **PINA**. Getting started with PINA ------------------------- -.. toctree:: - :maxdepth: 3 - :titlesonly: - `Introduction to PINA for Physics Informed Neural Networks training `_ - `Introduction to PINA Equation class `_ - `PINA and PyTorch Lightning, training tips and visualizations `_ - `Building custom geometries with PINA Location class `_ +- `Introduction to PINA for Physics Informed Neural Networks training `_ +- `Introduction to PINA Equation class `_ +- `PINA and PyTorch Lightning, training tips and visualizations `_ +- `Building custom geometries with PINA Location class `_ Physics Informed Neural Networks -------------------------------- -.. toctree:: - :maxdepth: 3 - :titlesonly: - `Two dimensional Poisson problem using Extra Features Learning `_ - `Two dimensional Wave problem with hard constraint `_ - `Resolution of a 2D Poisson inverse problem `_ - `Periodic Boundary Conditions for Helmotz Equation `_ - `Multiscale PDE learning with Fourier Feature Network `_ +- `Two dimensional Poisson problem using Extra Features Learning `_ +- `Two dimensional Wave problem with hard constraint `_ +- `Resolution of a 2D Poisson inverse problem `_ +- `Periodic Boundary Conditions for Helmotz Equation `_ +- `Multiscale PDE learning with Fourier Feature Network `_ Neural Operator Learning ------------------------ -.. 
toctree:: - :maxdepth: 3 - :titlesonly: - `Two dimensional Darcy flow using the Fourier Neural Operator `_ - `Time dependent Kuramoto Sivashinsky equation using the Averaging Neural Operator `_ +- `Two dimensional Darcy flow using the Fourier Neural Operator `_ +- `Time dependent Kuramoto Sivashinsky equation using the Averaging Neural Operator `_ Supervised Learning ------------------- -.. toctree:: - :maxdepth: 3 - :titlesonly: - `Unstructured convolutional autoencoder via continuous convolution `_ - `POD-RBF and POD-NN for reduced order modeling `_ +- `Unstructured convolutional autoencoder via continuous convolution `_ +- `POD-RBF and POD-NN for reduced order modeling `_ diff --git a/docs/source/conf.py b/docs/source/conf.py index d0ddc09..ca0fe42 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -19,84 +19,84 @@ import importlib.metadata # -- Project information ----------------------------------------------------- -_DISTRIBUTION_METADATA = importlib.metadata.metadata('pina-mathlab') -project = _DISTRIBUTION_METADATA['Name'] -copyright = _DISTRIBUTION_METADATA['License-File'] -author = "PINA contributors" -version = _DISTRIBUTION_METADATA['Version'] +_DISTRIBUTION_METADATA = importlib.metadata.metadata("pina-mathlab") +project = _DISTRIBUTION_METADATA["Name"] +copyright = _DISTRIBUTION_METADATA["License-File"] +author = "PINA Contributors" +version = _DISTRIBUTION_METADATA["Version"] -sys.path.insert(0, os.path.abspath('../sphinx_extensions')) # extension to remove paramref link from lightinig +sys.path.insert( + 0, os.path.abspath("../sphinx_extensions") +) # -- General configuration ------------------------------------------------ extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.autosummary', - 'sphinx.ext.doctest', - 'sphinx.ext.napoleon', - 'sphinx.ext.intersphinx', - 'sphinx.ext.todo', - 'sphinx.ext.coverage', - 'sphinx.ext.viewcode', - 'sphinx.ext.mathjax', - 'sphinx.ext.intersphinx', - 'paramref_extension', # this extension is made to remove paramref links from lightining doc - 'sphinx_copybutton', - 'sphinx_design' + "sphinx.ext.autodoc", + "sphinx.ext.autosummary", + "sphinx.ext.doctest", + "sphinx.ext.napoleon", + "sphinx.ext.intersphinx", + "sphinx.ext.todo", + "sphinx.ext.coverage", + "sphinx.ext.viewcode", + "sphinx.ext.mathjax", + "sphinx.ext.intersphinx", + "paramref_extension", # this extension is made to remove paramref links from lightining doc + "sphinx_copybutton", + "sphinx_design", ] -# The root document. -root_doc = 'index' - # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. -exclude_patterns = ['_build', 'docstrings', 'nextgen', 'Thumbs.db', '.DS_Store'] +exclude_patterns = ["build", "docstrings", "nextgen", "Thumbs.db", ".DS_Store"] # The reST default role (used for this markup: `text`) to use for all documents. 
-#default_role = 'literal' +# default_role = 'literal' # Generate the API documentation when building autosummary_generate = True numpydoc_show_class_members = False intersphinx_mapping = { - 'python': ('http://docs.python.org/3', None), - 'matplotlib': ('https://matplotlib.org/stable', None), - 'torch': ('https://pytorch.org/docs/stable/', None), - 'lightning.pytorch': ("https://lightning.ai/docs/pytorch/stable/", None), - } + "python": ("http://docs.python.org/3", None), + "matplotlib": ("https://matplotlib.org/stable", None), + "torch": ("https://pytorch.org/docs/stable/", None), + "lightning.pytorch": ("https://lightning.ai/docs/pytorch/stable/", None), + "torch_geometric": ( + "https://pytorch-geometric.readthedocs.io/en/latest/", + None, + ), +} -nitpicky = True -nitpick_ignore = [ - # ('py:meth', 'lightning.pytorch.core.module.LightningModule.log'), - # ('py:meth', 'lightning.pytorch.core.module.LightningModule.log_dict'), - # ('py:exc', 'MisconfigurationException'), - # ('py:func', 'torch.inference_mode'), - # ('py:func', 'torch.no_grad'), - # ('py:class', 'torch.utils.data.DistributedSampler'), - # ('py:class', 'pina.model.layers.convolution.BaseContinuousConv'), - # ('py:class', 'Module'), - # ('py:class', 'torch.nn.modules.loss._Loss'), # TO FIX - # ('py:class', 'torch.optim.LRScheduler'), # TO FIX - - ] +# nitpicky = True +# nitpick_ignore = [ +# ("py:meth", "lightning.pytorch.core.module.LightningModule.log"), +# ("py:meth", "lightning.pytorch.core.module.LightningModule.log_dict"), +# ("py:exc", "MisconfigurationException"), +# ("py:func", "torch.inference_mode"), +# ("py:func", "torch.no_grad"), +# ("py:class", "torch.utils.data.DistributedSampler"), +# ("py:class", "pina.model.layers.convolution.BaseContinuousConv"), +# ("py:class", "Module"), +# ("py:class", "torch.nn.modules.loss._Loss"), # TO FIX +# ("py:class", "torch.optim.LRScheduler"), # TO FIX +# ] # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -# source_suffix = ['.rst', '.md'] -source_suffix = '.rst' +source_suffix = ".rst" # The master toctree document. -master_doc = 'index' +master_doc = "index" # autoclass -autoclass_content = 'both' +autoclass_content = "both" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -108,7 +108,7 @@ release = version # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. -language = 'en' +language = "en" # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. @@ -122,7 +122,7 @@ add_function_parentheses = True add_module_names = False # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # A list of ignored prefixes for module index sortins as "systems = False @@ -143,7 +143,7 @@ viewcode_import = True # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. -html_theme = 'pydata_sphinx_theme' +html_theme = "pydata_sphinx_theme" # Theme options are theme-specific and customize the look and feel of a theme # further. 
For a list of options available for each theme, see the @@ -151,7 +151,7 @@ html_theme = 'pydata_sphinx_theme' # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. -html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] +# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # The name of an image file (relative to this directory) to place at the top # of the sidebar. @@ -162,7 +162,7 @@ html_theme_options = { "name": "GitHub", "url": "https://github.com/mathLab/PINA", "icon": "fab fa-github", - "type": "fontawesome", + "type": "fontawesome", }, { "name": "Twitter", @@ -172,7 +172,7 @@ html_theme_options = { }, { "name": "Email", - "url": "mailto:pina.mathlab@gmail.com", + "url": "mailto:pina.mathlab@gmail.com", "icon": "fas fa-envelope", "type": "fontawesome", }, @@ -185,7 +185,7 @@ html_theme_options = { # If not ''i, a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. -html_last_updated_fmt = '%b %d, %Y' +html_last_updated_fmt = "%b %d, %Y" # If false, no index is generated. html_use_index = True @@ -197,40 +197,52 @@ html_show_sourcelink = True html_show_copyright = True # Output file base name for HTML help builder. -htmlhelp_basename = 'pinadoc' +htmlhelp_basename = "pinadoc" + +# Link to external html files +html_extra_path = ["tutorials"] + +# Avoid side bar for html files +html_sidebars = { + "_tutorial": [], + "_team": [], + "_cite": [], + "_contributing": [], + "_installation": [], + "_LICENSE": [], +} # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). - 'papersize': 'a4paper', - + "papersize": "a4paper", # The font size ('10pt', '11pt' or '12pt'). - 'pointsize': '20pt', - + "pointsize": "20pt", # Additional stuff for the LaTeX preamble. - 'preamble': '', - + "preamble": "", # Latex figure (float) alignment - 'figure_align': 'htbp', + "figure_align": "htbp", } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - (master_doc, 'pina.tex', u'PINA Documentation', - u'PINA contributors', 'manual'), + ( + master_doc, + "pina.tex", + "PINA Documentation", + "PINA contributors", + "manual", + ), ] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, 'pina', u'PINA Documentation', - [author], 1) -] +man_pages = [(master_doc, "pina", "PINA Documentation", [author], 1)] # -- Options for Texinfo output ------------------------------------------- @@ -238,11 +250,20 @@ man_pages = [ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - (master_doc, 'pina', u'PINA Documentation', - author, 'pina', 'One line description of project.', - 'Miscellaneous'), + ( + master_doc, + "pina", + "PINA Documentation", + author, + "pina", + "One line description of project.", + "Miscellaneous", + ), ] # If true, do not generate a @detailmenu in the "Top" node's menu. 
# texinfo_no_detailmenu = False -autodoc_member_order = 'bysource' +autodoc_member_order = "bysource" + +# Do consider meth ending with _ (needed for in-place methods of torch) +strip_signature_backslash = True diff --git a/docs/source/index.rst b/docs/source/index.rst index d062abd..fbebe0a 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -9,32 +9,32 @@ Welcome to PINA’s documentation! .. grid-item:: .. image:: index_files/tutorial_13_3.png - :target: tutorials/tutorial2/tutorial.html + :target: tutorial2/tutorial.html .. grid-item:: .. image:: index_files/tutorial_32_0.png - :target: tutorials/tutorial4/tutorial.html + :target: tutorial4/tutorial.html .. grid-item:: .. image:: index_files/tutorial_13_01.png - :target: tutorials/tutorial9/tutorial.html + :target: tutorial9/tutorial.html .. grid-item:: .. image:: index_files/tutorial_36_0.png - :target: tutorials/tutorial6/tutorial.html + :target: tutorial6/tutorial.html .. grid-item:: .. image:: index_files/tutorial_15_0.png - :target: tutorials/tutorial13/tutorial.html + :target: tutorial13/tutorial.html .. grid-item:: .. image:: index_files/tutorial_5_0.png - :target: tutorials/tutorial10/tutorial.html + :target: tutorial10/tutorial.html .. grid:: 1 1 3 3 @@ -45,7 +45,7 @@ Welcome to PINA’s documentation! an open-source Python library providing an intuitive interface for solving differential equations using PINNs, NOs or both together. - Based on `PyTorch `_ and `PyTorchLightning `_, **PINA** offers a simple and intuitive way to formalize a specific (differential) problem + Based on `PyTorch `_, `PyTorchLightning `_, and `PyG `_, **PINA** offers a simple and intuitive way to formalize a specific (differential) problem and solve it using neural networks . The approximated solution of a differential equation can be implemented using PINA in a few lines of code thanks to the intuitive and user-friendly interface. @@ -63,9 +63,9 @@ Welcome to PINA’s documentation! .. toctree:: :maxdepth: 1 - Installing <_installation> - Tutorial <_tutorial> API <_rst/_code> + Tutorial <_tutorial> + Installing <_installation> Team & Foundings <_team.rst> Contributing <_contributing> License <_LICENSE.rst> diff --git a/docs/sphinx_extensions/paramref_extension.py b/docs/sphinx_extensions/paramref_extension.py index 3b72284..e4f9396 100644 --- a/docs/sphinx_extensions/paramref_extension.py +++ b/docs/sphinx_extensions/paramref_extension.py @@ -1,11 +1,12 @@ from docutils import nodes from docutils.parsers.rst.roles import register_local_role + def paramref_role(name, rawtext, text, lineno, inliner, options={}, content=[]): # Simply replace :paramref: with :param: new_role = nodes.literal(text=text[1:]) return [new_role], [] -def setup(app): - register_local_role('paramref', paramref_role) +def setup(app): + register_local_role("paramref", paramref_role) diff --git a/pina/adaptive_function/adaptive_function_interface.py b/pina/adaptive_function/adaptive_function_interface.py index 6a9707f..a655fdb 100644 --- a/pina/adaptive_function/adaptive_function_interface.py +++ b/pina/adaptive_function/adaptive_function_interface.py @@ -7,9 +7,7 @@ from ..utils import check_consistency, is_function class AdaptiveActivationFunctionInterface(torch.nn.Module, metaclass=ABCMeta): r""" - The - :class:`~pina.adaptive_function.adaptive_func_interface.\ - AdaptiveActivationFunctionInterface` + The :class:`AdaptiveActivationFunctionInterface` class makes a :class:`torch.nn.Module` activation function into an adaptive trainable activation function. 
If one wants to create an adpative activation function, this class must be use as base class. diff --git a/pina/data/data_module.py b/pina/data/data_module.py index 1e69d6e..2489010 100644 --- a/pina/data/data_module.py +++ b/pina/data/data_module.py @@ -16,29 +16,23 @@ from ..collector import Collector class DummyDataloader: - """ - Dataloader used when batch size is ``None``. It returns the entire dataset - in a single batch. - """ def __init__(self, dataset): """ - Preprare a dataloader object which will return the entire dataset - in a single batch. Depending on the number of GPUs, the dataset is - managed as follows: + Prepare a dataloader object that returns the entire dataset in a single + batch. Depending on the number of GPUs, the dataset is managed + as follows: - - **Distributed Environment** (multiple GPUs): - - Divides the dataset across processes using the rank and world - size. - - Fetches only the portion of data corresponding to the current - process. - - **Non-Distributed Environment** (single GPU): - - Fetches the entire dataset. + - **Distributed Environment** (multiple GPUs): Divides dataset across + processes using the rank and world size. Fetches only portion of + data corresponding to the current process. + - **Non-Distributed Environment** (single GPU): Fetches the entire + dataset. - :param dataset: The dataset object to be processed. - :type dataset: PinaDataset + :param PinaDataset dataset: The dataset object to be processed. - .. note:: This data loader is used when the batch size is ``None``. + .. note:: + This dataloader is used when the batch size is ``None``. """ if ( @@ -84,8 +78,10 @@ class Collator: Initialize the object, setting the collate function based on whether automatic batching is enabled or not. - :param dict max_conditions_lengths: dict containing the maximum number - of data points to consider in a single batch for each condition. + :param dict max_conditions_lengths: ``dict`` containing the maximum + number of data points to consider in a single batch for + each condition. + :param bool automatic_batching: Whether to enable automatic batching. :param PinaDataset dataset: The dataset where the data is stored. """ diff --git a/pina/data/dataset.py b/pina/data/dataset.py index 798cdb6..cf1d711 100644 --- a/pina/data/dataset.py +++ b/pina/data/dataset.py @@ -276,7 +276,8 @@ class PinaGraphDataset(PinaDataset): :param data: List of items to collate in a single batch. :type data: list[Data] | list[Graph] :return: Batch object. - :rtype: Batch | LabelBatch + :rtype: :class:`~torch_geometric.data.Batch` + | :class:`~pina.graph.LabelBatch` """ if isinstance(data[0], Data): diff --git a/pina/graph.py b/pina/graph.py index f636899..1340ed6 100644 --- a/pina/graph.py +++ b/pina/graph.py @@ -399,8 +399,9 @@ class LabelBatch(Batch): :param data_list: List of :class:`~torch_geometric.data.Data` or :class:`~pina.graph.Graph` objects. :type data_list: list[Data] | list[Graph] - :return: A :class:`Batch` object containing the input data. - :rtype: Batch + :return: A :class:`~torch_geometric.data.Batch` object containing + the input data. 
+ :rtype: :class:`~torch_geometric.data.Batch` """ # Store the labels of Data/Graph objects (all data have the same labels) # If the data do not contain labels, labels is an empty dictionary, diff --git a/pina/label_tensor.py b/pina/label_tensor.py index 0708300..83da093 100644 --- a/pina/label_tensor.py +++ b/pina/label_tensor.py @@ -389,14 +389,15 @@ class LabelTensor(torch.Tensor): def requires_grad_(self, mode=True): """ - Override the requires_grad_ method to handle the labels in the new - tensor. For more details, see :meth:`torch.Tensor.requires_grad_`. + Override the :meth:`~torch.Tensor.requires_grad_` method to handle + the labels in the new tensor. + For more details, see :meth:`~torch.Tensor.requires_grad_`. :param bool mode: A boolean value indicating whether the tensor should track gradients.If `True`, the tensor will track gradients; if `False`, it will not. :return: The :class:`~pina.label_tensor.LabelTensor` itself with the - updated `requires_grad` state and retained labels. + updated ``requires_grad`` state and retained labels. :rtype: LabelTensor """ diff --git a/pina/model/block/convolution.py b/pina/model/block/convolution.py index 439a8d6..666f66a 100644 --- a/pina/model/block/convolution.py +++ b/pina/model/block/convolution.py @@ -15,6 +15,7 @@ class BaseContinuousConv(torch.nn.Module, metaclass=ABCMeta): batch_size, :math:`N_{in}` is the number of input fields, :math:`N` the number of points in the mesh, :math:`D` the dimension of the problem. In particular: + * :math:`D` is the number of spatial variables + 1. The last column must contain the field value. * :math:`N_{in}` represents the number of function components. diff --git a/pina/model/block/convolution_2d.py b/pina/model/block/convolution_2d.py index dfd9968..825ae61 100644 --- a/pina/model/block/convolution_2d.py +++ b/pina/model/block/convolution_2d.py @@ -15,10 +15,13 @@ class ContinuousConvBlock(BaseContinuousConv): batch_size, :math:`N_{in}` is the number of input fields, :math:`N` the number of points in the mesh, :math:`D` the dimension of the problem. In particular: + * :math:`D` is the number of spatial variables + 1. The last column must - contain the field value. - * :math:`N_{in}` represents the number of function components. - For instance, a vectorial function :math:`f = [f_1, f_2]` has + contain the field value. For example for 2D problems :math:`D=3` and + the tensor will be something like ``[first coordinate, second + coordinate, field value]``. + * :math:`N_{in}` represents the number of vectorial function presented. + For example a vectorial function :math:`f = [f_1, f_2]` will have :math:`N_{in}=2`. .. seealso:: diff --git a/pina/model/deeponet.py b/pina/model/deeponet.py index 678f4e3..6da1616 100644 --- a/pina/model/deeponet.py +++ b/pina/model/deeponet.py @@ -412,7 +412,8 @@ class DeepONet(MIONet): Differently, for a :class:`torch.Tensor` only a list of integers can be passed for ``input_indeces_branch_net`` and ``input_indeces_trunk_net``. - .. warning:: + + .. warning:: No checks are performed in the forward pass to verify if the input is instance of either :class:`~pina.label_tensor.LabelTensor` or :class:`torch.Tensor`. In general, in case of a diff --git a/pina/model/feed_forward.py b/pina/model/feed_forward.py index 5dcc764..a1651b3 100644 --- a/pina/model/feed_forward.py +++ b/pina/model/feed_forward.py @@ -36,7 +36,7 @@ class FeedForward(torch.nn.Module): :param int inner_size: The number of neurons for each hidden layer. Default is ``20``. 
:param int n_layers: The number of hidden layers. Default is ``2``. - ::param func: The activation function. If a list is passed, it must have + :param func: The activation function. If a list is passed, it must have the same length as ``n_layers``. If a single function is passed, it is used for all layers, except for the last one. Default is :class:`torch.nn.Tanh`. @@ -144,7 +144,7 @@ class ResidualFeedForward(torch.nn.Module): :param int inner_size: The number of neurons for each hidden layer. Default is ``20``. :param int n_layers: The number of hidden layers. Default is ``2``. - ::param func: The activation function. If a list is passed, it must have + :param func: The activation function. If a list is passed, it must have the same length as ``n_layers``. If a single function is passed, it is used for all layers, except for the last one. Default is :class:`torch.nn.Tanh`. diff --git a/pina/model/fourier_neural_operator.py b/pina/model/fourier_neural_operator.py index f93d126..bef7274 100644 --- a/pina/model/fourier_neural_operator.py +++ b/pina/model/fourier_neural_operator.py @@ -274,7 +274,7 @@ class FNO(KernelNeuralOperator): layers=None, ): """ - param torch.nn.Module lifting_net: The lifting neural network mapping + :param torch.nn.Module lifting_net: The lifting neural network mapping the input to its hidden dimension. :param torch.nn.Module projecting_net: The projection neural network mapping the hidden representation to the output function. @@ -318,22 +318,24 @@ class FNO(KernelNeuralOperator): def forward(self, x): """ - Forward pass for the :class:`FourierNeuralOperator` model. + Forward pass for the :class:`FourierNeuralOperator` model. - The ``lifting_net`` maps the input to the hidden dimension. - Then, several layers of Fourier blocks are applied. Finally, the - ``projection_net`` maps the hidden representation to the output - function. + The ``lifting_net`` maps the input to the hidden dimension. + Then, several layers of Fourier blocks are applied. Finally, the + ``projection_net`` maps the hidden representation to the output + function. - : param x: The input tensor for performing the computation. Depending - on the ``dimensions`` in the initialization, it expects a tensor - with the following shapes: - * 1D tensors: ``[batch, X, channels]`` - * 2D tensors: ``[batch, X, Y, channels]`` - * 3D tensors: ``[batch, X, Y, Z, channels]`` - :type x: torch.Tensor | LabelTensor - :return: The output tensor. - :rtype: torch.Tensor + :param x: The input tensor for performing the computation. Depending + on the ``dimensions`` in the initialization, it expects a tensor + with the following shapes: + + * 1D tensors: ``[batch, X, channels]`` + * 2D tensors: ``[batch, X, Y, channels]`` + * 3D tensors: ``[batch, X, Y, Z, channels]`` + + :type x: torch.Tensor | LabelTensor + :return: The output tensor. + :rtype: torch.Tensor """ if isinstance(x, LabelTensor): diff --git a/pina/model/graph_neural_operator.py b/pina/model/graph_neural_operator.py index 960c5ed..3cb5cdd 100644 --- a/pina/model/graph_neural_operator.py +++ b/pina/model/graph_neural_operator.py @@ -8,9 +8,9 @@ from .kernel_neural_operator import KernelNeuralOperator class GraphNeuralKernel(torch.nn.Module): """ - Graph Neural Kernel model class. + Graph Neural Operator kernel model class. - This class implements the Graph Neural Kernel network. + This class implements the Graph Neural Operator kernel network. .. 
seealso:: @@ -18,8 +18,7 @@ class GraphNeuralKernel(torch.nn.Module): Liu, B., Bhattacharya, K., Stuart, A., Anandkumar, A. (2020). *Neural Operator: Graph Kernel Network for Partial Differential Equations*. - DOI: `arXiv preprint arXiv:2003.03485. - `_ + DOI: `arXiv preprint arXiv:2003.03485 `_ """ def __init__( @@ -171,7 +170,7 @@ class GraphNeuralOperator(KernelNeuralOperator): """ Initialization of the :class:`GraphNeuralOperator` class. - param torch.nn.Module lifting_operator: The lifting neural network + :param torch.nn.Module lifting_operator: The lifting neural network mapping the input to its hidden dimension. :param torch.nn.Module projection_operator: The projection neural network mapping the hidden representation to the output function. diff --git a/pina/optim/torch_optimizer.py b/pina/optim/torch_optimizer.py index 9c8674c..68cee72 100644 --- a/pina/optim/torch_optimizer.py +++ b/pina/optim/torch_optimizer.py @@ -17,8 +17,9 @@ class TorchOptimizer(Optimizer): :param torch.optim.Optimizer optimizer_class: A :class:`torch.optim.Optimizer` class. - :param dict kwargs: Additional parameters passed to `optimizer_class`, - see more: _. + :param dict kwargs: Additional parameters passed to ``optimizer_class``, + see more + `here `_. """ check_consistency(optimizer_class, torch.optim.Optimizer, subclass=True) diff --git a/pina/optim/torch_scheduler.py b/pina/optim/torch_scheduler.py index 40b5e46..781a6e6 100644 --- a/pina/optim/torch_scheduler.py +++ b/pina/optim/torch_scheduler.py @@ -23,8 +23,9 @@ class TorchScheduler(Scheduler): :param torch.optim.LRScheduler scheduler_class: A :class:`torch.optim.LRScheduler` class. - :param dict kwargs: Additional parameters passed to `scheduler_class`, - see more: _. + :param dict kwargs: Additional parameters passed to ``scheduler_class``, + see more + `here `_. """ check_consistency(scheduler_class, LRScheduler, subclass=True) diff --git a/pina/problem/abstract_problem.py b/pina/problem/abstract_problem.py index 21afe86..266d7d3 100644 --- a/pina/problem/abstract_problem.py +++ b/pina/problem/abstract_problem.py @@ -47,7 +47,7 @@ class AbstractProblem(metaclass=ABCMeta): Get batching dimension. :return: The batching dimension. - :rtype int + :rtype: int """ return self._batching_dimension @@ -85,7 +85,7 @@ class AbstractProblem(metaclass=ABCMeta): points. :return: The discretised domains. - :rtype dict + :rtype: dict """ return self._discretised_domains @@ -178,13 +178,28 @@ class AbstractProblem(metaclass=ABCMeta): chebyshev sampling, ``chebyshev``; grid sampling ``grid``. :param domains: The domains from which to sample. Default is ``all``. :type domains: str | list[str] - :param dict sample_rules: A dictionary of custom sampling rules. + :param dict sample_rules: A dictionary defining custom sampling rules + for input variables. If provided, it must contain a dictionary + specifying the sampling rule for each variable, overriding the + ``n`` and ``mode`` arguments. Each key must correspond to an + input variable from + :meth:`~pina.problem.AbstractProblem.input_variables`, and its value + should be another dictionary with + two keys: ``n`` (number of points to sample) and ``mode`` + (sampling method). Default is ``None``. :raises RuntimeError: If both ``n`` and ``sample_rules`` are specified. :raises RuntimeError: If neither ``n`` nor ``sample_rules`` are set. :Example: >>> problem.discretise_domain(n=10, mode='grid') >>> problem.discretise_domain(n=10, mode='grid', domains=['gamma1']) + >>> problem.discretise_domain( + ... 
sample_rules={ + ... 'x': {'n': 10, 'mode': 'grid'}, + ... 'y': {'n': 100, 'mode': 'grid'} + ... }, + ... domains=['D'] + ... ) .. warning:: ``random`` is currently the only implemented ``mode`` for all @@ -197,6 +212,11 @@ class AbstractProblem(metaclass=ABCMeta): :class:`~pina.domain.intersection_domain.Intersection`. The modes ``latin`` or ``lh``, ``chebyshev``, ``grid`` are only implemented for :class:`~pina.domain.cartesian.CartesianDomain`. + + .. warning:: + If a custom discretisation is applied by passing ``sample_rules``, the + discretised domain must be a + :class:`~pina.domain.cartesian.CartesianDomain`. """ # check consistecy n, mode, variables, locations diff --git a/pina/solver/physic_informed_solver/rba_pinn.py b/pina/solver/physic_informed_solver/rba_pinn.py index d71cd6e..feeb5c8 100644 --- a/pina/solver/physic_informed_solver/rba_pinn.py +++ b/pina/solver/physic_informed_solver/rba_pinn.py @@ -82,7 +82,7 @@ class RBAPINN(PINN): :param AbstractProblem problem: The problem to be solved. :param torch.nn.Module model: The neural network model to be used. - param Optimizer optimizer: The optimizer to be used. + :param Optimizer optimizer: The optimizer to be used. If `None`, the :class:`torch.optim.Adam` optimizer is used. Default is ``None``. :param Scheduler scheduler: Learning rate scheduler. diff --git a/pina/trainer.py b/pina/trainer.py index 0a84f56..688b92f 100644 --- a/pina/trainer.py +++ b/pina/trainer.py @@ -13,8 +13,9 @@ class Trainer(lightning.pytorch.Trainer): PINA custom Trainer class to extend the standard Lightning functionality. This class enables specific features or behaviors required by the PINA - framework. It modifies the standard :class:`lightning.pytorch.Trainer` class - to better support the training process in PINA. + framework. It modifies the standard + :class:`lightning.pytorch.Trainer ` + class to better support the training process in PINA. """ def __init__( @@ -207,7 +208,9 @@ class Trainer(lightning.pytorch.Trainer): """ Manage the training process of the solver. - :param dict kwargs: Additional keyword arguments. + :param dict kwargs: Additional keyword arguments. See `pytorch-lightning + Trainer API `_ + for details. """ return super().fit(self.solver, datamodule=self.data_module, **kwargs) @@ -215,7 +218,9 @@ class Trainer(lightning.pytorch.Trainer): """ Manage the test process of the solver. - :param dict kwargs: Additional keyword arguments. + :param dict kwargs: Additional keyword arguments. See `pytorch-lightning + Trainer API `_ + for details. """ return super().test(self.solver, datamodule=self.data_module, **kwargs)
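
Usage sketch (not part of the patch): the doctest-style snippet below illustrates the per-variable discretisation documented in the ``AbstractProblem.discretise_domain`` hunk above. It is an assumption-laden example, not the library's reference usage: ``problem`` is taken to be an already-instantiated PINA problem whose domain ``'D'`` is a :class:`~pina.domain.cartesian.CartesianDomain` with input variables ``'x'`` and ``'y'`` (hypothetical names), as required by the new warning, and the exact object stored in ``discretised_domains`` is not shown in the patch.

    >>> # uniform rule for every input variable, as in the existing example
    >>> problem.discretise_domain(n=10, mode='grid')
    >>> # per-variable rules override ``n`` and ``mode`` (CartesianDomain only)
    >>> problem.discretise_domain(
    ...     sample_rules={
    ...         'x': {'n': 10, 'mode': 'grid'},
    ...         'y': {'n': 100, 'mode': 'grid'}
    ...     },
    ...     domains=['D']
    ... )
    >>> problem.discretised_domains['D']  # sampled points are stored per domain

The call signatures mirror the examples added in the docstring; only the last line, which reads back the discretised points, goes beyond what the patch itself shows.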