diff --git a/docs/.nojekyll b/docs/.nojekyll deleted file mode 100644 index e69de29..0000000 diff --git a/docs/index.html b/docs/index.html deleted file mode 100644 index ebe8f9c..0000000 --- a/docs/index.html +++ /dev/null @@ -1,6 +0,0 @@ - - - - - - diff --git a/docs/source/_cite.rst b/docs/source/_cite.rst new file mode 100644 index 0000000..71d5379 --- /dev/null +++ b/docs/source/_cite.rst @@ -0,0 +1,21 @@ +Cite PINA +============== + +If PINA has been significant in your research, and you would like to acknowledge the project in your academic publication, +we suggest citing the following paper: + +*Coscia, D., Ivagnes, A., Demo, N., & Rozza, G. (2023). Physics-Informed Neural networks for Advanced modeling. Journal of Open Source Software, 8(87), 5352.* + +Or in BibTeX format + +.. code:: bash + + @article{coscia2023physics, + title={Physics-Informed Neural networks for Advanced modeling}, + author={Coscia, Dario and Ivagnes, Anna and Demo, Nicola and Rozza, Gianluigi}, + journal={Journal of Open Source Software}, + volume={8}, + number={87}, + pages={5352}, + year={2023} + } \ No newline at end of file diff --git a/docs/source/_rst/_code.rst b/docs/source/_rst/_code.rst index 33d8a08..bdbe70c 100644 --- a/docs/source/_rst/_code.rst +++ b/docs/source/_rst/_code.rst @@ -1,18 +1,85 @@ Code Documentation ================== Welcome to PINA documentation! Here you can find the modules of the package divided in different sections. +The high-level structure of the package is depicted in the API diagram below. + +.. figure:: ../index_files/API_color.png + :alt: PINA application program interface + :align: center + :width: 400 + + +The pipeline to solve differential equations with PINA follows just five steps (a minimal sketch in code is shown after the list): + + 1. Define the `Problem`_ the user aims to solve + 2. Generate data using built-in `Geometries`_, or load high-level simulation results as :doc:`LabelTensor ` + 3. Choose or build one or more `Models`_ to solve the problem + 4. Choose a solver among the available PINA `Solvers`_, or build one using the :doc:`SolverInterface ` + 5. Train the model with the PINA :doc:`Trainer `, and enhance the training with `Callbacks`_
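+
+As a minimal sketch, the five steps look roughly as follows (the problem class
+``SimpleODE`` and all hyperparameters here are illustrative, and exact signatures
+may differ between PINA versions):
+
+.. code:: python
+
+    from pina import Condition, Trainer
+    from pina.problem import SpatialProblem
+    from pina.geometry import CartesianDomain
+    from pina.equation import Equation, FixedValue
+    from pina.operators import grad
+    from pina.model import FeedForward
+    from pina.solvers import PINN
+
+    # 1. define the problem: u' = u on [0, 1] with u(0) = 1
+    class SimpleODE(SpatialProblem):
+        output_variables = ['u']
+        spatial_domain = CartesianDomain({'x': [0, 1]})
+
+        def ode_equation(input_, output_):
+            u_x = grad(output_, input_, components=['u'], d=['x'])
+            return output_.extract(['u']) - u_x
+
+        conditions = {
+            'x0': Condition(location=CartesianDomain({'x': 0.}),
+                            equation=FixedValue(1.0)),
+            'D': Condition(location=CartesianDomain({'x': [0, 1]}),
+                           equation=Equation(ode_equation)),
+        }
+
+    problem = SimpleODE()
+    # 2. generate the collocation points
+    problem.discretise_domain(20, 'grid', locations=['x0', 'D'])
+    # 3. choose a model
+    model = FeedForward(input_dimensions=1, output_dimensions=1)
+    # 4. choose a solver
+    solver = PINN(problem=problem, model=model)
+    # 5. train with the PINA trainer
+    Trainer(solver=solver, max_epochs=1000).train()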
PINA Features -------- +-------------- .. toctree:: :titlesonly: LabelTensor Condition + Trainer Plotter +Solvers +-------------- + +.. toctree:: + :titlesonly: + + SolverInterface + PINN + Supervised solver + GAROM + + +Models +------------ + +.. toctree:: + :titlesonly: + :maxdepth: 5 + + Network + FeedForward + MultiFeedForward + ResidualFeedForward + DeepONet + MIONet + FNO + +Layers +------------- + +.. toctree:: + :titlesonly: + + Residual layer + EnhancedLinear layer + Spectral convolution + Fourier layers + Continuous convolution + + +Equations and Operators +------------------------- + +.. toctree:: + :titlesonly: + + Equations + Differential Operators + + Problem -------- +-------------- .. toctree:: :titlesonly: @@ -22,40 +89,8 @@ Problem TimeDependentProblem ParametricProblem -Solvers ------- - -.. toctree:: - :titlesonly: - - SolverInterface - PINN - - -Models ------ - -.. toctree:: - :titlesonly: - - Network - FeedForward - MultiFeedForward - ResidualFeedForward - DeepONet - FNO - -Layers ------- - -.. toctree:: - :titlesonly: - - ContinuousConv - - Geometries ---------- +----------------- .. toctree:: :titlesonly: @@ -65,9 +100,30 @@ Geometries EllipsoidDomain SimplexDomain +Geometry set operations +------------------------ -Loss ------ +.. toctree:: + :titlesonly: + + OperationInterface + Union + Intersection + Difference + Exclusion + +Callbacks +-------------------- + +.. toctree:: + :titlesonly: + + Metric tracking + Optimizer callbacks + Adaptive Refinements + +Metrics and Losses +-------------------- .. toctree:: :titlesonly: diff --git a/docs/source/_rst/_tutorial.rst b/docs/source/_rst/_tutorial.rst new file mode 100644 index 0000000..f904f83 --- /dev/null +++ b/docs/source/_rst/_tutorial.rst @@ -0,0 +1,39 @@ +PINA Tutorials +============== + +In this folder we collect useful tutorials to understand the principles and the potential of **PINA**. + +Getting started with PINA +------------------------- +.. toctree:: + :maxdepth: 3 + :titlesonly: + + Introduction to PINA for Physics Informed Neural Networks training + Building custom geometries with PINA Location class + +Physics Informed Neural Networks +-------------------------------- +.. toctree:: + :maxdepth: 3 + :titlesonly: + + Two dimensional Poisson problem using Extra Features Learning + Two dimensional Wave problem with hard constraint + + +Neural Operator Learning +------------------------ +.. toctree:: + :maxdepth: 3 + :titlesonly: + + Two dimensional Darcy flow using the Fourier Neural Operator + +Supervised Learning +------------------- +.. toctree:: + :maxdepth: 3 + :titlesonly: + + Unstructured convolutional autoencoder via continuous convolution \ No newline at end of file diff --git a/docs/source/_rst/_tutorials.rst b/docs/source/_rst/_tutorials.rst deleted file mode 100644 index de5b475..0000000 --- a/docs/source/_rst/_tutorials.rst +++ /dev/null @@ -1,27 +0,0 @@ - -PINA Tutorials -============== - -In this folder we collect useful tutorials in order to understand the principles and the potential of **PINA**. - -.. toctree:: - :maxdepth: 3 - :hidden: - -Getting started with PINA ------------------------- - * :doc:`Introduction to PINA for Physics Informed Neural Networks training` - * :doc:`Building custom geometries with PINA Location class` - -Physics Informed Neural Networks --------------------------------- - * :doc:`Two dimensional Poisson problem using Extra Features Learning` - * :doc:`Two dimensional Wave problem with hard constraint` - -Neural Operator Learning ------------------------- - * :doc:`Two dimensional Darcy flow using the Fourier Neural Operator` - -Supervised Learning -------------------- - * :doc:`Unstructured convolutional autoencoder via continuous convolution` diff --git a/docs/source/_rst/callbacks/adaptive_refinment_callbacks.rst b/docs/source/_rst/callbacks/adaptive_refinment_callbacks.rst new file mode 100644 index 0000000..11b313e --- /dev/null +++ b/docs/source/_rst/callbacks/adaptive_refinment_callbacks.rst @@ -0,0 +1,7 @@ +Adaptive Refinements callbacks +=============================== + +.. currentmodule:: pina.callbacks.adaptive_refinment_callbacks +.. autoclass:: R3Refinement + :members: + :show-inheritance: \ No newline at end of file diff --git a/docs/source/_rst/callbacks/optimizer_callbacks.rst b/docs/source/_rst/callbacks/optimizer_callbacks.rst new file mode 100644 index 0000000..7ee418f --- /dev/null +++ b/docs/source/_rst/callbacks/optimizer_callbacks.rst @@ -0,0 +1,7 @@ +Optimizer callbacks +===================== + +.. currentmodule:: pina.callbacks.optimizer_callbacks
+.. autoclass:: SwitchOptimizer + :members: + :show-inheritance: \ No newline at end of file diff --git a/docs/source/_rst/callbacks/processing_callbacks.rst b/docs/source/_rst/callbacks/processing_callbacks.rst new file mode 100644 index 0000000..e024a49 --- /dev/null +++ b/docs/source/_rst/callbacks/processing_callbacks.rst @@ -0,0 +1,7 @@ +Processing callbacks +======================= + +.. currentmodule:: pina.callbacks.processing_callbacks +.. autoclass:: MetricTracker + :members: + :show-inheritance: \ No newline at end of file diff --git a/docs/source/_rst/condition.rst b/docs/source/_rst/condition.rst index 8a554d1..088b966 100644 --- a/docs/source/_rst/condition.rst +++ b/docs/source/_rst/condition.rst @@ -2,9 +2,6 @@ Condition ========= .. currentmodule:: pina.condition -.. automodule:: pina.condition - .. autoclass:: Condition :members: - :private-members: :show-inheritance: \ No newline at end of file diff --git a/docs/source/_rst/equations.rst b/docs/source/_rst/equations.rst new file mode 100644 index 0000000..6826dde --- /dev/null +++ b/docs/source/_rst/equations.rst @@ -0,0 +1,42 @@ +Equations +========== +Equations are used in PINA to simplify training setup. During problem definition, +each `equation` passed to a `Condition` object must be an `Equation` or `SystemEquation`. +An `Equation` is simply a wrapper over a callable Python function, while `SystemEquation` is +a wrapper around a list of callable Python functions. We provide a wide range of already implemented +equations to ease the code writing, such as `FixedValue`, `Laplace`, and many more.
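+
+For example, a custom residual can be wrapped and combined with the ready-made
+equations as in the following sketch (the residual, forcing term, and variable
+names are illustrative):
+
+.. code:: python
+
+    import torch
+    from pina.equation import Equation, SystemEquation, FixedValue
+    from pina.operators import laplacian
+
+    def poisson_residual(input_, output_):
+        # residual of a Poisson-type equation: laplacian(u) - f = 0
+        force = torch.sin(torch.pi * input_.extract(['x']))
+        return laplacian(output_, input_, components=['u'], d=['x']) - force
+
+    equation = Equation(poisson_residual)        # wraps a single callable
+    system = SystemEquation([poisson_residual])  # wraps a list of callables
+    boundary = FixedValue(0.0)                   # ready-made: fix u = 0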
+ + +.. currentmodule:: pina.equation.equation_interface +.. autoclass:: EquationInterface + :members: + :show-inheritance: + +.. currentmodule:: pina.equation.equation +.. autoclass:: Equation + :members: + :show-inheritance: + + +.. currentmodule:: pina.equation.system_equation +.. autoclass:: SystemEquation + :members: + :show-inheritance: + + +.. currentmodule:: pina.equation.equation_factory +.. autoclass:: FixedValue + :members: + :show-inheritance: + +.. autoclass:: FixedGradient + :members: + :show-inheritance: + +.. autoclass:: FixedFlux + :members: + :show-inheritance: + +.. autoclass:: Laplace + :members: + :show-inheritance: \ No newline at end of file diff --git a/docs/source/_rst/geometry/cartesian.rst b/docs/source/_rst/geometry/cartesian.rst index 8e97197..b57c02b 100644 --- a/docs/source/_rst/geometry/cartesian.rst +++ b/docs/source/_rst/geometry/cartesian.rst @@ -1,5 +1,5 @@ CartesianDomain -=========== +====================== .. currentmodule:: pina.geometry.cartesian .. automodule:: pina.geometry.cartesian diff --git a/docs/source/_rst/geometry/difference_domain.rst b/docs/source/_rst/geometry/difference_domain.rst new file mode 100644 index 0000000..fc0b293 --- /dev/null +++ b/docs/source/_rst/geometry/difference_domain.rst @@ -0,0 +1,9 @@ +Difference +====================== +.. currentmodule:: pina.geometry.difference_domain + +.. automodule:: pina.geometry.difference_domain + +.. autoclass:: Difference + :members: + :show-inheritance: diff --git a/docs/source/_rst/geometry/ellipsoid.rst b/docs/source/_rst/geometry/ellipsoid.rst index 25d606c..09af427 100644 --- a/docs/source/_rst/geometry/ellipsoid.rst +++ b/docs/source/_rst/geometry/ellipsoid.rst @@ -1,5 +1,5 @@ EllipsoidDomain -=========== +====================== .. currentmodule:: pina.geometry.ellipsoid .. automodule:: pina.geometry.ellipsoid diff --git a/docs/source/_rst/geometry/exclusion_domain.rst b/docs/source/_rst/geometry/exclusion_domain.rst new file mode 100644 index 0000000..a07aafc --- /dev/null +++ b/docs/source/_rst/geometry/exclusion_domain.rst @@ -0,0 +1,9 @@ +Exclusion +====================== +.. currentmodule:: pina.geometry.exclusion_domain + +.. automodule:: pina.geometry.exclusion_domain + +.. autoclass:: Exclusion + :members: + :show-inheritance: diff --git a/docs/source/_rst/geometry/intersection_domain.rst b/docs/source/_rst/geometry/intersection_domain.rst new file mode 100644 index 0000000..a3c1356 --- /dev/null +++ b/docs/source/_rst/geometry/intersection_domain.rst @@ -0,0 +1,9 @@ +Intersection +====================== +.. currentmodule:: pina.geometry.intersection_domain + +.. automodule:: pina.geometry.intersection_domain + +.. autoclass:: Intersection + :members: + :show-inheritance: diff --git a/docs/source/_rst/geometry/location.rst b/docs/source/_rst/geometry/location.rst index 3603a24..5d680a1 100644 --- a/docs/source/_rst/geometry/location.rst +++ b/docs/source/_rst/geometry/location.rst @@ -1,10 +1,9 @@ Location -========= +==================== .. currentmodule:: pina.geometry.location .. automodule:: pina.geometry.location .. autoclass:: Location :members: - :private-members: :show-inheritance: \ No newline at end of file diff --git a/docs/source/_rst/geometry/operation_interface.rst b/docs/source/_rst/geometry/operation_interface.rst new file mode 100644 index 0000000..00a2d84 --- /dev/null +++ b/docs/source/_rst/geometry/operation_interface.rst @@ -0,0 +1,9 @@ +OperationInterface +====================== +.. currentmodule:: pina.geometry.operation_interface + +.. automodule:: pina.geometry.operation_interface + +.. autoclass:: OperationInterface + :members: + :show-inheritance: diff --git a/docs/source/_rst/geometry/simplex.rst b/docs/source/_rst/geometry/simplex.rst index f0b96a7..b5a83e4 100644 --- a/docs/source/_rst/geometry/simplex.rst +++ b/docs/source/_rst/geometry/simplex.rst @@ -1,5 +1,5 @@ SimplexDomain -=========== +====================== .. currentmodule:: pina.geometry.simplex .. automodule:: pina.geometry.simplex diff --git a/docs/source/_rst/geometry/union_domain.rst b/docs/source/_rst/geometry/union_domain.rst new file mode 100644 index 0000000..ad172d7 --- /dev/null +++ b/docs/source/_rst/geometry/union_domain.rst @@ -0,0 +1,9 @@ +Union +====================== +.. currentmodule:: pina.geometry.union_domain + +.. automodule:: pina.geometry.union_domain + +.. autoclass:: Union + :members: + :show-inheritance: diff --git a/docs/source/_rst/label_tensor.rst b/docs/source/_rst/label_tensor.rst index 07a665e..9eb2273 100644 --- a/docs/source/_rst/label_tensor.rst +++ b/docs/source/_rst/label_tensor.rst @@ -2,7 +2,6 @@ LabelTensor =========== .. currentmodule:: pina.label_tensor -.. automodule:: pina.label_tensor .. autoclass:: LabelTensor :members: diff --git a/docs/source/_rst/layers/convolution.rst b/docs/source/_rst/layers/convolution.rst index fb60aa1..3089fea 100644 --- a/docs/source/_rst/layers/convolution.rst +++ b/docs/source/_rst/layers/convolution.rst @@ -1,12 +1,8 @@ -ContinuousConvBlock -=================== +Continuous convolution +========================= .. currentmodule:: pina.model.layers.convolution_2d -.. automodule:: pina.model.layers.convolution_2d -
.. autoclass:: ContinuousConvBlock :members: - :private-members: - :undoc-members: :show-inheritance: :noindex: diff --git a/docs/source/_rst/layers/enhanced_linear.rst b/docs/source/_rst/layers/enhanced_linear.rst new file mode 100644 index 0000000..ba30960 --- /dev/null +++ b/docs/source/_rst/layers/enhanced_linear.rst @@ -0,0 +1,8 @@ +EnhancedLinear +================= +.. currentmodule:: pina.model.layers.residual + +.. autoclass:: EnhancedLinear + :members: + :show-inheritance: + :noindex: \ No newline at end of file diff --git a/docs/source/_rst/layers/fourier.rst b/docs/source/_rst/layers/fourier.rst new file mode 100644 index 0000000..1321700 --- /dev/null +++ b/docs/source/_rst/layers/fourier.rst @@ -0,0 +1,16 @@ +Fourier Layers +=================== +.. currentmodule:: pina.model.layers.fourier + + +.. autoclass:: FourierBlock1D + :members: + :show-inheritance: + +.. autoclass:: FourierBlock2D + :members: + :show-inheritance: + +.. autoclass:: FourierBlock3D + :members: + :show-inheritance: diff --git a/docs/source/_rst/layers/residual.rst b/docs/source/_rst/layers/residual.rst new file mode 100644 index 0000000..1af11e5 --- /dev/null +++ b/docs/source/_rst/layers/residual.rst @@ -0,0 +1,7 @@ +Residual layer +=================== +.. currentmodule:: pina.model.layers.residual + +.. autoclass:: ResidualBlock + :members: + :show-inheritance: diff --git a/docs/source/_rst/layers/spectral.rst b/docs/source/_rst/layers/spectral.rst new file mode 100644 index 0000000..5635ba2 --- /dev/null +++ b/docs/source/_rst/layers/spectral.rst @@ -0,0 +1,15 @@ +Spectral Convolution +====================== +.. currentmodule:: pina.model.layers.spectral + +.. autoclass:: SpectralConvBlock1D + :members: + :show-inheritance: + +.. autoclass:: SpectralConvBlock2D + :members: + :show-inheritance: + +.. autoclass:: SpectralConvBlock3D + :members: + :show-inheritance: \ No newline at end of file diff --git a/docs/source/_rst/loss/loss_interface.rst b/docs/source/_rst/loss/loss_interface.rst index d449a91..6d4827d 100644 --- a/docs/source/_rst/loss/loss_interface.rst +++ b/docs/source/_rst/loss/loss_interface.rst @@ -1,10 +1,9 @@ -LpLoss -==== +LossInterface +=============== .. currentmodule:: pina.loss .. automodule:: pina.loss .. autoclass:: LossInterface :members: - :private-members: :show-inheritance: \ No newline at end of file diff --git a/docs/source/_rst/loss/lploss.rst b/docs/source/_rst/loss/lploss.rst index b83a13b..f95d187 100644 --- a/docs/source/_rst/loss/lploss.rst +++ b/docs/source/_rst/loss/lploss.rst @@ -1,10 +1,10 @@ LpLoss -==== +=============== .. currentmodule:: pina.loss .. automodule:: pina.loss + :no-index: .. autoclass:: LpLoss :members: - :private-members: :show-inheritance: \ No newline at end of file diff --git a/docs/source/_rst/loss/powerloss.rst b/docs/source/_rst/loss/powerloss.rst index e8f5c49..0b1a7d9 100644 --- a/docs/source/_rst/loss/powerloss.rst +++ b/docs/source/_rst/loss/powerloss.rst @@ -1,10 +1,10 @@ PowerLoss -========= +==================== .. currentmodule:: pina.loss .. automodule:: pina.loss + :no-index: .. autoclass:: PowerLoss :members: - :private-members: :show-inheritance: \ No newline at end of file diff --git a/docs/source/_rst/models/deeponet.rst b/docs/source/_rst/models/deeponet.rst index 94bd6e9..0ca0824 100644 --- a/docs/source/_rst/models/deeponet.rst +++ b/docs/source/_rst/models/deeponet.rst @@ -2,9 +2,6 @@ DeepONet =========== .. currentmodule:: pina.model.deeponet -.. automodule:: pina.model.deeponet - ..
autoclass:: DeepONet :members: - :private-members: :show-inheritance: \ No newline at end of file diff --git a/docs/source/_rst/models/fnn.rst b/docs/source/_rst/models/fnn.rst index 98bec0c..2dea8e5 100644 --- a/docs/source/_rst/models/fnn.rst +++ b/docs/source/_rst/models/fnn.rst @@ -1,10 +1,7 @@ FeedForward -=========== +====================== .. currentmodule:: pina.model.feed_forward -.. automodule:: pina.model.feed_forward - .. autoclass:: FeedForward :members: - :private-members: :show-inheritance: \ No newline at end of file diff --git a/docs/source/_rst/models/fnn_residual.rst b/docs/source/_rst/models/fnn_residual.rst index 6f3aeaf..66d83a4 100644 --- a/docs/source/_rst/models/fnn_residual.rst +++ b/docs/source/_rst/models/fnn_residual.rst @@ -1,10 +1,7 @@ ResidualFeedForward -=========== +====================== .. currentmodule:: pina.model.feed_forward -.. automodule:: pina.model.feed_forward - .. autoclass:: ResidualFeedForward :members: - :private-members: :show-inheritance: \ No newline at end of file diff --git a/docs/source/_rst/models/fno.rst b/docs/source/_rst/models/fno.rst index 73e0713..3d102b3 100644 --- a/docs/source/_rst/models/fno.rst +++ b/docs/source/_rst/models/fno.rst @@ -2,9 +2,6 @@ FNO =========== .. currentmodule:: pina.model.fno -.. automodule:: pina.model.fno - .. autoclass:: FNO :members: - :private-members: :show-inheritance: \ No newline at end of file diff --git a/docs/source/_rst/models/mionet.rst b/docs/source/_rst/models/mionet.rst new file mode 100644 index 0000000..fe62817 --- /dev/null +++ b/docs/source/_rst/models/mionet.rst @@ -0,0 +1,7 @@ +MIONet +=========== +.. currentmodule:: pina.model.deeponet + +.. autoclass:: MIONet + :members: + :show-inheritance: \ No newline at end of file diff --git a/docs/source/_rst/models/multifeedforward.rst b/docs/source/_rst/models/multifeedforward.rst index faa9c33..aa79580 100644 --- a/docs/source/_rst/models/multifeedforward.rst +++ b/docs/source/_rst/models/multifeedforward.rst @@ -1,10 +1,7 @@ MultiFeedForward -================ +================== .. currentmodule:: pina.model.multi_feed_forward -.. automodule:: pina.model.multi_feed_forward - .. autoclass:: MultiFeedForward :members: - :private-members: :show-inheritance: \ No newline at end of file diff --git a/docs/source/_rst/models/network.rst b/docs/source/_rst/models/network.rst index e81143a..4df9e19 100644 --- a/docs/source/_rst/models/network.rst +++ b/docs/source/_rst/models/network.rst @@ -1,10 +1,8 @@ Network ================ -.. currentmodule:: pina.model.network .. automodule:: pina.model.network .. autoclass:: Network :members: - :private-members: :show-inheritance: diff --git a/docs/source/_rst/equations_operators/operators.rst b/docs/source/_rst/operators.rst similarity index 56% rename from docs/source/_rst/equations_operators/operators.rst rename to docs/source/_rst/operators.rst index 458dcf8..59f7c7a 100644 --- a/docs/source/_rst/equations_operators/operators.rst +++ b/docs/source/_rst/operators.rst @@ -1,10 +1,8 @@ Operators =========== + .. currentmodule:: pina.operators .. automodule:: pina.operators :members: - :private-members: - :undoc-members: - :show-inheritance: - :noindex: + :show-inheritance: \ No newline at end of file diff --git a/docs/source/_rst/problem/abstractproblem.rst b/docs/source/_rst/problem/abstractproblem.rst index 770533e..143909e 100644 --- a/docs/source/_rst/problem/abstractproblem.rst +++ b/docs/source/_rst/problem/abstractproblem.rst @@ -6,6 +6,4 @@ AbstractProblem .. 
autoclass:: AbstractProblem :members: - :private-members: - :undoc-members: :show-inheritance: diff --git a/docs/source/_rst/problem/parametricproblem.rst b/docs/source/_rst/problem/parametricproblem.rst index 84eac3f..8f217fb 100644 --- a/docs/source/_rst/problem/parametricproblem.rst +++ b/docs/source/_rst/problem/parametricproblem.rst @@ -1,10 +1,9 @@ ParametricProblem -================= +==================== .. currentmodule:: pina.problem.parametric_problem .. automodule:: pina.problem.parametric_problem .. autoclass:: ParametricProblem :members: - :private-members: :show-inheritance: \ No newline at end of file diff --git a/docs/source/_rst/problem/spatialproblem.rst b/docs/source/_rst/problem/spatialproblem.rst index 74c1fba..90ec6ec 100644 --- a/docs/source/_rst/problem/spatialproblem.rst +++ b/docs/source/_rst/problem/spatialproblem.rst @@ -6,6 +6,4 @@ SpatialProblem .. autoclass:: SpatialProblem :members: - :private-members: - :undoc-members: :show-inheritance: diff --git a/docs/source/_rst/problem/timedepproblem.rst b/docs/source/_rst/problem/timedepproblem.rst index 2f81119..93b8cb5 100644 --- a/docs/source/_rst/problem/timedepproblem.rst +++ b/docs/source/_rst/problem/timedepproblem.rst @@ -6,6 +6,4 @@ TimeDependentProblem .. autoclass:: TimeDependentProblem :members: - :private-members: - :undoc-members: :show-inheritance: diff --git a/docs/source/_rst/solvers/garom.rst b/docs/source/_rst/solvers/garom.rst new file mode 100644 index 0000000..5fcd97f --- /dev/null +++ b/docs/source/_rst/solvers/garom.rst @@ -0,0 +1,7 @@ +GAROM +====== +.. currentmodule:: pina.solvers.garom + +.. autoclass:: GAROM + :members: + :show-inheritance: \ No newline at end of file diff --git a/docs/source/_rst/solvers/pinn.rst b/docs/source/_rst/solvers/pinn.rst index 602b827..3e9b2ef 100644 --- a/docs/source/_rst/solvers/pinn.rst +++ b/docs/source/_rst/solvers/pinn.rst @@ -1,10 +1,7 @@ PINN -==== +====== .. currentmodule:: pina.solvers.pinn -.. automodule:: pina.solvers.pinn - .. autoclass:: PINN :members: - :private-members: :show-inheritance: \ No newline at end of file diff --git a/docs/source/_rst/solvers/solver_interface.rst b/docs/source/_rst/solvers/solver_interface.rst index 05e3b98..363e1db 100644 --- a/docs/source/_rst/solvers/solver_interface.rst +++ b/docs/source/_rst/solvers/solver_interface.rst @@ -1,10 +1,7 @@ SolverInterface -=========== +================= .. currentmodule:: pina.solvers.solver -.. automodule:: pina.solvers.solver - .. autoclass:: SolverInterface - :members: :show-inheritance: - :noindex: + :members: diff --git a/docs/source/_rst/solvers/supervised.rst b/docs/source/_rst/solvers/supervised.rst new file mode 100644 index 0000000..895759e --- /dev/null +++ b/docs/source/_rst/solvers/supervised.rst @@ -0,0 +1,7 @@ +SupervisedSolver +=================== +.. currentmodule:: pina.solvers.supervised + +.. autoclass:: SupervisedSolver + :members: + :show-inheritance: \ No newline at end of file diff --git a/docs/source/_rst/trainer.rst b/docs/source/_rst/trainer.rst new file mode 100644 index 0000000..2582b6d --- /dev/null +++ b/docs/source/_rst/trainer.rst @@ -0,0 +1,8 @@ +Trainer +=========== + +.. automodule:: pina.trainer + +.. 
autoclass:: Trainer + :members: + :show-inheritance: \ No newline at end of file diff --git a/docs/source/_rst/tutorials/tutorial1/tutorial.rst b/docs/source/_rst/tutorials/tutorial1/tutorial.rst index f78b2d7..c76b5c1 100644 --- a/docs/source/_rst/tutorials/tutorial1/tutorial.rst +++ b/docs/source/_rst/tutorials/tutorial1/tutorial.rst @@ -28,8 +28,8 @@ Build a PINA problem Problem definition in the **PINA** framework is done by building a python ``class``, which inherits from one or more problem classes (``SpatialProblem``, ``TimeDependentProblem``, ``ParametricProblem``, …) -depending on the nature of the problem. Below is an example: ### Simple -Ordinary Differential Equation Consider the following: +depending on the nature of the problem. Below is an example. Consider the following +simple Ordinary Differential Equation: .. math:: diff --git a/docs/source/_rst/tutorials/tutorial2/tutorial.rst b/docs/source/_rst/tutorials/tutorial2/tutorial.rst index 80d2d03..8a59771 100644 --- a/docs/source/_rst/tutorials/tutorial2/tutorial.rst +++ b/docs/source/_rst/tutorials/tutorial2/tutorial.rst @@ -31,12 +31,16 @@ The problem definition ---------------------- The two-dimensional Poisson problem is mathematically written as: -:raw-latex:`\begin{equation} -\begin{cases} -\Delta u = \sin{(\pi x)} \sin{(\pi y)} \text{ in } D, \\ -u = 0 \text{ on } \Gamma_1 \cup \Gamma_2 \cup \Gamma_3 \cup \Gamma_4, -\end{cases} -\end{equation}` where :math:`D` is a square domain :math:`[0,1]^2`, and + +.. math:: + \begin{equation} + \begin{cases} + \Delta u = \sin{(\pi x)} \sin{(\pi y)} \text{ in } D, \\ + u = 0 \text{ on } \Gamma_1 \cup \Gamma_2 \cup \Gamma_3 \cup \Gamma_4, + \end{cases} + \end{equation} + +where :math:`D` is a square domain :math:`[0,1]^2`, and :math:`\Gamma_i`, with :math:`i=1,...,4`, are the boundaries of the square. @@ -158,9 +162,10 @@ is now defined, with an additional input variable, named extra-feature, which coincides with the forcing term in the Laplace equation. The set of input variables to the neural network is: -:raw-latex:`\begin{equation} -[x, y, k(x, y)], \text{ with } k(x, y)=\sin{(\pi x)}\sin{(\pi y)}, -\end{equation}` +.. math:: + \begin{equation} + [x, y, k(x, y)], \text{ with } k(x, y)=\sin{(\pi x)}\sin{(\pi y)}, + \end{equation} where :math:`x` and :math:`y` are the spatial coordinates and :math:`k(x, y)` is the added feature. @@ -249,9 +254,10 @@ Another way to exploit the extra features is the addition of learnable parameter inside them. In this way, the added parameters are learned during the training phase of the neural network. In this case, we use: -:raw-latex:`\begin{equation} -k(x, \mathbf{y}) = \beta \sin{(\alpha x)} \sin{(\alpha y)}, -\end{equation}` +.. math:: + \begin{equation} + k(x, \mathbf{y}) = \beta \sin{(\alpha x)} \sin{(\alpha y)}, + \end{equation} where :math:`\alpha` and :math:`\beta` are the abovementioned parameters. 
Their implementation is quite trivial: by using the class diff --git a/docs/source/_rst/tutorials/tutorial3/tutorial.rst b/docs/source/_rst/tutorials/tutorial3/tutorial.rst index 2b16113..23288b9 100644 --- a/docs/source/_rst/tutorials/tutorial3/tutorial.rst +++ b/docs/source/_rst/tutorials/tutorial3/tutorial.rst @@ -25,13 +25,14 @@ The problem definition The problem is written in the following form: -:raw-latex:`\begin{equation} -\begin{cases} -\Delta u(x,y,t) = \frac{\partial^2}{\partial t^2} u(x,y,t) \quad \text{in } D, \\\\ -u(x, y, t=0) = \sin(\pi x)\sin(\pi y), \\\\ -u(x, y, t) = 0 \quad \text{on } \Gamma_1 \cup \Gamma_2 \cup \Gamma_3 \cup \Gamma_4, -\end{cases} -\end{equation}` +.. math:: + \begin{equation} + \begin{cases} + \Delta u(x,y,t) = \frac{\partial^2}{\partial t^2} u(x,y,t) \quad \text{in } D, \\\\ + u(x, y, t=0) = \sin(\pi x)\sin(\pi y), \\\\ + u(x, y, t) = 0 \quad \text{on } \Gamma_1 \cup \Gamma_2 \cup \Gamma_3 \cup \Gamma_4, + \end{cases} + \end{equation} where :math:`D` is a square domain :math:`[0,1]^2`, and :math:`\Gamma_i`, with :math:`i=1,...,4`, are the boundaries of the diff --git a/docs/source/_rst/tutorials/tutorial4/tutorial.rst b/docs/source/_rst/tutorials/tutorial4/tutorial.rst index d6cca05..f93c2fe 100644 --- a/docs/source/_rst/tutorials/tutorial4/tutorial.rst +++ b/docs/source/_rst/tutorials/tutorial4/tutorial.rst @@ -21,16 +21,15 @@ First of all we import the modules needed for the tutorial: import torchvision # for MNIST dataset from pina.model import FeedForward # for building AE and MNIST classification -The tutorial is structured as follow: \* `Continuous filter -background <#continuous-filter-background>`__: understand how the -convolutional filter works and how to use it. \* `Building a MNIST -Classifier <#building-a-mnist-classifier>`__: show how to build a simple -classifier using the MNIST dataset and how to combine a continuous -convolutional layer with a feedforward neural network. \* `Building a -Continuous Convolutional -Autoencoder <#building-a-continuous-convolutional-autoencoder>`__: show -how to use the continuous filter to work with unstructured data for -autoencoding and up-sampling. +The tutorial is structured as follows: + +* `Continuous filter background <#continuous-filter-background>`__: understand how the convolutional filter works and how to use it. +* `Building a MNIST Classifier <#building-a-mnist-classifier>`__: show how to build a simple + classifier using the MNIST dataset and how to combine a continuous + convolutional layer with a feedforward neural network. +* `Building a Continuous Convolutional Autoencoder <#building-a-continuous-convolutional-autoencoder>`__: show + how to use the continuous filter to work with unstructured data for + autoencoding and up-sampling. Continuous filter background ---------------------------- @@ -153,13 +152,16 @@ where to go. Here is an example for the :math:`[0,1]\times[0,5]` domain: "direction": [1, 1], } -This tells the filter: 1. ``domain``: square domain (the only -implemented) :math:`[0,1]\times[0,5]`. The minimum value is always zero, -while the maximum is specified by the user 2. ``start``: start position -of the filter, coordinate :math:`(0, 0)` 3. ``jump``: the jumps of the -centroid of the filter to the next position :math:`(0.1, 0.3)` 4. -``direction``: the directions of the jump, with ``1 = right``, -``0 = no jump``,\ ``-1 = left`` with respect to the current position +This tells the filter: + +1. ``domain``: square domain (the only one implemented) :math:`[0,1]\times[0,5]`. The minimum value is always zero, + while the maximum is specified by the user
+2. ``start``: start position + of the filter, coordinate :math:`(0, 0)` +3. ``jump``: the jumps of the + centroid of the filter to the next position :math:`(0.1, 0.3)` +4. ``direction``: the directions of the jump, with ``1 = right``, + ``0 = no jump``,\ ``-1 = left`` with respect to the current position **Note** @@ -170,9 +172,7 @@ Filter definition ~~~~~~~~~~~~~~~~~ Having defined all the previous blocks we are able to construct the -continuous filter. - -Suppose we would like to get an ouput with only one field, and let us +continuous filter. Suppose we would like to get an output with only one field, and let us fix the filter dimension to be :math:`[0.1, 0.1]`. .. code:: ipython3 output_numb_field=1, filter_dim=filter_dim, stride=stride) - - -.. parsed-literal:: - - /u/d/dcoscia/.local/lib/python3.9/site-packages/torch/functional.py:504: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:3483.) - return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined] - + That’s it! In just one line of code we have created the continuous convolutional filter. By default the ``pina.model.FeedForward`` neural diff --git a/docs/source/_team.rst b/docs/source/_team.rst new file mode 100644 index 0000000..1fa4212 --- /dev/null +++ b/docs/source/_team.rst @@ -0,0 +1,24 @@ +PINA Team +============== + +PINA is currently developed by `Dario Coscia `_, `Nicola Demo `_, and `Anna Ivagnes `_ +under the supervision of `Prof. Gianluigi Rozza `_ in the `SISSA MathLab `_ group. + +A significant part of PINA has been written either as a by-product of other projects people were funded for, or by people on university-funded positions. +There are probably many such projects that have led to some development of PINA. We are very grateful for this support! +In particular, we acknowledge the following sources of support with great gratitude: + +* `H2020 ERC CoG 2015 AROMA-CFD project 681447 `_, P.I. `Prof. Gianluigi Rozza `_ at `SISSA MathLab `_. +* `Next Generation EU `_ for the environmental and digital transition, Italy. + +.. figure:: index_files/foudings.png + :align: center + :width: 400 + +We also acknowledge the contribution of `Maria Strazzullo `_ in the early development of the package. A special +thanks goes to all the students and researchers from different universities who contributed to the package. Finally, we warmly thank all the +`contributors `_! + +.. figure:: index_files/university_dev_pina.png + :align: center + :width: 500 diff --git a/docs/source/conf.py b/docs/source/conf.py index 22df97e..7efb647 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -22,6 +22,8 @@ import sphinx_rtd_theme sys.path.insert(0, os.path.abspath('../..')) import pina +sys.path.insert(0, os.path.abspath('../sphinx_extensions')) # extension to remove paramref links from lightning + # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. @@ -36,29 +38,45 @@ import pina # ones.
extensions = [ 'sphinx.ext.autodoc', - #'sphinx.ext.autosummary', - #'sphinx.ext.coverage', - #'sphinx.ext.graphviz', - #'sphinx.ext.doctest', + 'sphinx.ext.autosummary', + 'sphinx.ext.doctest', + 'sphinx.ext.napoleon', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', - #'sphinx.ext.coverage', + 'sphinx.ext.coverage', 'sphinx.ext.viewcode', - #'sphinx.ext.ifconfig', 'sphinx.ext.mathjax', - 'sphinx.ext.autosectionlabel', + 'paramref_extension', # this extension is made to remove paramref links from the lightning docs + 'sphinx_copybutton', ] -#autosummary_generate = True intersphinx_mapping = { 'python': ('http://docs.python.org/3', None), - 'numpy': ('http://docs.scipy.org/doc/numpy/', None), - 'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None), + # 'numpy': ('http://docs.scipy.org/doc/numpy/', None), + # 'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None), 'matplotlib': ('http://matplotlib.sourceforge.net/', None), - 'torch': ('https://pytorch.org/docs/stable/', None), -} + 'torch': ('https://pytorch.org/docs/stable/', None), + 'pytorch_lightning': ("https://lightning.ai/docs/pytorch/stable/", None), + } nitpicky = True +nitpick_ignore = [ + ('py:meth', 'pytorch_lightning.core.module.LightningModule.log'), + ('py:meth', 'pytorch_lightning.core.module.LightningModule.log_dict'), + ('py:exc', 'MisconfigurationException'), + ('py:func', 'torch.inference_mode'), + ('py:func', 'torch.no_grad'), + ('py:class', 'torch.utils.data.DistributedSampler'), + ('py:class', 'CartesianDomain'), # TO FIX + ('py:class', 'pina.model.layers.convolution.BaseContinuousConv'), + ('py:class', 'Module'), + ('py:class', 'torch.nn.modules.loss._Loss'), # TO FIX + ('py:class', 'torch.optim.LRScheduler'), # TO FIX + + ] + + # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] @@ -95,7 +113,7 @@ release = version # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. -language = None +language = 'en' # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: @@ -125,11 +143,7 @@ add_module_names = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -keep_warnings = False +# A list of ignored prefixes for module index sorting. # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True @@ -167,7 +181,13 @@ html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # The name of an image file (relative to this directory) to place at the top # of the sidebar. -# html_logo = None +html_logo = "index_files/pina_logo.png" +html_theme_options = { + 'logo_only': True, + 'display_version': True, + 'prev_next_buttons_location': 'bottom', + +} # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 @@ -182,7 +202,7 @@ html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation.
-html_extra_path = ['_tutorials'] +# html_extra_path = ['_tutorial'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. @@ -322,3 +342,4 @@ texinfo_documents = [ # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False +autodoc_member_order = 'bysource' \ No newline at end of file diff --git a/docs/source/index.rst b/docs/source/index.rst index cac1ae5..8a242c5 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -1,13 +1,6 @@ Welcome to PINA's documentation! =================================================== -.. figure:: index_files/pina_logo.png - :align: center - :width: 150 - -| - - Physics Informed Neural network for Advanced modeling (**PINA**) is an open-source Python library providing an intuitive interface for solving differential equations using PINNs, NOs or both together. @@ -29,7 +22,7 @@ with PINA follows just five steps: problem definition, model selection, data gen .. figure:: index_files/API_color.png :alt: PINA application program interface :align: center - :width: 500 + :width: 600 | @@ -54,12 +47,10 @@ terms), without the need of re-training. .. toctree:: - :maxdepth: 2 + :maxdepth: 1 :caption: Package Documentation: - + API <_rst/_code> - Contributing <_rst/_contributing> - License <_LICENSE.rst> .. the following is demo content intended to showcase some of the features you can invoke in reStructuredText .. this can be safely deleted or commented out .. toctree:: :maxdepth: 1 - :numbered: :caption: Getting Started: Installation <_rst/_installation> - Tutorials <_rst/_tutorials> + Tutorials <_rst/_tutorial> + +.. toctree:: + :maxdepth: 1 + :caption: Community: + + Team & Funding <_team.rst> + Contributing <_rst/_contributing> + License <_LICENSE.rst> + Cite PINA <_cite.rst> \ No newline at end of file diff --git a/docs/source/index_files/API_color.png b/docs/source/index_files/API_color.png index 97b25e7..9e61695 100644 Binary files a/docs/source/index_files/API_color.png and b/docs/source/index_files/API_color.png differ diff --git a/docs/source/index_files/foudings.png b/docs/source/index_files/foudings.png new file mode 100644 index 0000000..65b9237 Binary files /dev/null and b/docs/source/index_files/foudings.png differ diff --git a/docs/source/index_files/pina_logo.png b/docs/source/index_files/pina_logo.png index 53bef16..294cdde 100644 Binary files a/docs/source/index_files/pina_logo.png and b/docs/source/index_files/pina_logo.png differ diff --git a/docs/source/index_files/university_dev_pina.png b/docs/source/index_files/university_dev_pina.png new file mode 100644 index 0000000..9afb04f Binary files /dev/null and b/docs/source/index_files/university_dev_pina.png differ diff --git a/docs/sphinx_extensions/paramref_extension.py b/docs/sphinx_extensions/paramref_extension.py new file mode 100644 index 0000000..3b72284 --- /dev/null +++ b/docs/sphinx_extensions/paramref_extension.py @@ -0,0 +1,11 @@ +from docutils import nodes +from docutils.parsers.rst.roles import register_local_role + +def paramref_role(name, rawtext, text, lineno, inliner, options={}, content=[]): + # render the :paramref: target as plain literal text, dropping the leading character (usually '~') + new_role = nodes.literal(text=text[1:]) + return [new_role], [] + +def setup(app): + register_local_role('paramref', paramref_role) + diff --git a/pina/__init__.py b/pina/__init__.py index 60801b6..711161b 100644 --- a/pina/__init__.py +++ b/pina/__init__.py @@ -1,21 +1,13 @@
__all__ = [ - 'PINN', - 'Trainer', - 'LabelTensor', - 'Plotter', - 'Condition', - 'Location', - 'CartesianDomain' + 'SolverInterface', 'Trainer', 'LabelTensor', 'Plotter', 'Condition', + 'SamplePointDataset', 'SamplePointLoader' ] from .meta import * from .label_tensor import LabelTensor -from .solvers.pinn import PINN +from .solvers.solver import SolverInterface from .trainer import Trainer from .plotter import Plotter from .condition import Condition -from .geometry import Location -from .geometry import CartesianDomain - from .dataset import SamplePointDataset -from .dataset import SamplePointLoader \ No newline at end of file +from .dataset import SamplePointLoader diff --git a/pina/adaptive_functions/__init__.py b/pina/adaptive_functions/__init__.py index adc0f17..7523199 100644 --- a/pina/adaptive_functions/__init__.py +++ b/pina/adaptive_functions/__init__.py @@ -1,4 +1,3 @@ - from .adaptive_tanh import AdaptiveTanh from .adaptive_sin import AdaptiveSin from .adaptive_cos import AdaptiveCos diff --git a/pina/adaptive_functions/adaptive_cos.py b/pina/adaptive_functions/adaptive_cos.py index 0bae214..10106aa 100644 --- a/pina/adaptive_functions/adaptive_cos.py +++ b/pina/adaptive_functions/adaptive_cos.py @@ -1,6 +1,7 @@ import torch from torch.nn.parameter import Parameter + class AdaptiveCos(torch.nn.Module): ''' Implementation of soft exponential activation. @@ -18,7 +19,8 @@ class AdaptiveCos(torch.nn.Module): >>> x = torch.randn(256) >>> x = a1(x) ''' - def __init__(self, alpha = None): + + def __init__(self, alpha=None): ''' Initialization. INPUT: @@ -31,16 +33,18 @@ class AdaptiveCos(torch.nn.Module): # initialize alpha if alpha == None: - self.alpha = Parameter(torch.tensor(1.0)) # create a tensor out of alpha + self.alpha = Parameter( + torch.tensor(1.0)) # create a tensor out of alpha else: - self.alpha = Parameter(torch.tensor(alpha)) # create a tensor out of alpha - self.alpha.requiresGrad = True # set requiresGrad to true! + self.alpha = Parameter( + torch.tensor(alpha)) # create a tensor out of alpha + self.alpha.requiresGrad = True # set requiresGrad to true! self.scale = Parameter(torch.tensor(1.0)) - self.scale.requiresGrad = True # set requiresGrad to true! + self.scale.requiresGrad = True # set requiresGrad to true! self.translate = Parameter(torch.tensor(0.0)) - self.translate.requiresGrad = True # set requiresGrad to true! + self.translate.requiresGrad = True # set requiresGrad to true! def forward(self, x): ''' diff --git a/pina/adaptive_functions/adaptive_exp.py b/pina/adaptive_functions/adaptive_exp.py index d9b1732..c65406f 100644 --- a/pina/adaptive_functions/adaptive_exp.py +++ b/pina/adaptive_functions/adaptive_exp.py @@ -1,6 +1,7 @@ import torch from torch.nn.parameter import Parameter + class AdaptiveExp(torch.nn.Module): ''' Implementation of soft exponential activation. @@ -18,6 +19,7 @@ class AdaptiveExp(torch.nn.Module): >>> x = torch.randn(256) >>> x = a1(x) ''' + def __init__(self): ''' Initialization. @@ -28,14 +30,20 @@ class AdaptiveExp(torch.nn.Module): ''' super(AdaptiveExp, self).__init__() - self.scale = Parameter(torch.normal(torch.tensor(1.0), torch.tensor(0.1))) # create a tensor out of alpha - self.scale.requiresGrad = True # set requiresGrad to true! + self.scale = Parameter( + torch.normal(torch.tensor(1.0), + torch.tensor(0.1))) # create a tensor out of alpha + self.scale.requiresGrad = True # set requiresGrad to true!
- self.alpha = Parameter(torch.normal(torch.tensor(1.0), torch.tensor(0.1))) # create a tensor out of alpha - self.alpha.requiresGrad = True # set requiresGrad to true! + self.alpha = Parameter( + torch.normal(torch.tensor(1.0), + torch.tensor(0.1))) # create a tensor out of alpha + self.alpha.requiresGrad = True # set requiresGrad to true! - self.translate = Parameter(torch.normal(torch.tensor(0.0), torch.tensor(0.1))) # create a tensor out of alpha - self.translate.requiresGrad = True # set requiresGrad to true! + self.translate = Parameter( + torch.normal(torch.tensor(0.0), + torch.tensor(0.1))) # create a tensor out of alpha + self.translate.requiresGrad = True # set requiresGrad to true! def forward(self, x): ''' diff --git a/pina/adaptive_functions/adaptive_linear.py b/pina/adaptive_functions/adaptive_linear.py index f4b72e9..42968c9 100644 --- a/pina/adaptive_functions/adaptive_linear.py +++ b/pina/adaptive_functions/adaptive_linear.py @@ -2,6 +2,7 @@ import torch from torch.nn.parameter import Parameter + class AdaptiveLinear(torch.nn.Module): ''' Implementation of soft exponential activation. @@ -19,6 +20,7 @@ class AdaptiveLinear(torch.nn.Module): >>> x = torch.randn(256) >>> x = a1(x) ''' + def __init__(self): ''' Initialization. @@ -30,10 +32,10 @@ class AdaptiveLinear(torch.nn.Module): super(AdaptiveLinear, self).__init__() self.scale = Parameter(torch.tensor(1.0)) - self.scale.requiresGrad = True # set requiresGrad to true! + self.scale.requiresGrad = True # set requiresGrad to true! self.translate = Parameter(torch.tensor(0.0)) - self.translate.requiresGrad = True # set requiresGrad to true! + self.translate.requiresGrad = True # set requiresGrad to true! def forward(self, x): ''' diff --git a/pina/adaptive_functions/adaptive_relu.py b/pina/adaptive_functions/adaptive_relu.py index 14cf133..0061462 100644 --- a/pina/adaptive_functions/adaptive_relu.py +++ b/pina/adaptive_functions/adaptive_relu.py @@ -1,6 +1,7 @@ import torch from torch.nn.parameter import Parameter + class AdaptiveReLU(torch.nn.Module, Parameter): ''' Implementation of soft exponential activation. @@ -18,6 +19,7 @@ class AdaptiveReLU(torch.nn.Module, Parameter): >>> x = torch.randn(256) >>> x = a1(x) ''' + def __init__(self): ''' Initialization. @@ -29,10 +31,10 @@ class AdaptiveReLU(torch.nn.Module, Parameter): super(AdaptiveReLU, self).__init__() self.scale = Parameter(torch.rand(1)) - self.scale.requiresGrad = True # set requiresGrad to true! + self.scale.requiresGrad = True # set requiresGrad to true! self.translate = Parameter(torch.rand(1)) - self.translate.requiresGrad = True # set requiresGrad to true! + self.translate.requiresGrad = True # set requiresGrad to true! def forward(self, x): ''' @@ -40,4 +42,4 @@ class AdaptiveReLU(torch.nn.Module, Parameter): Applies the function to the input elementwise. ''' #x += self.translate - return torch.relu(x+self.translate)*self.scale + return torch.relu(x + self.translate) * self.scale diff --git a/pina/adaptive_functions/adaptive_sin.py b/pina/adaptive_functions/adaptive_sin.py index 80793c2..26a6ef3 100644 --- a/pina/adaptive_functions/adaptive_sin.py +++ b/pina/adaptive_functions/adaptive_sin.py @@ -1,6 +1,7 @@ import torch from torch.nn.parameter import Parameter + class AdaptiveSin(torch.nn.Module): ''' Implementation of soft exponential activation. @@ -18,7 +19,8 @@ class AdaptiveSin(torch.nn.Module): >>> x = torch.randn(256) >>> x = a1(x) ''' - def __init__(self, alpha = None): + + def __init__(self, alpha=None): ''' Initialization. 
INPUT: @@ -29,14 +31,18 @@ class AdaptiveSin(torch.nn.Module): super(AdaptiveSin, self).__init__() # initialize alpha - self.alpha = Parameter(torch.normal(torch.tensor(1.0), torch.tensor(0.1))) # create a tensor out of alpha - self.alpha.requiresGrad = True # set requiresGrad to true! + self.alpha = Parameter( + torch.normal(torch.tensor(1.0), + torch.tensor(0.1))) # create a tensor out of alpha + self.alpha.requiresGrad = True # set requiresGrad to true! - self.scale = Parameter(torch.normal(torch.tensor(1.0), torch.tensor(0.1))) - self.scale.requiresGrad = True # set requiresGrad to true! + self.scale = Parameter( + torch.normal(torch.tensor(1.0), torch.tensor(0.1))) + self.scale.requiresGrad = True # set requiresGrad to true! - self.translate = Parameter(torch.normal(torch.tensor(0.0), torch.tensor(0.1))) - self.translate.requiresGrad = True # set requiresGrad to true! + self.translate = Parameter( + torch.normal(torch.tensor(0.0), torch.tensor(0.1))) + self.translate.requiresGrad = True # set requiresGrad to true! def forward(self, x): ''' diff --git a/pina/adaptive_functions/adaptive_softplus.py b/pina/adaptive_functions/adaptive_softplus.py index c2069ff..d306832 100644 --- a/pina/adaptive_functions/adaptive_softplus.py +++ b/pina/adaptive_functions/adaptive_softplus.py @@ -1,6 +1,7 @@ import torch from torch.nn.parameter import Parameter + class AdaptiveSoftplus(torch.nn.Module): ''' Implementation of soft exponential activation. @@ -18,6 +19,7 @@ class AdaptiveSoftplus(torch.nn.Module): >>> x = torch.randn(256) >>> x = a1(x) ''' + def __init__(self): ''' Initialization. @@ -31,7 +33,7 @@ class AdaptiveSoftplus(torch.nn.Module): self.soft = torch.nn.Softplus() self.scale = Parameter(torch.rand(1)) - self.scale.requiresGrad = True # set requiresGrad to true! + self.scale.requiresGrad = True # set requiresGrad to true! def forward(self, x): ''' @@ -39,4 +41,4 @@ class AdaptiveSoftplus(torch.nn.Module): Applies the function to the input elementwise. ''' #x += self.translate - return self.soft(x)*self.scale + return self.soft(x) * self.scale diff --git a/pina/adaptive_functions/adaptive_square.py b/pina/adaptive_functions/adaptive_square.py index 911673a..9b341a0 100644 --- a/pina/adaptive_functions/adaptive_square.py +++ b/pina/adaptive_functions/adaptive_square.py @@ -1,6 +1,7 @@ import torch from torch.nn.parameter import Parameter + class AdaptiveSquare(torch.nn.Module): ''' Implementation of soft exponential activation. @@ -18,7 +19,8 @@ class AdaptiveSquare(torch.nn.Module): >>> x = torch.randn(256) >>> x = a1(x) ''' - def __init__(self, alpha = None): + + def __init__(self, alpha=None): ''' Initialization. INPUT: @@ -29,10 +31,10 @@ class AdaptiveSquare(torch.nn.Module): super(AdaptiveSquare, self).__init__() self.scale = Parameter(torch.tensor(1.0)) - self.scale.requiresGrad = True # set requiresGrad to true! + self.scale.requiresGrad = True # set requiresGrad to true! self.translate = Parameter(torch.tensor(0.0)) - self.translate.requiresGrad = True # set requiresGrad to true! + self.translate.requiresGrad = True # set requiresGrad to true! def forward(self, x): ''' diff --git a/pina/adaptive_functions/adaptive_tanh.py b/pina/adaptive_functions/adaptive_tanh.py index 5b6bede..3a2c719 100644 --- a/pina/adaptive_functions/adaptive_tanh.py +++ b/pina/adaptive_functions/adaptive_tanh.py @@ -1,6 +1,7 @@ import torch from torch.nn.parameter import Parameter + class AdaptiveTanh(torch.nn.Module): ''' Implementation of soft exponential activation. 
@@ -18,7 +19,8 @@ class AdaptiveTanh(torch.nn.Module): >>> x = torch.randn(256) >>> x = a1(x) ''' - def __init__(self, alpha = None): + + def __init__(self, alpha=None): ''' Initialization. INPUT: @@ -31,17 +33,19 @@ class AdaptiveTanh(torch.nn.Module): # initialize alpha if alpha == None: - self.alpha = Parameter(torch.tensor(1.0)) # create a tensor out of alpha + self.alpha = Parameter( + torch.tensor(1.0)) # create a tensor out of alpha else: - self.alpha = Parameter(torch.tensor(alpha)) # create a tensor out of alpha - - self.alpha.requiresGrad = True # set requiresGrad to true! + self.alpha = Parameter( + torch.tensor(alpha)) # create a tensor out of alpha + + self.alpha.requiresGrad = True # set requiresGrad to true! self.scale = Parameter(torch.tensor(1.0)) - self.scale.requiresGrad = True # set requiresGrad to true! + self.scale.requiresGrad = True # set requiresGrad to true! self.translate = Parameter(torch.tensor(0.0)) - self.translate.requiresGrad = True # set requiresGrad to true! + self.translate.requiresGrad = True # set requiresGrad to true! def forward(self, x): ''' @@ -49,4 +53,6 @@ class AdaptiveTanh(torch.nn.Module): Applies the function to the input elementwise. ''' x += self.translate - return self.scale * (torch.exp(self.alpha * x) - torch.exp(-self.alpha * x))/(torch.exp(self.alpha * x) + torch.exp(-self.alpha * x)) + return self.scale * (torch.exp(self.alpha * x) - torch.exp( + -self.alpha * x)) / (torch.exp(self.alpha * x) + + torch.exp(-self.alpha * x)) diff --git a/pina/callbacks/__init__.py b/pina/callbacks/__init__.py index e0beae9..c9ba520 100644 --- a/pina/callbacks/__init__.py +++ b/pina/callbacks/__init__.py @@ -1,9 +1,5 @@ -__all__ = [ - 'SwitchOptimizer', - 'R3Refinement', - 'MetricTracker' -] +__all__ = ['SwitchOptimizer', 'R3Refinement', 'MetricTracker'] from .optimizer_callbacks import SwitchOptimizer from .adaptive_refinment_callbacks import R3Refinement -from .processing_callbacks import MetricTracker \ No newline at end of file +from .processing_callbacks import MetricTracker diff --git a/pina/callbacks/adaptive_refinment_callbacks.py b/pina/callbacks/adaptive_refinment_callbacks.py index 1be904d..5ec149b 100644 --- a/pina/callbacks/adaptive_refinment_callbacks.py +++ b/pina/callbacks/adaptive_refinment_callbacks.py @@ -6,37 +6,37 @@ import torch from ..utils import check_consistency - class R3Refinement(Callback): - """ - PINA implementation of a R3 Refinement Callback. - .. seealso:: - - **Original reference**: Daw, Arka, et al. "Mitigating Propagation Failures - in Physics-informed Neural Networks using - Retain-Resample-Release (R3) Sampling." (2023). - DOI: `10.48550/arXiv.2207.02338 - < https://doi.org/10.48550/arXiv.2207.02338>`_ - """ def __init__(self, sample_every): """ - R3 routine for sampling new points based on - adpative search. The algorithm incrementally - accumulate collocation points in regions of - high PDE residuals, and release the one which - have low residual. Points are sampled uniformmaly - in all region where sampling is needed. + PINA Implementation of an R3 Refinement Callback. + + This callback implements the R3 (Retain-Resample-Release) routine for sampling new points based on adaptive search. + The algorithm incrementally accumulates collocation points in regions of high PDE residuals, and releases those + with low residuals. Points are sampled uniformly in all regions where sampling is needed. + + .. seealso:: + + Original Reference: Daw, Arka, et al. 
*Mitigating Propagation Failures in Physics-informed Neural Networks + using Retain-Resample-Release (R3) Sampling. (2023)*. + DOI: `10.48550/arXiv.2207.02338 + `_ :param int sample_every: Frequency for sampling. + + :raises ValueError: If `sample_every` is not an integer. + + Example: + >>> r3_callback = R3Refinement(sample_every=5) """ super().__init__() # sample every check_consistency(sample_every, int) self._sample_every = sample_every - + def _compute_residual(self, trainer): """ Computes the residuals for a PINN object. @@ -63,7 +63,7 @@ class R3Refinement(Callback): target = condition.equation.residual(pts, solver.forward(pts)) res_loss[location] = torch.abs(target).as_subclass(torch.Tensor) tot_loss.append(torch.abs(target)) - + return torch.vstack(tot_loss), res_loss def _r3_routine(self, trainer): @@ -79,7 +79,7 @@ class R3Refinement(Callback): # !!!!!! From now everything is performed on CPU !!!!!! # average loss - avg = (tot_loss.mean()).to('cpu') + avg = (tot_loss.mean()).to('cpu') # points to keep old_pts = {} @@ -97,16 +97,18 @@ class R3Refinement(Callback): tot_points += len(pts) # extract new points to sample uniformally for each location - n_points = (self._tot_pop_numb - tot_points ) // len(self._sampling_locations) - remainder = (self._tot_pop_numb - tot_points ) % len(self._sampling_locations) + n_points = (self._tot_pop_numb - tot_points) // len( + self._sampling_locations) + remainder = (self._tot_pop_numb - tot_points) % len( + self._sampling_locations) n_uniform_points = [n_points] * len(self._sampling_locations) n_uniform_points[-1] += remainder # sample new points for numb_pts, loc in zip(n_uniform_points, self._sampling_locations): trainer._model.problem.discretise_domain(numb_pts, - 'random', - locations=[loc]) + 'random', + locations=[loc]) # adding previous population points trainer._model.problem.add_points(old_pts) @@ -114,6 +116,18 @@ class R3Refinement(Callback): trainer._create_or_update_loader() def on_train_start(self, trainer, _): + """ + Callback function called at the start of training. + + This method extracts the locations for sampling from the problem conditions and calculates the total population. + + :param trainer: The trainer object managing the training process. + :type trainer: pytorch_lightning.Trainer + :param _: Placeholder argument (not used). + + :return: None + :rtype: None + """ # extract locations for sampling problem = trainer._model.problem locations = [] @@ -122,7 +136,7 @@ class R3Refinement(Callback): if hasattr(condition, 'location'): locations.append(condition_name) self._sampling_locations = locations - + # extract total population total_population = 0 for location in self._sampling_locations: @@ -131,5 +145,17 @@ class R3Refinement(Callback): self._tot_pop_numb = total_population def on_train_epoch_end(self, trainer, __): + """ + Callback function called at the end of each training epoch. + + This method triggers the R3 routine for refinement if the current epoch is a multiple of `_sample_every`. + + :param trainer: The trainer object managing the training process. + :type trainer: pytorch_lightning.Trainer + :param __: Placeholder argument (not used). 
diff --git a/pina/callbacks/optimizer_callbacks.py b/pina/callbacks/optimizer_callbacks.py
index 0f375f1..276983e 100644
--- a/pina/callbacks/optimizer_callbacks.py
+++ b/pina/callbacks/optimizer_callbacks.py
@@ -6,22 +6,30 @@ from ..utils import check_consistency

 class SwitchOptimizer(Callback):
-    """
-    PINA implementation of a Lightining Callback to switch
-    optimizer during training. The rouutine can be used to
-    try multiple optimizers during the training, without the
-    need to stop training.
-    """
+
     def __init__(self, new_optimizers, new_optimizers_kwargs, epoch_switch):
         """
-        SwitchOptimizer is a routine for switching optimizer during training.
+        PINA Implementation of a Lightning Callback to switch optimizer during training.

-        :param torch.optim.Optimizer | list new_optimizers: The model optimizers to
-            switch to. It must be a list of :class:`torch.optim.Optimizer` or list of
-            :class:`torch.optim.Optimizer` for multiple model solvers.
-        :param dict| list new_optimizers: The model optimizers keyword arguments to
-            switch use. It must be a dict or list of dict for multiple optimizers.
-        :param int epoch_switch: Epoch for switching optimizer.
+        This callback allows for switching between different optimizers during training, enabling
+        the exploration of multiple optimization strategies without the need to stop training.
+
+        :param new_optimizers: The model optimizers to switch to. Can be a single
+            :class:`torch.optim.Optimizer` or a list of them for multiple model solvers.
+        :type new_optimizers: torch.optim.Optimizer | list
+        :param new_optimizers_kwargs: The keyword arguments for the new optimizers. Can be a single dictionary
+            or a list of dictionaries corresponding to each optimizer.
+        :type new_optimizers_kwargs: dict | list
+        :param epoch_switch: The epoch at which to switch to the new optimizer.
+        :type epoch_switch: int
+
+        :raises ValueError: If ``epoch_switch`` is less than 1 or if there is a mismatch in the number of
+            optimizers and their corresponding keyword argument dictionaries.
+
+        Example:
+            >>> switch_callback = SwitchOptimizer(new_optimizers=[optimizer1, optimizer2],
+            >>>                                   new_optimizers_kwargs=[{'lr': 0.001}, {'lr': 0.01}],
+            >>>                                   epoch_switch=10)
         """
         super().__init__()

@@ -44,19 +52,29 @@ class SwitchOptimizer(Callback):
                              ' arguments for each optimizer.'
                              f' Got {len_optimizer} optimizers, and'
                              f' {len_optimizer_kwargs} dictionaries')
-        
+
         # save new optimizers
         self._new_optimizers = new_optimizers
         self._new_optimizers_kwargs = new_optimizers_kwargs
         self._epoch_switch = epoch_switch

     def on_train_epoch_start(self, trainer, __):
+        """
+        Callback function to switch optimizer at the start of each training epoch.
+
+        :param trainer: The trainer object managing the training process.
+        :type trainer: pytorch_lightning.Trainer
+        :param __: Placeholder argument (not used).
+
+        :return: None
+        :rtype: None
+        """
         if trainer.current_epoch == self._epoch_switch:
             optims = []

             for idx, (optim, optim_kwargs) in enumerate(
-                zip(self._new_optimizers,
-                    self._new_optimizers_kwargs)
-                ):
-                optims.append(optim(trainer._model.models[idx].parameters(), **optim_kwargs))
+                    zip(self._new_optimizers, self._new_optimizers_kwargs)):
+                optims.append(
+                    optim(trainer._model.models[idx].parameters(),
+                          **optim_kwargs))

-            trainer.optimizers = optims
\ No newline at end of file
+            trainer.optimizers = optims
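A sketch of a two-phase schedule with this callback. Note that ``new_optimizers`` takes optimizer *classes*, which the callback instantiates with the model parameters at switch time; the ``pinn`` solver is assumed to exist:

.. code:: python

    import torch
    from pina import Trainer
    from pina.callbacks import SwitchOptimizer

    # start training with the solver's default optimizer,
    # then hand over to SGD at epoch 50
    switch = SwitchOptimizer(new_optimizers=[torch.optim.SGD],
                             new_optimizers_kwargs=[{'lr': 0.001}],
                             epoch_switch=50)
    trainer = Trainer(solver=pinn, callbacks=[switch], max_epochs=100)
    trainer.train()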
diff --git a/pina/callbacks/processing_callbacks.py b/pina/callbacks/processing_callbacks.py
index 74ccc44..791d540 100644
--- a/pina/callbacks/processing_callbacks.py
+++ b/pina/callbacks/processing_callbacks.py
@@ -6,20 +6,53 @@ import copy

 class MetricTracker(Callback):
-    """
-    PINA implementation of a Lightining Callback to track relevant
-    metrics during training.
-    """
+
     def __init__(self):
+        """
+        PINA Implementation of a Lightning Callback for Metric Tracking.
+
+        This class provides functionality to track relevant metrics during
+        the training process. The aggregated values are available through
+        the ``metrics`` property once training has run.
+
+        :ivar _collection: A list to store collected metrics after each training epoch.
+
+        Example:
+            >>> tracker = MetricTracker()
+            >>> # ... Perform training ...
+            >>> metrics = tracker.metrics
+        """
         self._collection = []

     def on_train_epoch_end(self, trainer, __):
+        """
+        Collect and track metrics at the end of each training epoch.
+
+        :param trainer: The trainer object managing the training process.
+        :type trainer: pytorch_lightning.Trainer
+        :param __: Placeholder argument (not used).
+
+        :return: None
+        :rtype: None
+        """
-        self._collection.append(copy.deepcopy(trainer.logged_metrics)) # track them
+        self._collection.append(copy.deepcopy(
+            trainer.logged_metrics))  # track them

     @property
     def metrics(self):
-        common_keys = set.intersection(*map(set, self._collection))
-        v = {k: torch.stack([dic[k] for dic in self._collection]) for k in common_keys}
-        return v
+        """
+        Aggregate collected metrics during training.
+
+        :return: A dictionary containing aggregated metric values.
+        :rtype: dict
+        """
+        common_keys = set.intersection(*map(set, self._collection))
+        v = {
+            k: torch.stack([dic[k] for dic in self._collection])
+            for k in common_keys
+        }
+        return v
diff --git a/pina/condition.py b/pina/condition.py
index 0fa0cc4..6994701 100644
--- a/pina/condition.py
+++ b/pina/condition.py
@@ -3,15 +3,17 @@ from .label_tensor import LabelTensor
 from .geometry import Location
 from .equation.equation import Equation

+
 def dummy(a):
     """Dummy function for testing purposes."""
     return None

+
 class Condition:
     """
-    The class `Condition` is used to represent the constraints (physical
+    The class ``Condition`` is used to represent the constraints (physical
     equations, boundary conditions, etc.) that should be satisfied in the
-    problem at hand. Condition objects are used to formulate the PINA :obj:`pina.problem.abstract_problem.Abstract_Problem` object.
+    problem at hand. Condition objects are used to formulate the PINA :obj:`pina.problem.abstract_problem.AbstractProblem` object.

     Conditions can be specified in three ways:

        1.
By specifying the input and output points of the condition; in such a @@ -49,8 +51,7 @@ class Condition: """ __slots__ = [ - 'input_points', 'output_points', 'location', 'equation', - 'data_weight' + 'input_points', 'output_points', 'location', 'equation', 'data_weight' ] def _dictvalue_isinstance(self, dict_, key_, class_): @@ -67,13 +68,14 @@ class Condition: self.data_weight = kwargs.pop('data_weight', 1.0) if len(args) != 0: - raise ValueError('Condition takes only the following keyword arguments: {`input_points`, `output_points`, `location`, `function`, `data_weight`}.') + raise ValueError( + 'Condition takes only the following keyword arguments: {`input_points`, `output_points`, `location`, `function`, `data_weight`}.' + ) - if ( - sorted(kwargs.keys()) != sorted(['input_points', 'output_points']) and - sorted(kwargs.keys()) != sorted(['location', 'equation']) and - sorted(kwargs.keys()) != sorted(['input_points', 'equation']) - ): + if (sorted(kwargs.keys()) != sorted(['input_points', 'output_points']) + and sorted(kwargs.keys()) != sorted(['location', 'equation']) + and sorted(kwargs.keys()) != sorted( + ['input_points', 'equation'])): raise ValueError(f'Invalid keyword arguments {kwargs.keys()}.') if not self._dictvalue_isinstance(kwargs, 'input_points', LabelTensor): diff --git a/pina/equation/equation.py b/pina/equation/equation.py index 577d6ee..2934d37 100644 --- a/pina/equation/equation.py +++ b/pina/equation/equation.py @@ -1,6 +1,7 @@ -""" Module """ +""" Module for Equation. """ from .equation_interface import EquationInterface + class Equation(EquationInterface): def __init__(self, equation): @@ -11,7 +12,7 @@ class Equation(EquationInterface): :param equation: A ``torch`` callable equation to evaluate the residual. - :type equation: callable + :type equation: Callable """ if not callable(equation): raise ValueError('equation must be a callable function.' @@ -29,4 +30,4 @@ class Equation(EquationInterface): :return: The residual evaluation of the specified equation. :rtype: LabelTensor """ - return self.__equation(input_, output_) \ No newline at end of file + return self.__equation(input_, output_) diff --git a/pina/equation/equation_factory.py b/pina/equation/equation_factory.py index dc09869..4edbf53 100644 --- a/pina/equation/equation_factory.py +++ b/pina/equation/equation_factory.py @@ -4,7 +4,7 @@ from ..operators import grad, div, laplacian class FixedValue(Equation): - + def __init__(self, value, components=None): """ Fixed Value Equation class. This class can be @@ -18,10 +18,12 @@ class FixedValue(Equation): all the output variables are considered. Default is ``None``. """ + def equation(input_, output_): if components is None: return output_ - value - return output_.extract(components) - value + return output_.extract(components) - value + super().__init__(equation) @@ -43,9 +45,11 @@ class FixedGradient(Equation): which the gradient is calculated. d should be a subset of the input labels. If ``None``, all the input variables are considered. Default is ``None``. - """ + """ + def equation(input_, output_): return grad(output_, input_, components=components, d=d) - value + super().__init__(equation) @@ -67,9 +71,11 @@ class FixedFlux(Equation): which the flux is calculated. d should be a subset of the input labels. If ``None``, all the input variables are considered. Default is ``None``. 
-        """
+        """
+
         def equation(input_, output_):
             return div(output_, input_, components=components, d=d) - value
+
         super().__init__(equation)

@@ -90,7 +96,9 @@ class Laplace(Equation):
-            which the flux is calculated. d should be a subset
+            which the laplacian is calculated. d should be a subset
             of the input labels. If ``None``, all the input variables are
             considered. Default is ``None``.
-        """
+        """
+
         def equation(input_, output_):
             return laplacian(output_, input_, components=components, d=d)
+
         super().__init__(equation)
diff --git a/pina/equation/system_equation.py b/pina/equation/system_equation.py
index df852dc..910005f 100644
--- a/pina/equation/system_equation.py
+++ b/pina/equation/system_equation.py
@@ -1,8 +1,9 @@
-""" Module """
+""" Module for SystemEquation. """
 import torch
 from .equation import Equation
 from ..utils import check_consistency

+
 class SystemEquation(Equation):

     def __init__(self, list_equation, reduction='mean'):
@@ -14,7 +15,7 @@ class SystemEquation(Equation):

         A ``SystemEquation`` is specified by a list of equations.

-        :param callable equation: A ``torch`` callable equation to
+        :param list[Callable] list_equation: A list of ``torch`` callable equations to
             evaluate the residual
         :param str reduction: Specifies the reduction to apply to the output:
            ``none`` | ``mean`` | ``sum`` | ``callable``. ``none``: no reduction
@@ -28,7 +29,7 @@ class SystemEquation(Equation):

         # equations definition
         self.equations = []
-        for _, equation in enumerate(list_equation):        
+        for equation in list_equation:
             self.equations.append(Equation(equation))

         # possible reduction
@@ -39,7 +40,8 @@ class SystemEquation(Equation):
         elif (reduction == 'none') or callable(reduction):
             self.reduction = reduction
         else:
-            raise NotImplementedError('Only mean and sum reductions implemented.')
+            raise NotImplementedError(
+                'Only mean and sum reductions implemented.')

     def residual(self, input_, output_):
         """
@@ -52,12 +54,10 @@ class SystemEquation(Equation):
            aggregated by the ``reduction`` defined in the ``__init__``.
         :rtype: LabelTensor
         """
-        residual = torch.hstack([
-            equation.residual(input_, output_)
-            for equation in self.equations
-        ])
-
+        residual = torch.hstack(
+            [equation.residual(input_, output_) for equation in self.equations])
+
         if self.reduction == 'none':
             return residual
-        
-        return self.reduction(residual, dim=-1)
\ No newline at end of file
+
+        return self.reduction(residual, dim=-1)
diff --git a/pina/geometry/__init__.py b/pina/geometry/__init__.py
index 5d38b32..a936069 100644
--- a/pina/geometry/__init__.py
+++ b/pina/geometry/__init__.py
@@ -1,13 +1,6 @@
 __all__ = [
-    'Location',
-    'CartesianDomain',
-    'EllipsoidDomain',
-    'Union',
-    'Intersection',
-    'Exclusion',
-    'Difference',
-    'OperationInterface',
-    'SimplexDomain'
+    'Location', 'CartesianDomain', 'EllipsoidDomain', 'Union', 'Intersection',
+    'Exclusion', 'Difference', 'OperationInterface', 'SimplexDomain'
 ]

 from .location import Location
diff --git a/pina/geometry/cartesian.py b/pina/geometry/cartesian.py
index 6c8f810..84f6554 100644
--- a/pina/geometry/cartesian.py
+++ b/pina/geometry/cartesian.py
@@ -8,12 +8,12 @@ from ..utils import torch_lhs, chebyshev_roots
 class CartesianDomain(Location):
     """PINA implementation of Hypercube domain."""

-    def __init__(self, span_dict):
+    def __init__(self, cartesian_dict):
         """
-        :param span_dict: A dictionary with dict-key a string representing
+        :param cartesian_dict: A dictionary with dict-key a string representing
            the input variables for the pinn, and dict-value a list with
            the domain extrema.
- :type span_dict: dict + :type cartesian_dict: dict :Example: >>> spatial_domain = CartesianDomain({'x': [0, 1], 'y': [0, 1]}) @@ -21,7 +21,7 @@ class CartesianDomain(Location): self.fixed_ = {} self.range_ = {} - for k, v in span_dict.items(): + for k, v in cartesian_dict.items(): if isinstance(v, (int, float)): self.fixed_[k] = v elif isinstance(v, (list, tuple)) and len(v) == 2: @@ -33,28 +33,27 @@ class CartesianDomain(Location): def variables(self): """Spatial variables. - :return: Spatial variables defined in '__init__()' + :return: Spatial variables defined in ``__init__()`` :rtype: list[str] """ return list(self.fixed_.keys()) + list(self.range_.keys()) - def update(self, new_span): - """Adding new dimensions on the span + def update(self, new_domain): + """Adding new dimensions on the ``CartesianDomain`` - :param new_span: A new span object to merge - :type new_span: Span + :param CartesianDomain new_domain: A new ``CartesianDomain`` object to merge :Example: - >>> spatial_domain = Span({'x': [0, 1], 'y': [0, 1]}) + >>> spatial_domain = CartesianDomain({'x': [0, 1], 'y': [0, 1]}) >>> spatial_domain.variables ['x', 'y'] - >>> spatial_domain_2 = Span({'z': [3, 4], 'w': [0, 1]}) + >>> spatial_domain_2 = CartesianDomain({'z': [3, 4], 'w': [0, 1]}) >>> spatial_domain.update(spatial_domain_2) >>> spatial_domain.variables ['x', 'y', 'z', 'w'] """ - self.fixed_.update(new_span.fixed_) - self.range_.update(new_span.range_) + self.fixed_.update(new_domain.fixed_) + self.range_.update(new_domain.range_) def _sample_range(self, n, mode, bounds): """Rescale the samples to the correct bounds @@ -62,11 +61,11 @@ class CartesianDomain(Location): :param n: Number of points to sample, see Note below for reference. :type n: int - :param mode: Mode for sampling, defaults to 'random'. - Available modes include: random sampling, 'random'; - latin hypercube sampling, 'latin' or 'lh'; - chebyshev sampling, 'chebyshev'; grid sampling 'grid'. - :type mode: str, optional + :param mode: Mode for sampling, defaults to ``random``. + Available modes include: random sampling, ``random``; + latin hypercube sampling, ``latin`` or ``lh``; + chebyshev sampling, ``chebyshev``; grid sampling ``grid``. + :type mode: str :param bounds: Bounds to rescale the samples. :type bounds: torch.Tensor :return: Rescaled sample points. @@ -97,25 +96,27 @@ class CartesianDomain(Location): :param n: Number of points to sample, see Note below for reference. :type n: int - :param mode: Mode for sampling, defaults to 'random'. - Available modes include: random sampling, 'random'; - latin hypercube sampling, 'latin' or 'lh'; - chebyshev sampling, 'chebyshev'; grid sampling 'grid'. - :type mode: str, optional - :param variables: pinn variable to be sampled, defaults to 'all'. - :type variables: str or list[str], optional + :param mode: Mode for sampling, defaults to ``random``. + Available modes include: random sampling, ``random``; + latin hypercube sampling, ``latin`` or ``lh``; + chebyshev sampling, ``chebyshev``; grid sampling ``grid``. + :type mode: str + :param variables: pinn variable to be sampled, defaults to ``all``. + :type variables: str | list[str] + :return: Returns ``LabelTensor`` of n sampled points. + :rtype: LabelTensor .. note:: The total number of points sampled in case of multiple variables - is not 'n', and it depends on the chosen 'mode'. If 'mode' is - 'grid' or 'chebyshev', the points are sampled independentely + is not ``n``, and it depends on the chosen ``mode``. 
If ``mode`` is
+           ``grid`` or ``chebyshev``, the points are sampled independently
            across the variables and the results crossed together, i.e. the
-           final number of points is 'n' to the power of the number of
+           final number of points is ``n`` to the power of the number of
+           variables. If ``mode`` is ``random``, ``lh`` or ``latin``, the variables
-           are sampled all together, and the final number of points
+           are sampled all together, and the final number of points is ``n``.

         .. warning::
-            The extrema values of Span are always sampled only for 'grid' mode.
+            The extrema values of ``CartesianDomain`` are always sampled only for ``grid`` mode.

         :Example:
-            >>> spatial_domain = Span({'x': [0, 1], 'y': [0, 1]})
+            >>> spatial_domain = CartesianDomain({'x': [0, 1], 'y': [0, 1]})
@@ -142,6 +143,7 @@ class CartesianDomain(Location):
                     [0.6667, 1.0000],
                     [1.0000, 1.0000]])
        """
+
        def _1d_sampler(n, mode, variables):
            """ Sample independently the variables and cross the results"""
            tmp = []
@@ -161,8 +163,8 @@ class CartesianDomain(Location):
        for variable in variables:
            if variable in self.fixed_.keys():
                value = self.fixed_[variable]
-                pts_variable = torch.tensor([[value]]).repeat(
-                    result.shape[0], 1)
+                pts_variable = torch.tensor([[value]
+                                             ]).repeat(result.shape[0], 1)

                pts_variable = pts_variable.as_subclass(LabelTensor)
                pts_variable.labels = [variable]
@@ -175,13 +177,13 @@ class CartesianDomain(Location):

        :param n: Number of points to sample.
        :type n: int
-        :param mode: Mode for sampling, defaults to 'random'.
-            Available modes include: random sampling, 'random';
-            latin hypercube sampling, 'latin' or 'lh';
-            chebyshev sampling, 'chebyshev'; grid sampling 'grid'.
-        :type mode: str, optional.
-        :param variables: pinn variable to be sampled, defaults to 'all'.
-        :type variables: str or list[str], optional.
+        :param mode: Mode for sampling, defaults to ``random``.
+            Available modes include: random sampling, ``random``;
+            latin hypercube sampling, ``latin`` or ``lh``;
+            chebyshev sampling, ``chebyshev``; grid sampling ``grid``.
+        :type mode: str
+        :param variables: pinn variable to be sampled, defaults to ``all``.
+        :type variables: str | list[str]
        :return: Sample points.
        :rtype: list[torch.Tensor]
        """
@@ -195,8 +197,8 @@ class CartesianDomain(Location):
        for variable in variables:
            if variable in self.fixed_.keys():
                value = self.fixed_[variable]
-                pts_variable = torch.tensor([[value]]).repeat(
-                    result.shape[0], 1)
+                pts_variable = torch.tensor([[value]
+                                             ]).repeat(result.shape[0], 1)

                pts_variable = pts_variable.as_subclass(LabelTensor)
                pts_variable.labels = [variable]
@@ -241,16 +243,15 @@ class CartesianDomain(Location):
        else:
            raise ValueError(f'mode={mode} is not valid.')

-
    def is_inside(self, point, check_border=False):
-        """Check if a point is inside the ellipsoid.
+        """Check if a point is inside the hypercube.

        :param point: Point to be checked
        :type point: LabelTensor
        :param check_border: Check if the point is also on the frontier
-            of the hypercube, default False.
+            of the hypercube, default ``False``.
        :type check_border: bool
-        :return: Returning True if the point is inside, False otherwise.
+        :return: Returning ``True`` if the point is inside, ``False`` otherwise.
        :rtype: bool
        """
        is_inside = []
@@ -268,7 +269,7 @@ class CartesianDomain(Location):
                check = bound[0] <= point.extract([variable]) <= bound[1]
            else:
                check = bound[0] < point.extract([variable]) < bound[1]
-            
+
            is_inside.append(check)
        return all(is_inside)
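To make the ``n`` semantics in the note above concrete, a short sketch:

.. code:: python

    from pina.geometry import CartesianDomain

    domain = CartesianDomain({'x': [0, 1], 'y': [0, 1]})

    # 'random' samples all variables together: exactly n points
    pts = domain.sample(n=10, mode='random')
    print(pts.shape)   # torch.Size([10, 2])

    # 'grid' samples each variable independently and crosses the
    # results: n ** n_variables points
    pts = domain.sample(n=10, mode='grid')
    print(pts.shape)   # torch.Size([100, 2])
    print(pts.labels)  # ['x', 'y']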
diff --git a/pina/geometry/difference_domain.py b/pina/geometry/difference_domain.py
index 9a571cb..efdad63 100644
--- a/pina/geometry/difference_domain.py
+++ b/pina/geometry/difference_domain.py
@@ -1,73 +1,78 @@
-"""Module for Location class."""
+"""Module for Difference class."""
+
 import torch
-from .exclusion_domain import Exclusion
 from .operation_interface import OperationInterface
 from ..label_tensor import LabelTensor


 class Difference(OperationInterface):
-    """ PINA implementation of Difference of Domains."""

     def __init__(self, geometries):
-        """
+        r"""
         PINA implementation of Difference of Domains.

         Given two sets :math:`A` and :math:`B` then the
         domain difference is defined as:

-        ..:math:
-            A \setminus B = \{x \mid x \in A \text{ and } x \not\in B\},
+        .. math::
+            A - B = \{x \mid x \in A \land x \not\in B\},

         with :math:`x` a point in :math:`\mathbb{R}^N` and :math:`N`
        the dimension of the geometry space.

-        :param list geometries: A list of geometries from 'pina.geometry'
-            such as 'EllipsoidDomain' or 'CartesianDomain'. The first
+        :param list geometries: A list of geometries from ``pina.geometry``
+            such as ``EllipsoidDomain`` or ``CartesianDomain``. The first
            geometry in the list is the geometry from which points are
            sampled. The rest of the geometries are the geometries that
            are excluded from the first geometry to find the difference.

         :Example:
-            # Create two ellipsoid domains
+            >>> # Create two ellipsoid domains
             >>> ellipsoid1 = EllipsoidDomain({'x': [-1, 1], 'y': [-1, 1]})
             >>> ellipsoid2 = EllipsoidDomain({'x': [0, 2], 'y': [0, 2]})
-
-            # Create a Difference of the ellipsoid domains
+            >>> # Create a Difference of the ellipsoid domains
             >>> difference = Difference([ellipsoid1, ellipsoid2])
         """
         super().__init__(geometries)

     def is_inside(self, point, check_border=False):
+        """
+        Check if a point is inside the ``Difference`` domain.
+
+        :param point: Point to be checked.
+        :type point: torch.Tensor
+        :param bool check_border: If ``True``, the border is considered inside.
+        :return: ``True`` if the point is inside the Difference domain, ``False`` otherwise.
+        :rtype: bool
+        """
         for geometry in self.geometries[1:]:
             if geometry.is_inside(point):
                 return False
         return self.geometries[0].is_inside(point, check_border)

     def sample(self, n, mode='random', variables='all'):
-        """Sample routine for difference domain.
+        """
+        Sample routine for ``Difference`` domain.

-        :param n: Number of points to sample in the shape.
-        :type n: int
-        :param mode: Mode for sampling, defaults to 'random'.
-            Available modes include: random sampling, 'random'.
-        :type mode: str, optional
-        :param variables: pinn variable to be sampled, defaults to 'all'.
-        :type variables: str or list[str], optional
+        :param int n: Number of points to sample in the shape.
+        :param str mode: Mode for sampling, defaults to ``random``. Available modes include: ``random``.
+        :param variables: Variables to be sampled, defaults to ``all``.
+        :type variables: str | list[str]
+        :return: Returns ``LabelTensor`` of n sampled points.
+        :rtype: LabelTensor

         :Example:
-            # Create two Cartesian domains
+            >>> # Create two Cartesian domains
             >>> cartesian1 = CartesianDomain({'x': [0, 2], 'y': [0, 2]})
             >>> cartesian2 = CartesianDomain({'x': [1, 3], 'y': [1, 3]})
-
-            # Create a Difference of the ellipsoid domains
+            >>> # Create a Difference of the Cartesian domains
             >>> difference = Difference([cartesian1, cartesian2])
-
+            >>> # Sampling
             >>> difference.sample(n=5)
                 LabelTensor([[0.8400, 0.9179],
                             [0.9154, 0.5769],
                             [1.7403, 0.4835],
                             [0.9545, 1.2851],
                             [1.3726, 0.9831]])
-            >>> len(difference.sample(n=5)
-                5
+            >>> len(difference.sample(n=5))
+                5
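A quick sketch of the membership semantics defined above, with the same Cartesian domains as the docstring example:

.. code:: python

    import torch
    from pina import LabelTensor
    from pina.geometry import CartesianDomain, Difference

    cartesian1 = CartesianDomain({'x': [0, 2], 'y': [0, 2]})
    cartesian2 = CartesianDomain({'x': [1, 3], 'y': [1, 3]})
    difference = Difference([cartesian1, cartesian2])

    # a point in cartesian1 but not in cartesian2 belongs to the difference
    pt = LabelTensor(torch.tensor([[0.5, 0.5]]), labels=['x', 'y'])
    print(difference.is_inside(pt))  # True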
diff --git a/pina/geometry/ellipsoid.py b/pina/geometry/ellipsoid.py
index d9c3680..e99425f 100644
--- a/pina/geometry/ellipsoid.py
+++ b/pina/geometry/ellipsoid.py
@@ -16,10 +16,10 @@ class EllipsoidDomain(Location):
            the domain extrema.
         :type ellipsoid_dict: dict
         :param sample_surface: A variable for choosing sample strategies. If
-            `sample_surface=True` only samples on the ellipsoid surface
-            frontier are taken. If `sample_surface=False` only samples on
-            the ellipsoid interior are taken, defaults to False.
-        :type sample_surface: bool, optional
+            ``sample_surface=True`` only samples on the ellipsoid surface
+            frontier are taken. If ``sample_surface=False`` only samples on
+            the ellipsoid interior are taken, defaults to ``False``.
+        :type sample_surface: bool

         .. warning::
            Sampling for dimensions greater or equal to 10 could result
@@ -84,22 +84,22 @@ class EllipsoidDomain(Location):
         """Check if a point is inside the ellipsoid domain.

         .. note::
-            When ```'sample_surface'``` in the ```'__init()__'```
-            is set to ```'True'```, then the method only checks
+            When ``sample_surface`` in the ``__init__()``
+            is set to ``True``, then the method only checks
            points on the surface, and not inside the domain.

         :param point: Point to be checked.
         :type point: LabelTensor
         :param check_border: Check if the point is also on the frontier
-            of the ellipsoid, default False.
+            of the ellipsoid, default ``False``.
         :type check_border: bool
-        :return: Returning True if the point is inside, False otherwise.
+        :return: Returning ``True`` if the point is inside, ``False`` otherwise.
         :rtype: bool
         """
         # small check that point is labeltensor
         check_consistency(point, LabelTensor)
-        
+
         # get axis ellipse as tensors
         list_dict_vals = list(self._axis.values())
         tmp = torch.tensor(list_dict_vals, dtype=torch.float)
@@ -122,15 +122,15 @@ class EllipsoidDomain(Location):
         # calculate ellipsoid equation
         eqn = torch.sum(point_sq.extract(ax_sq.labels) / ax_sq) - 1.

-        # if we have sampled only the surface, we check that the 
+        # if we have sampled only the surface, we check that the
         # point is inside the surface border only
         if self._sample_surface:
             return torch.allclose(eqn, torch.zeros_like(eqn))

-        # otherwise we check the ellipse 
+        # otherwise we check the ellipse
         if check_border:
             return bool(eqn <= 0)
-        
+
         return bool(eqn < 0)

     def _sample_range(self, n, mode, variables):
@@ -138,8 +138,8 @@ class EllipsoidDomain(Location):

         :param n: Number of points to sample in the ellipsoid.
         :type n: int
-        :param mode: Mode for sampling, defaults to 'random'.
-            Available modes include: random sampling, 'random'.
+        :param mode: Mode for sampling, defaults to ``random``.
+            Available modes include: random sampling, ``random``.
         :type mode: str, optional
         :param variables: Variables to be rescaled in the samples.
         :type variables: torch.Tensor
@@ -195,13 +195,12 @@ class EllipsoidDomain(Location):
     def sample(self, n, mode='random', variables='all'):
         """Sample routine.
-        :param n: Number of points to sample in the ellipsoid.
-        :type n: int
-        :param mode: Mode for sampling, defaults to 'random'.
-            Available modes include: random sampling, 'random'.
-        :type mode: str, optional
-        :param variables: pinn variable to be sampled, defaults to 'all'.
-        :type variables: str or list[str], optional
+        :param int n: Number of points to sample in the shape.
+        :param str mode: Mode for sampling, defaults to ``random``. Available modes include: ``random``.
+        :param variables: Variables to be sampled, defaults to ``all``.
+        :type variables: str | list[str]
+        :return: Returns ``LabelTensor`` of n sampled points.
+        :rtype: LabelTensor

        :Example:
-            >>> elips = Ellipsoid({'x':[1, 0], 'y':1})
+            >>> elips = EllipsoidDomain({'x': [1, 0], 'y': 1})
@@ -219,12 +218,12 @@ class EllipsoidDomain(Location):

        :param n: Number of points to sample.
        :type n: int
-        :param mode: Mode for sampling, defaults to 'random'.
-            Available modes include: random sampling, 'random';
-            latin hypercube sampling, 'latin' or 'lh';
-            chebyshev sampling, 'chebyshev'; grid sampling 'grid'.
-        :type mode: str, optional.
-        :param variables: pinn variable to be sampled, defaults to 'all'.
-        :type variables: str or list[str], optional.
+        :param mode: Mode for sampling, defaults to ``random``.
+            Available modes include: random sampling, ``random``;
+            latin hypercube sampling, ``latin`` or ``lh``;
+            chebyshev sampling, ``chebyshev``; grid sampling ``grid``.
+        :type mode: str
+        :param variables: pinn variable to be sampled, defaults to ``all``.
+        :type variables: str | list[str]
        :return: Sample points.
        :rtype: list[torch.Tensor]
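A short sampling sketch for the ellipsoid domain, here a unit circle centred at the origin (labels behave exactly as in ``CartesianDomain``):

.. code:: python

    from pina.geometry import EllipsoidDomain

    # unit circle; sample_surface=True would instead restrict
    # sampling to the frontier
    circle = EllipsoidDomain({'x': [-1, 1], 'y': [-1, 1]})
    pts = circle.sample(n=100, mode='random')
    print(pts.labels)                # ['x', 'y']

    single = circle.sample(n=1)
    print(circle.is_inside(single))  # True (interior point)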
diff --git a/pina/geometry/exclusion_domain.py b/pina/geometry/exclusion_domain.py
index ebfbb8f..457289b 100644
--- a/pina/geometry/exclusion_domain.py
+++ b/pina/geometry/exclusion_domain.py
@@ -1,47 +1,45 @@
-"""Module for Location class."""
+"""Module for Exclusion class. """
+
 import torch
-from .location import Location
-from ..utils import check_consistency
 from ..label_tensor import LabelTensor
 import random
 from .operation_interface import OperationInterface


 class Exclusion(OperationInterface):
-    """ PINA implementation of Exclusion of Domains."""

     def __init__(self, geometries):
-        """
+        r"""
         PINA implementation of Exclusion of Domains.

         Given two sets :math:`A` and :math:`B` then the
-        domain difference is defined as:
+        domain exclusion (symmetric difference) is defined as:

-        ..:math:
-            A \setminus B = \{x \mid x \in A \text{ and } x \in B\ \text{ and } x \not\in (A \text{ or } B)},
+        .. math::
+            A \triangle B = \{x \mid x \in A \cup B \land x \not\in A \cap B\},

         with :math:`x` a point in :math:`\mathbb{R}^N` and :math:`N`
        the dimension of the geometry space.

-        :param list geometries: A list of geometries from 'pina.geometry'
-            such as 'EllipsoidDomain' or 'CartesianDomain'.
+        :param list geometries: A list of geometries from ``pina.geometry``
+            such as ``EllipsoidDomain`` or ``CartesianDomain``.

         :Example:
-            # Create two ellipsoid domains
+            >>> # Create two ellipsoid domains
             >>> ellipsoid1 = EllipsoidDomain({'x': [-1, 1], 'y': [-1, 1]})
             >>> ellipsoid2 = EllipsoidDomain({'x': [0, 2], 'y': [0, 2]})
-
-            # Create a Exclusion of the ellipsoid domains
+            >>> # Create an Exclusion of the ellipsoid domains
             >>> exclusion = Exclusion([ellipsoid1, ellipsoid2])
         """
         super().__init__(geometries)

     def is_inside(self, point, check_border=False):
-        """Check if a point is inside the Exclusion domain.
+        """
+        Check if a point is inside the ``Exclusion`` domain.

         :param point: Point to be checked.
         :type point: torch.Tensor
-        :param bool check_border: If True, the border is considered inside.
-        :return: True if the point is inside the Exclusion domain, False otherwise.
+        :param bool check_border: If ``True``, the border is considered inside.
+        :return: ``True`` if the point is inside the Exclusion domain, ``False`` otherwise.
         :rtype: bool
         """
         flag = 0
@@ -51,31 +49,29 @@ class Exclusion(OperationInterface):
         return flag == 1

     def sample(self, n, mode='random', variables='all'):
-        """Sample routine for exclusion domain.
+        """
+        Sample routine for ``Exclusion`` domain.

-        :param n: Number of points to sample in the shape.
-        :type n: int
-        :param mode: Mode for sampling, defaults to 'random'.
-            Available modes include: random sampling, 'random'.
-        :type mode: str, optional
-        :param variables: pinn variable to be sampled, defaults to 'all'.
-        :type variables: str or list[str], optional
+        :param int n: Number of points to sample in the shape.
+        :param str mode: Mode for sampling, defaults to ``random``. Available modes include: ``random``.
+        :param variables: Variables to be sampled, defaults to ``all``.
+        :type variables: str | list[str]
+        :return: Returns ``LabelTensor`` of n sampled points.
+        :rtype: LabelTensor

         :Example:
-            # Create two Cartesian domains
+            >>> # Create two Cartesian domains
             >>> cartesian1 = CartesianDomain({'x': [0, 2], 'y': [0, 2]})
             >>> cartesian2 = CartesianDomain({'x': [1, 3], 'y': [1, 3]})
-
-            # Create a Exclusion of the ellipsoid domains
-            >>> Exclusion = Exclusion([cartesian1, cartesian2])
-
+            >>> # Create an Exclusion of the Cartesian domains
+            >>> exclusion = Exclusion([cartesian1, cartesian2])
+            >>> # Sample
-            >>> Exclusion.sample(n=5)
+            >>> exclusion.sample(n=5)
                 LabelTensor([[2.4187, 1.5792],
                             [2.7456, 2.3868],
                             [2.3830, 1.7037],
                             [0.8636, 1.8453],
                             [0.1978, 0.3526]])
-            >>> len(Exclusion.sample(n=5)
-                5
+            >>> len(exclusion.sample(n=5))
+                5
diff --git a/pina/geometry/intersection_domain.py b/pina/geometry/intersection_domain.py
index be78885..e5ecb1a 100644
--- a/pina/geometry/intersection_domain.py
+++ b/pina/geometry/intersection_domain.py
@@ -1,48 +1,47 @@
-"""Module for Location class."""
+"""Module for Intersection class. """
+
 import torch
-from .exclusion_domain import Exclusion
 from ..label_tensor import LabelTensor
 from .operation_interface import OperationInterface
 import random


 class Intersection(OperationInterface):
-    """ PINA implementation of Intersection of Domains."""

     def __init__(self, geometries):
-        """
+        r"""
         PINA implementation of Intersection of Domains.

         Given two sets :math:`A` and :math:`B` then the
-        domain difference is defined as:
+        domain intersection is defined as:

-        ..:math:
-            A \cap B = \{x \mid x \in A \text{ and } x \in B\},
+        .. math::
+            A \cap B = \{x \mid x \in A \land x \in B\},

         with :math:`x` a point in :math:`\mathbb{R}^N` and :math:`N`
        the dimension of the geometry space.

-        :param list geometries: A list of geometries from 'pina.geometry'
-            such as 'EllipsoidDomain' or 'CartesianDomain'. The intersection
+        :param list geometries: A list of geometries from ``pina.geometry``
+            such as ``EllipsoidDomain`` or ``CartesianDomain``. The intersection
            will be taken between all the geometries in the list. The
            resulting geometry will be the intersection of all the geometries
            in the list.

         :Example:
-            # Create two ellipsoid domains
+            >>> # Create two ellipsoid domains
             >>> ellipsoid1 = EllipsoidDomain({'x': [-1, 1], 'y': [-1, 1]})
             >>> ellipsoid2 = EllipsoidDomain({'x': [0, 2], 'y': [0, 2]})
-
-            # Create a Intersection of the ellipsoid domains
+            >>> # Create an Intersection of the ellipsoid domains
             >>> intersection = Intersection([ellipsoid1, ellipsoid2])
         """
         super().__init__(geometries)

     def is_inside(self, point, check_border=False):
-        """Check if a point is inside the Exclusion domain.
+        """
+        Check if a point is inside the ``Intersection`` domain.

         :param point: Point to be checked.
         :type point: torch.Tensor
-        :param bool check_border: If True, the border is considered inside.
-        :return: True if the point is inside the Exclusion domain, False otherwise.
+        :param bool check_border: If ``True``, the border is considered inside.
+        :return: ``True`` if the point is inside the Intersection domain, ``False`` otherwise.
         :rtype: bool
         """
         flag = 0
@@ -52,31 +51,29 @@ class Intersection(OperationInterface):
         return flag == len(self.geometries)

     def sample(self, n, mode='random', variables='all'):
-        """Sample routine for intersection domain.
+        """
+        Sample routine for ``Intersection`` domain.

-        :param n: Number of points to sample in the shape.
-        :type n: int
-        :param mode: Mode for sampling, defaults to 'random'.
-            Available modes include: random sampling, 'random'.
-        :type mode: str, optional
-        :param variables: pinn variable to be sampled, defaults to 'all'.
-        :type variables: str or list[str], optional
+        :param int n: Number of points to sample in the shape.
+        :param str mode: Mode for sampling, defaults to ``random``. Available modes include: ``random``.
+        :param variables: Variables to be sampled, defaults to ``all``.
+        :type variables: str | list[str]
+        :return: Returns ``LabelTensor`` of n sampled points.
+        :rtype: LabelTensor

         :Example:
-            # Create two Cartesian domains
+            >>> # Create two Cartesian domains
             >>> cartesian1 = CartesianDomain({'x': [0, 2], 'y': [0, 2]})
             >>> cartesian2 = CartesianDomain({'x': [1, 3], 'y': [1, 3]})
-
-            # Create a Intersection of the ellipsoid domains
+            >>> # Create an Intersection of the Cartesian domains
             >>> intersection = Intersection([cartesian1, cartesian2])
-
+            >>> # Sample
             >>> intersection.sample(n=5)
                 LabelTensor([[1.7697, 1.8654],
                             [1.2841, 1.1208],
                             [1.7289, 1.9843],
                             [1.3332, 1.2448],
                             [1.9902, 1.4458]])
-            >>> len(intersection.sample(n=5)
-                5
+            >>> len(intersection.sample(n=5))
+                5
diff --git a/pina/geometry/location.py b/pina/geometry/location.py
index c91b89a..a22dfe1 100644
--- a/pina/geometry/location.py
+++ b/pina/geometry/location.py
@@ -8,6 +8,7 @@ class Location(metaclass=ABCMeta):
     Abstract Location class.
     Any geometry entity should inherit from this class.
     """
+
     @abstractmethod
     def sample(self):
         """
@@ -22,9 +23,9 @@ class Location(metaclass=ABCMeta):
         Abstract method for checking if a point is inside the location. To
         be implemented in the child class.

-        :param tensor point: A tensor point to be checked.
-        :param bool check_border: a boolean that determines whether the border
-            of the location is considered checked to be considered inside or
-            not. Defaults to False.
+        :param torch.Tensor point: A tensor point to be checked.
+        :param bool check_border: A boolean that determines whether the border
+            of the location is considered inside or
+            not. Defaults to ``False``.
         """
         pass
diff --git a/pina/geometry/operation_interface.py b/pina/geometry/operation_interface.py
index ae6fc79..670e924 100644
--- a/pina/geometry/operation_interface.py
+++ b/pina/geometry/operation_interface.py
@@ -1,26 +1,18 @@
-import torch
+""" Module for OperationInterface class. """
+
 from .location import Location
 from ..utils import check_consistency
-from ..label_tensor import LabelTensor
 from abc import ABCMeta, abstractmethod
-import random


 class OperationInterface(Location, metaclass=ABCMeta):
-    """PINA Operation Interface"""

     def __init__(self, geometries):
         """
-        Abstract Operation class.
-        Any geometry operation entity must inherit from this class.
+        Abstract set operation class. Any geometry operation entity must inherit from this class.

-        .. warning::
-            The ``sample_surface=True`` option is not implemented yet
-            for Difference, Intersection, and Exclusion. The usage will
-            result in unwanted behaviour.
-
-        :param list geometries: A list of geometries from 'pina.geometry'
-            such as 'EllipsoidDomain' or 'CartesianDomain'.
+        :param list geometries: A list of geometries from ``pina.geometry``
+            such as ``EllipsoidDomain`` or ``CartesianDomain``.
         """
         # check consistency geometries
         check_consistency(geometries, Location)
@@ -35,18 +27,33 @@ class OperationInterface(Location, metaclass=ABCMeta):
     @property
     def geometries(self):
         """
-        The geometries."""
+        The geometries to perform the set operation on.
+        """
         return self._geometries

     @property
     def variables(self):
         """
-        Spatial variables.
+        Spatial variables of the domain.

         :return: All the variables defined in ``__init__`` in order.
         :rtype: list[str]
         """
         return self.geometries[0].variables
+
+    @abstractmethod
+    def is_inside(self, point, check_border=False):
+        """
+        Check if a point is inside the resulting domain after
+        a set operation is applied.
+
+        :param point: Point to be checked.
+        :type point: torch.Tensor
+        :param bool check_border: If ``True``, the border is considered inside.
+        :return: ``True`` if the point is inside the resulting domain, ``False`` otherwise.
+        :rtype: bool
+        """
+        pass

     def _check_dimensions(self, geometries):
         """Check if the dimensions of the geometries are consistent.
diff --git a/pina/geometry/simplex.py b/pina/geometry/simplex.py
index c371aec..6cfcfc7 100644
--- a/pina/geometry/simplex.py
+++ b/pina/geometry/simplex.py
@@ -13,17 +13,21 @@ class SimplexDomain(Location):
     :param simplex_matrix: A matrix of LabelTensor objects representing
        a vertex of the simplex (a tensor), and the coordinates of the
        point (a list of labels).
+    :type simplex_matrix: list[LabelTensor]
     :param sample_surface: A variable for choosing sample strategies. If
-        `sample_surface=True` only samples on the Simplex surface
-        frontier are taken. If `sample_surface=False`, no such criteria
+        ``sample_surface=True`` only samples on the Simplex surface
+        frontier are taken. If ``sample_surface=False``, no such criteria
        is followed.
+    :type sample_surface: bool
+
     .. warning::
        Sampling for dimensions greater or equal to 10 could result
        in a shrinking of the simplex, which degrades the quality
        of the samples. For dimensions higher than 10, other algorithms
        for sampling should be used.

+
     :Example:
         >>> spatial_domain = SimplexDomain(
                 [
@@ -48,12 +52,14 @@ class SimplexDomain(Location):
         matrix_labels = simplex_matrix[0].labels
         if not all(vertex.labels == matrix_labels for vertex in simplex_matrix):
-            raise ValueError(f"Labels don't match.")
-        
+            raise ValueError("Labels don't match.")
+
         # check consistency dimensions
         dim_simplex = len(matrix_labels)
         if len(simplex_matrix) != dim_simplex + 1:
-            raise ValueError("An n-dimensional simplex is composed by n + 1 tensors of dimension n.")
-
+            raise ValueError(
+                "An n-dimensional simplex is composed of n + 1 tensors of dimension n."
+            )
+
         # creating vertices matrix
         self._vertices_matrix = LabelTensor.vstack(simplex_matrix)

@@ -86,8 +92,10 @@ class SimplexDomain(Location):
         for i, coord in enumerate(self.variables):
             sorted_vertices = sorted(vertices, key=lambda vertex: vertex[i])
             # respective coord bounded by the lowest and highest values
-            span_dict[coord] = [float(sorted_vertices[0][i]),
-                                float(sorted_vertices[-1][i])]
+            span_dict[coord] = [
+                float(sorted_vertices[0][i]),
+                float(sorted_vertices[-1][i])
+            ]

         return CartesianDomain(span_dict)

@@ -96,31 +104,32 @@ class SimplexDomain(Location):
         Check if a point is inside the simplex. Uses the algorithm described
         involving barycentric coordinates:
         https://en.wikipedia.org/wiki/Barycentric_coordinate_system.
-        .. note::
-            When ```'sample_surface'``` in the ```'__init()__'```
-            is set to ```'True'```, then the method only checks
-            points on the surface, and not inside the domain.
+
         :param point: Point to be checked.
         :type point: LabelTensor
         :param check_border: Check if the point is also on the frontier
-            of the simplex, default False.
+            of the simplex, default ``False``.
         :type check_border: bool
-        :return: Returning True if the point is inside, False otherwise.
+        :return: Returning ``True`` if the point is inside, ``False`` otherwise.
         :rtype: bool
+
+        .. note::
+            When ``sample_surface`` in the ``__init__()``
+            is set to ``True``, then the method only checks
+            points on the surface, and not inside the domain.
         """
         if not all(label in self.variables for label in point.labels):
-            raise ValueError(
-                "Point labels different from constructor"
-                f" dictionary labels. Got {point.labels},"
-                f" expected {self.variables}."
-            )
+            raise ValueError("Point labels different from constructor"
+                             f" dictionary labels. Got {point.labels},"
+                             f" expected {self.variables}.")

         point_shift = point - self._vertices_matrix[-1]
         point_shift = point_shift.tensor.reshape(-1, 1)

         # compute barycentric coordinates
-        lambda_ = torch.linalg.solve(self._vectors_shifted * 1.0, point_shift * 1.0)
+        lambda_ = torch.linalg.solve(self._vectors_shifted * 1.0,
+                                     point_shift * 1.0)
         lambda_1 = 1.0 - torch.sum(lambda_)
         lambdas = torch.vstack([lambda_, lambda_1])

@@ -128,16 +137,15 @@ class SimplexDomain(Location):
         if not check_border:
             return all(torch.gt(lambdas, 0.0)) and all(torch.lt(lambdas, 1.0))

-        return all(torch.ge(lambdas, 0)) and (
-            any(torch.eq(lambdas, 0)) or any(torch.eq(lambdas, 1))
-        )
+        return all(torch.ge(lambdas, 0)) and (any(torch.eq(lambdas, 0))
+                                              or any(torch.eq(lambdas, 1)))

     def _sample_interior_randomly(self, n, variables):
         """
         Randomly sample points inside a simplex of arbitrary
        dimension, without the boundary.

         :param int n: Number of points to sample in the shape.
-        :param variables: pinn variable to be sampled, defaults to 'all'.
+        :param variables: pinn variable to be sampled, defaults to ``all``.
         :type variables: str or list[str], optional
         :return: Returns tensor of n sampled points.
        :rtype: torch.Tensor
        """
@@ -155,9 +163,9 @@ class SimplexDomain(Location):
        sampled_points = []

        while len(sampled_points) < n:
-            sampled_point = self._cartesian_bound.sample(
-                n=1, mode="random", variables=variables
-            )
+            sampled_point = self._cartesian_bound.sample(n=1,
+                                                         mode="random",
+                                                         variables=variables)

            if self.is_inside(sampled_point, self._sample_surface):
                sampled_points.append(sampled_point)
@@ -188,7 +196,9 @@ class SimplexDomain(Location):
        # extract number of vertices
        number_of_vertices = self._vertices_matrix.shape[0]
        # extract idx lambda to set to zero randomly
-        idx_lambda = torch.randint(low=0, high=number_of_vertices, size=(1,))
+        idx_lambda = torch.randint(low=0,
+                                   high=number_of_vertices,
+                                   size=(1, ))
        # build lambda vector
        # 1. sampling [1, 2)
        lambdas = torch.rand((number_of_vertices, 1))
@@ -203,13 +213,14 @@ class SimplexDomain(Location):
    def sample(self, n, mode="random", variables="all"):
        """
        Sample n points from Simplex domain.
+
-        :param int n: Number of points to sample in the shape.
-        :param str mode: Mode for sampling, defaults to 'random'.
-            Available modes include: 'random'.
-        :param variables: pinn variable to be sampled, defaults to 'all'.
-        :type variables: str or list[str], optional
-        :return: Returns LabelTensor of n sampled points
-        :rtype: LabelTensor(tensor)
+        :param int n: Number of points to sample in the shape.
+        :param str mode: Mode for sampling, defaults to ``random``. Available modes include: ``random``.
+        :param variables: Variables to be sampled, defaults to ``all``.
+        :type variables: str | list[str]
+        :return: Returns ``LabelTensor`` of n sampled points.
+        :rtype: LabelTensor
+
        .. warning::
            When ``sample_surface = True`` in the initialization, all the
            variables are sampled, despite passing different ones
@@ -225,4 +236,4 @@ class SimplexDomain(Location):
        else:
            raise NotImplementedError(f"mode={mode} is not implemented.")

-        return LabelTensor(sample_pts, labels=self.variables)
+        return LabelTensor(sample_pts, labels=self.variables)
\ No newline at end of file
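A concrete construction sketch for a 2D simplex (a triangle), following the constructor contract documented above:

.. code:: python

    import torch
    from pina import LabelTensor
    from pina.geometry import SimplexDomain

    # a 2D simplex needs 3 vertices, each a LabelTensor
    # with the same labels
    triangle = SimplexDomain([
        LabelTensor(torch.tensor([[0., 0.]]), labels=['x', 'y']),
        LabelTensor(torch.tensor([[1., 0.]]), labels=['x', 'y']),
        LabelTensor(torch.tensor([[0., 1.]]), labels=['x', 'y']),
    ])
    pts = triangle.sample(n=10, mode='random')
    print(pts.labels)  # ['x', 'y']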
diff --git a/pina/geometry/union_domain.py b/pina/geometry/union_domain.py
index 6f10c8a..1141236 100644
--- a/pina/geometry/union_domain.py
+++ b/pina/geometry/union_domain.py
@@ -1,49 +1,48 @@
+"""Module for Union class. """
+
 import torch
-from .location import Location
 from .operation_interface import OperationInterface
-from ..utils import check_consistency
 from ..label_tensor import LabelTensor
 import random


 class Union(OperationInterface):
-    """ PINA implementation of Unions of Domains."""

     def __init__(self, geometries):
-        """
+        r"""
         PINA implementation of Unions of Domains.

         Given two sets :math:`A` and :math:`B` then the
-        domain difference is defined as:
+        domain union is defined as:

-        ..:math:
-            A \cup B = \{x \mid x \in A \text{ or } x \in B\},
+        .. math::
+            A \cup B = \{x \mid x \in A \lor x \in B\},

         with :math:`x` a point in :math:`\mathbb{R}^N` and :math:`N`
        the dimension of the geometry space.

-        :param list geometries: A list of geometries from 'pina.geometry'
-            such as 'EllipsoidDomain' or 'CartesianDomain'.
+        :param list geometries: A list of geometries from ``pina.geometry``
+            such as ``EllipsoidDomain`` or ``CartesianDomain``.

         :Example:
-            # Create two ellipsoid domains
+            >>> # Create two ellipsoid domains
             >>> ellipsoid1 = EllipsoidDomain({'x': [-1, 1], 'y': [-1, 1]})
             >>> ellipsoid2 = EllipsoidDomain({'x': [0, 2], 'y': [0, 2]})
-
-            # Create a union of the ellipsoid domains
-            >>> union = GeometryUnion([ellipsoid1, ellipsoid2])
+            >>> # Create a union of the ellipsoid domains
+            >>> union = Union([ellipsoid1, ellipsoid2])
         """
         super().__init__(geometries)

     def is_inside(self, point, check_border=False):
-        """Check if a point is inside the union domain.
+        """
+        Check if a point is inside the ``Union`` domain.

         :param point: Point to be checked.
         :type point: LabelTensor
         :param check_border: Check if the point is also on the frontier
-            of the ellipsoid, default False.
+            of the domain, default ``False``.
         :type check_border: bool
-        :return: Returning True if the point is inside, False otherwise.
+        :return: Returning ``True`` if the point is inside, ``False`` otherwise.
         :rtype: bool
         """
         for geometry in self.geometries:
@@ -52,31 +51,29 @@ class Union(OperationInterface):

     def sample(self, n, mode='random', variables='all'):
-        """Sample routine for union domain.
+        """
+        Sample routine for ``Union`` domain.

-        :param n: Number of points to sample in the shape.
-        :type n: int
-        :param mode: Mode for sampling, defaults to 'random'.
-            Available modes include: random sampling, 'random'.
-        :type mode: str, optional
-        :param variables: pinn variable to be sampled, defaults to 'all'.
-        :type variables: str or list[str], optional
+        :param int n: Number of points to sample in the shape.
+        :param str mode: Mode for sampling, defaults to ``random``. Available modes include: ``random``.
+        :param variables: Variables to be sampled, defaults to ``all``.
+        :type variables: str | list[str]
+        :return: Returns ``LabelTensor`` of n sampled points.
+        :rtype: LabelTensor

         :Example:
-            # Create two ellipsoid domains
+            >>> # Create two Cartesian domains
             >>> cartesian1 = CartesianDomain({'x': [0, 2], 'y': [0, 2]})
             >>> cartesian2 = CartesianDomain({'x': [1, 3], 'y': [1, 3]})
-
-            # Create a union of the ellipsoid domains
+            >>> # Create a union of the Cartesian domains
             >>> union = Union([cartesian1, cartesian2])
-
+            >>> # Sample
             >>> union.sample(n=5)
                 LabelTensor([[1.2128, 2.1991],
                             [1.3530, 2.4317],
                             [2.2562, 1.6605],
                             [0.8451, 1.9878],
                             [1.8623, 0.7102]])
-            >>> len(union.sample(n=5)
-                5
+            >>> len(union.sample(n=5))
+                5
         """
@@ -95,8 +92,9 @@ class Union(OperationInterface):
             # int(i < remainder) is one only if we have a remainder
-            # different than zero. Notice that len(geometries) is
-            # always smaller than remaider.
+            # different than zero. Notice that the remainder is
+            # always smaller than len(geometries).
-            sampled_points.append(geometry.sample(
-                num_points + int(i < remainder), mode, variables))
+            sampled_points.append(
+                geometry.sample(num_points + int(i < remainder), mode,
+                                variables))
             # in case number of sampled points is smaller than the number of geometries
             if len(sampled_points) >= n:
                 break
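Sampling from a ``Union`` splits the requested points across the member geometries, as the loop above shows; a short sketch:

.. code:: python

    from pina.geometry import CartesianDomain, Union

    cartesian1 = CartesianDomain({'x': [0, 2], 'y': [0, 2]})
    cartesian2 = CartesianDomain({'x': [1, 3], 'y': [1, 3]})
    union = Union([cartesian1, cartesian2])

    # 5 points total: 3 from the first geometry, 2 from the second,
    # since the remainder goes to the earliest geometries in the list
    pts = union.sample(n=5)
    print(len(pts))  # 5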
diff --git a/pina/label_tensor.py b/pina/label_tensor.py
index c11f7a1..502d31f 100644
--- a/pina/label_tensor.py
+++ b/pina/label_tensor.py
@@ -17,9 +17,9 @@ class LabelTensor(torch.Tensor):
    labels. Such labels uniquely identify the columns of the tensor,
    allowing for an easier manipulation.

-    :param torch.Tensor x: the data tensor.
-    :param labels: the labels of the columns.
-    :type labels: str or iterable(str)
+    :param torch.Tensor x: The data tensor.
+    :param labels: The labels of the columns.
+    :type labels: str | list(str) | tuple(str)

    :Example:
        >>> from pina import LabelTensor
@@ -72,10 +72,8 @@ class LabelTensor(torch.Tensor):
            labels = [labels]

        if len(labels) != x.shape[-1]:
-            raise ValueError(
-                'the tensor has not the same number of columns of '
-                'the passed labels.'
-            )
+            raise ValueError('the tensor does not have the same number of '
+                             'columns as the passed labels.')
        self._labels = labels

    @property
@@ -90,11 +88,10 @@ class LabelTensor(torch.Tensor):
    @labels.setter
    def labels(self, labels):
        if len(labels) != self.shape[self.ndim - 1]:  # small check
-            raise ValueError(
-                'the tensor has not the same number of columns of '
-                'the passed labels.')
+            raise ValueError('The tensor does not have the same number of '
+                             'columns as the passed labels.')

-        self._labels = labels # assign the label
+        self._labels = labels  # assign the labels

    @staticmethod
    def vstack(label_tensors):
@@ -123,7 +120,7 @@ class LabelTensor(torch.Tensor):
        Clone the LabelTensor. For more details, see
        :meth:`torch.Tensor.clone`.

-        :return: a copy of the tensor
+        :return: A copy of the tensor.
        :rtype: LabelTensor
        """
        # # used before merging
@@ -173,12 +170,12 @@ class LabelTensor(torch.Tensor):
    def extract(self, label_to_extract):
        """
        Extract the subset of the original tensor by returning all the columns
-        corresponding to the passed `label_to_extract`.
+        corresponding to the passed ``label_to_extract``.

-        :param label_to_extract: the label(s) to extract.
-        :type label_to_extract: str or iterable(str)
-        :raises TypeError: labels are not str
-        :raises ValueError: label to extract is not in the labels list
+        :param label_to_extract: The label(s) to extract.
+        :type label_to_extract: str | list(str) | tuple(str)
+        :raises TypeError: Labels are not ``str``.
+        :raises ValueError: Label to extract is not in the labels ``list``.
        """

        if isinstance(label_to_extract, str):
@@ -211,7 +208,7 @@ class LabelTensor(torch.Tensor):

        return detached

-    def requires_grad_(self, mode = True) -> Tensor:
+    def requires_grad_(self, mode=True):
        lt = super().requires_grad_(mode)
        lt.labels = self.labels
        return lt
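A compact sketch of the labelled-column workflow that ``extract`` enables:

.. code:: python

    import torch
    from pina import LabelTensor

    data = LabelTensor(torch.rand(4, 3), labels=['x', 'y', 't'])

    # columns are addressed by name rather than by index
    space = data.extract(['x', 'y'])
    print(space.shape)   # torch.Size([4, 2])
    print(space.labels)  # ['x', 'y']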
@@ -220,9 +217,9 @@ class LabelTensor(torch.Tensor):
        """
        Return a copy of the merged tensors.

-        :param LabelTensor lt: the tensor to merge.
+        :param LabelTensor lt: The tensor to merge.
        :param str mode: {'std', 'first', 'cross'}
-        :return: the merged tensors
+        :return: The merged tensors.
        :rtype: LabelTensor
        """
        if set(self.labels).intersection(lt.labels):
@@ -239,12 +236,9 @@ class LabelTensor(torch.Tensor):
            n1 = tensor1.shape[0]
            n2 = tensor2.shape[0]

-            tensor1 = LabelTensor(
-                tensor1.repeat(n2, 1),
-                labels=tensor1.labels)
-            tensor2 = LabelTensor(
-                tensor2.repeat_interleave(n1, dim=0),
-                labels=tensor2.labels)
+            tensor1 = LabelTensor(tensor1.repeat(n2, 1), labels=tensor1.labels)
+            tensor2 = LabelTensor(tensor2.repeat_interleave(n1, dim=0),
+                                  labels=tensor2.labels)
            new_tensor = torch.cat((tensor1, tensor2), dim=1)
            new_tensor = new_tensor.as_subclass(LabelTensor)
@@ -290,7 +284,7 @@ class LabelTensor(torch.Tensor):

    def __len__(self) -> int:
        return super().__len__()
-    
+
    def __str__(self):
        if hasattr(self, 'labels'):
            s = f'labels({str(self.labels)})\n'
diff --git a/pina/loss.py b/pina/loss.py
index d2d0574..8c0b67e 100644
--- a/pina/loss.py
+++ b/pina/loss.py
@@ -1,6 +1,5 @@
 """ Module for Loss class """
-
 from abc import ABCMeta, abstractmethod
 from torch.nn.modules.loss import _Loss
 import torch
@@ -8,39 +7,47 @@ from .utils import check_consistency

 __all__ = ['LossInterface', 'LpLoss', 'PowerLoss']

+
 class LossInterface(_Loss, metaclass=ABCMeta):
    """
-    The abstract `LossInterface` class. All the class defining a PINA Loss
-    should be inheritied from this class.
+    The abstract ``LossInterface`` class. All the classes defining a PINA Loss
+    should inherit from this class.
    """

-    def __init__(self, reduction = 'mean'):
+    def __init__(self, reduction='mean'):
        """
        :param str reduction: Specifies the reduction to apply to the output:
-            ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction
-            will be applied, ``'mean'``: the sum of the output will be divided
-            by the number of elements in the output, ``'sum'``: the output will
-            be summed. Note: :attr:`size_average` and :attr:`reduce` are in the
-            process of being deprecated, and in the meantime, specifying either of
-            those two args will override :attr:`reduction`. Default: ``'mean'``.
+            ``none`` | ``mean`` | ``sum``. When ``none``: no reduction
+            will be applied, ``mean``: the sum of the output will be divided
+            by the number of elements in the output, ``sum``: the output will
+            be summed. Note: ``size_average`` and ``reduce`` are in the
+            process of being deprecated, and in the meantime, specifying either of
+            those two args will override ``reduction``. Default: ``mean``.
        """
        super().__init__(reduction=reduction, size_average=None, reduce=None)

    @abstractmethod
-    def forward(self):
+    def forward(self, input, target):
+        """Forward method for loss function.
+
+        :param torch.Tensor input: Input tensor.
+        :param torch.Tensor target: Target tensor.
+        :return: Loss evaluation.
+        :rtype: torch.Tensor
+        """
        pass

    def _reduction(self, loss):
        """Simple helper function to check reduction

        :param reduction: Specifies the reduction to apply to the output:
-            ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction
-            will be applied, ``'mean'``: the sum of the output will be divided
-            by the number of elements in the output, ``'sum'``: the output will
-            be summed. Note: :attr:`size_average` and :attr:`reduce` are in the
-            process of being deprecated, and in the meantime, specifying either of
-            those two args will override :attr:`reduction`. Default: ``'mean'``.
+            ``none`` | ``mean`` | ``sum``. When ``none``: no reduction
+            will be applied, ``mean``: the sum of the output will be divided
+            by the number of elements in the output, ``sum``: the output will
+            be summed. Note: ``size_average`` and ``reduce`` are in the
+            process of being deprecated, and in the meantime, specifying either of
+            those two args will override ``reduction``. Default: ``mean``.
+        :type reduction: str
        :param loss: Loss tensor for each element.
        :type loss: torch.Tensor
        :return: Reduced loss.
@@ -56,13 +63,14 @@ class LossInterface(_Loss, metaclass=ABCMeta):
            raise ValueError(self.reduction + " is not valid")
        return ret

+
 class LpLoss(LossInterface):
    r"""
    The Lp loss implementation class. Creates a criterion that measures
    the Lp error between each element in the input :math:`x` and
    target :math:`y`.

-    The unreduced (i.e. with :attr:`reduction` set to ``none``) loss can
+    The unreduced (i.e. with ``reduction`` set to ``none``) loss can
    be described as:

    .. math::
@@ -75,8 +83,8 @@ class LpLoss(LossInterface):
        \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
        l_n = \frac{ [\sum_{i=1}^{D} | x_n^i - y_n^i|^p] }{[\sum_{i=1}^{D}|y_n^i|^p]},

-    where :math:`N` is the batch size. If :attr:`reduction` is not ``none``
-    (default ``'mean'``), then:
+    where :math:`N` is the batch size. If ``reduction`` is not ``none``
+    (default ``mean``), then:

    .. math::
        \ell(x, y) =
@@ -90,21 +98,20 @@ class LpLoss(LossInterface):
    The sum operation still operates over all the elements, and divides by
    :math:`n`.

-    The division by :math:`n` can be avoided if one sets :attr:`reduction` to ``sum``.
+    The division by :math:`n` can be avoided if one sets ``reduction`` to ``sum``.
    """

-    def __init__(self, p=2, reduction = 'mean', relative = False):
+    def __init__(self, p=2, reduction='mean', relative=False):
        """
        :param int p: Degree of Lp norm. It specifies the type of norm to
-            be calculated. See :meth:`torch.linalg.norm` ```'ord'``` to
-            see the possible degrees. Default 2 (euclidean norm).
+            be calculated. See the `list of possible orders in torch.linalg.norm
+            <https://pytorch.org/docs/stable/generated/torch.linalg.norm.html>`_
+            for the possible degrees. Default 2 (euclidean norm).
        :param str reduction: Specifies the reduction to apply to the output:
-            ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction
-            will be applied, ``'mean'``: the sum of the output will be divided
-            by the number of elements in the output, ``'sum'``: the output will
-            be summed. Note: :attr:`size_average` and :attr:`reduce` are in the
-            process of being deprecated, and in the meantime, specifying either of
-            those two args will override :attr:`reduction`. Default: ``'mean'``.
+            ``none`` | ``mean`` | ``sum``. ``none``: no reduction
+            will be applied, ``mean``: the sum of the output will be divided
+            by the number of elements in the output, ``sum``: the output will
+            be summed.
        :param bool relative: Specifies if relative error should be computed.
        """
        super().__init__(reduction=reduction)
@@ -124,11 +131,10 @@ class LpLoss(LossInterface):
        :return: Loss evaluation.
        :rtype: torch.Tensor
        """
-        loss = torch.linalg.norm((input-target), ord=self.p, dim=-1)
+        loss = torch.linalg.norm((input - target), ord=self.p, dim=-1)
        if self.relative:
-            loss = loss / torch.linalg.norm(input, ord=self.p, dim=-1) 
+            # normalise by the target, matching the docstring formula
+            loss = loss / torch.linalg.norm(target, ord=self.p, dim=-1)
        return self._reduction(loss)
-
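A minimal check of the reduction semantics documented above (shapes follow from the formulas, with the norm taken over the last dimension):

.. code:: python

    import torch
    from pina.loss import LpLoss

    x = torch.rand(8, 3)  # batch of 8 elements, 3 components each
    y = torch.rand(8, 3)

    print(LpLoss(p=2, reduction='none')(x, y).shape)  # torch.Size([8])
    print(LpLoss(p=2, reduction='mean')(x, y).shape)  # torch.Size([])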
If :attr:`reduction` is not ``none`` - (default ``'mean'``), then: + where :math:`N` is the batch size. If ``reduction`` is not ``none`` + (default ``mean``), then: .. math:: \ell(x, y) = @@ -165,27 +171,26 @@ class PowerLoss(LossInterface): The sum operation still operates over all the elements, and divides by :math:`n`. - The division by :math:`n` can be avoided if one sets :attr:`reduction` to ``sum``. + The division by :math:`n` can be avoided if one sets ``reduction`` to ``sum``. """ - def __init__(self, p=2, reduction = 'mean', relative = False): + def __init__(self, p=2, reduction='mean', relative=False): """ :param int p: Degree of Lp norm. It specifies the type of norm to - be calculated. See :meth:`torch.linalg.norm` ```'ord'``` to + be calculated. See `list of possible orders in torch linalg + `_ to see the possible degrees. Default 2 (euclidean norm). :param str reduction: Specifies the reduction to apply to the output: - ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction - will be applied, ``'mean'``: the sum of the output will be divided - by the number of elements in the output, ``'sum'``: the output will - be summed. Note: :attr:`size_average` and :attr:`reduce` are in the - process of being deprecated, and in the meantime, specifying either of - those two args will override :attr:`reduction`. Default: ``'mean'``. + ``none`` | ``mean`` | ``sum``. When ``none``: no reduction + will be applied, ``mean``: the sum of the output will be divided + by the number of elements in the output, ``sum``: the output will + be summed. :param bool relative: Specifies if relative error should be computed. """ super().__init__(reduction=reduction) # check consistency - check_consistency(p, (str,int,float)) + check_consistency(p, (str, int, float)) self.p = p check_consistency(relative, bool) self.relative = relative @@ -198,7 +203,7 @@ class PowerLoss(LossInterface): :return: Loss evaluation. :rtype: torch.Tensor """ - loss = torch.abs((input-target)).pow(self.p).mean(-1) + loss = torch.abs((input - target)).pow(self.p).mean(-1) if self.relative: - loss = loss / torch.abs(input).pow(self.p).mean(-1) - return self._reduction(loss) \ No newline at end of file + loss = loss / torch.abs(input).pow(self.p).mean(-1) + return self._reduction(loss) diff --git a/pina/meta.py b/pina/meta.py index cfd4e09..7924232 100644 --- a/pina/meta.py +++ b/pina/meta.py @@ -1,13 +1,7 @@ __all__ = [ - '__project__', - '__title__', - '__author__', - '__copyright__', - '__license__', - '__version__', - '__mail__', - '__maintainer__', - '__status__'] + '__project__', '__title__', '__author__', '__copyright__', '__license__', + '__version__', '__mail__', '__maintainer__', '__status__' +] __project__ = 'PINA' __title__ = "pina" @@ -15,8 +9,7 @@ __author__ = "PINA Contributors" __copyright__ = "Copyright 2021-2023, PINA Contributors" __license__ = "MIT" __version__ = "0.1" -__mail__ = 'demo.nicola@gmail.com, ' # TODO +__mail__ = 'demo.nicola@gmail.com, dario.coscia@sissa.it' # TODO __maintainer__ = __author__ __status__ = "Alpha" __packagename__ = "pina-mathlab" - diff --git a/pina/model/deeponet.py b/pina/model/deeponet.py index 454724c..290c877 100644 --- a/pina/model/deeponet.py +++ b/pina/model/deeponet.py @@ -17,15 +17,16 @@ class MIONet(torch.nn.Module): .. seealso:: **Original reference**: Jin, Pengzhan, Shuai Meng, and Lu Lu. - "MIONet: Learning multiple-input operators via tensor product." 
+ *MIONet: Learning multiple-input operators via tensor product.* SIAM Journal on Scientific Computing 44.6 (2022): A3490-A3514 DOI: `10.1137/22M1477751 `_ """ + def __init__(self, networks, - aggregator="*", + aggregator="*", reduction="+", scale=True, translation=True): @@ -35,28 +36,27 @@ class MIONet(torch.nn.Module): as value the list of indices to extract from the input variable in the forward pass of the neural network. If a list of ``int`` is passed, the corresponding columns of the innermost entries are extracted. - If a list of ``str`` is passed the variables of the corresponding :class:`LabelTensor` + If a list of ``str`` is passed the variables of the corresponding :py:obj:`pina.label_tensor.LabelTensor` are extracted. The ``torch.nn.Module`` model has to take as input a - :class:`LabelTensor` or :class:`torch.Tensor`. Default implementation consist of different - branch nets and one trunk net. + :py:obj:`pina.label_tensor.LabelTensor` or :class:`torch.Tensor`. + The default implementation consists of different branch nets and one trunk net. :param str or Callable aggregator: Aggregator to be used to aggregate partial results from the modules in `nets`. Partial results are - aggregated component-wise. See - :func:`pina.model.deeponet.MIONet._symbol_functions` for the - available default aggregators. - :param str | callable reduction: Reduction to be used to reduce + aggregated component-wise. Available aggregators include + sum: ``+``, product: ``*``, mean: ``mean``, min: ``min``, max: ``max``. + :param str or Callable reduction: Reduction to be used to reduce the aggregated result of the modules in `nets` to the desired output - dimension. See :py:obj:`pina.model.deeponet.MIONet._symbol_functions` - for the available default reductions. - :param bool | callable scale: Scaling the final output before returning the - forward pass, default True. - :param bool | callable translation: Translating the final output before - returning the forward pass, default True. + dimension. Available reductions include + sum: ``+``, product: ``*``, mean: ``mean``, min: ``min``, max: ``max``. + :param bool or Callable scale: Scaling the final output before returning the + forward pass, default ``True``. + :param bool or Callable translation: Translating the final output before + returning the forward pass, default ``True``. .. warning:: In the forward pass we do not check if the input is instance of - :class:`LabelTensor` or :class:`torch.Tensor`. A general rule is - that for a :class:`LabelTensor` input both list of integers and + :py:obj:`pina.label_tensor.LabelTensor` or :class:`torch.Tensor`. A general rule is + that for a :py:obj:`pina.label_tensor.LabelTensor` input both list of integers and list of strings can be passed for ``input_indeces_branch_net`` and ``input_indeces_trunk_net``.
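+ A minimal construction sketch, assuming ``MIONet`` and ``FeedForward`` are exposed in ``pina.model`` (integer indices are used so a plain :class:`torch.Tensor` input works): + :Example: + >>> import torch + >>> from pina.model import MIONet, FeedForward + >>> branch = FeedForward(input_dimensions=2, output_dimensions=10) + >>> trunk = FeedForward(input_dimensions=1, output_dimensions=10) + >>> model = MIONet(networks={branch: [0, 1], trunk: [2]}, ... aggregator='*', reduction='+') + >>> model(torch.rand(16, 3)).shape torch.Size([16, 1])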
Differently, for a :class:`torch.Tensor` only a list of integers can be passed for ``input_indeces_branch_net`` @@ -133,8 +133,10 @@ class MIONet(torch.nn.Module): self._init_reduction(reduction=reduction) # scale and translation - self._scale = torch.nn.Parameter(torch.tensor([1.0])) if scale else torch.tensor([1.0]) - self._trasl = torch.nn.Parameter(torch.tensor([1.0])) if translation else torch.tensor([1.0]) + self._scale = torch.nn.Parameter(torch.tensor( + [1.0])) if scale else torch.tensor([1.0]) + self._trasl = torch.nn.Parameter(torch.tensor( + [1.0])) if translation else torch.tensor([1.0]) @staticmethod def _symbol_functions(**kwargs): @@ -149,7 +151,7 @@ class MIONet(torch.nn.Module): "min": lambda x: torch.min(x, **kwargs).values, "max": lambda x: torch.max(x, **kwargs).values, } - + def _init_aggregator(self, aggregator): aggregator_funcs = DeepONet._symbol_functions(dim=2) if aggregator in aggregator_funcs: @@ -161,7 +163,6 @@ class MIONet(torch.nn.Module): self._aggregator = aggregator_func - def _init_reduction(self, reduction): reduction_funcs = DeepONet._symbol_functions(dim=-1) if reduction in reduction_funcs: @@ -178,27 +179,32 @@ class MIONet(torch.nn.Module): try: return x.extract(indeces) except AttributeError: - raise RuntimeError('Not possible to extract input variables from tensor.' - ' Ensure that the passed tensor is a LabelTensor or' - ' pass list of integers to extract variables. For' - ' more information refer to warning in the documentation.') + raise RuntimeError( + 'Not possible to extract input variables from tensor.' + ' Ensure that the passed tensor is a LabelTensor or' + ' pass list of integers to extract variables. For' + ' more information refer to warning in the documentation.') elif isinstance(indeces[0], int): return x[..., indeces] else: - raise RuntimeError('Not able to extract right indeces for tensor.' - ' For more information refer to warning in the documentation.') - + raise RuntimeError( + 'Not able to extract right indeces for tensor.' + ' For more information refer to warning in the documentation.') + def forward(self, x): """ Defines the computation performed at every call. - :param LabelTensor | torch.Tensor x: The input tensor for the forward call. + :param LabelTensor or torch.Tensor x: The input tensor for the forward call. :return: The output computed by the DeepONet model. - :rtype: LabelTensor | torch.Tensor + :rtype: LabelTensor or torch.Tensor """ # forward pass - output_ = [model(self._get_vars(x, indeces)) for model, indeces in zip(self.models, self._indeces)] + output_ = [ + model(self._get_vars(x, indeces)) + for model, indeces in zip(self.models, self._indeces) + ] # aggregation aggregated = self._aggregator(torch.dstack(output_)) @@ -206,7 +212,7 @@ class MIONet(torch.nn.Module): # reduce output_ = self._reduction(aggregated).reshape(-1, 1) - # scale and translate + # scale and translate output_ *= self._scale output_ += self._trasl @@ -218,7 +224,7 @@ class MIONet(torch.nn.Module): The aggregator function. """ return self._aggregator - + @property def reduction(self): """ @@ -232,28 +238,28 @@ class MIONet(torch.nn.Module): The scale factor. """ return self._scale - + @property def translation(self): """ The translation factor for MIONet. """ return self._trasl - + @property def indeces_variables_extracted(self): """ The input indeces for each model in form of list. """ return self._indeces - + @property def model(self): """ The models in form of list. 
""" return self._indeces - + class DeepONet(MIONet): """ @@ -273,52 +279,52 @@ class DeepONet(MIONet): `_ """ + def __init__(self, branch_net, trunk_net, input_indeces_branch_net, input_indeces_trunk_net, - aggregator="*", + aggregator="*", reduction="+", scale=True, translation=True): """ :param torch.nn.Module branch_net: The neural network to use as branch - model. It has to take as input a :class:`LabelTensor` + model. It has to take as input a :py:obj:`pina.label_tensor.LabelTensor` or :class:`torch.Tensor`. The number of dimensions of the output has to be the same of the ``trunk_net``. :param torch.nn.Module trunk_net: The neural network to use as trunk - model. It has to take as input a :class:`LabelTensor` + model. It has to take as input a :py:obj:`pina.label_tensor.LabelTensor` or :class:`torch.Tensor`. The number of dimensions of the output has to be the same of the ``branch_net``. - :param list(int) | list(str) input_indeces_branch_net: List of indeces + :param list(int) or list(str) input_indeces_branch_net: List of indeces to extract from the input variable in the forward pass for the branch net. If a list of ``int`` is passed, the corresponding columns of the inner most entries are extracted. If a list of ``str`` is passed - the variables of the corresponding :class:`LabelTensor` are extracted. - :param list(int) | list(str) input_indeces_trunk_net: List of indeces + the variables of the corresponding :py:obj:`pina.label_tensor.LabelTensor` are extracted. + :param list(int) or list(str) input_indeces_trunk_net: List of indeces to extract from the input variable in the forward pass for the trunk net. If a list of ``int`` is passed, the corresponding columns of the inner most entries are extracted. If a list of ``str`` is passed - the variables of the corresponding :class:`LabelTensor` are extracted. - :param str | callable aggregator: Aggregator to be used to aggregate + the variables of the corresponding :py:obj:`pina.label_tensor.LabelTensor` are extracted. + :param str or Callable aggregator: Aggregator to be used to aggregate partial results from the modules in `nets`. Partial results are - aggregated component-wise. See - :func:`pina.model.deeponet.MIONet._symbol_functions` for the - available default aggregators. - :param str | callable reduction: Reduction to be used to reduce + aggregated component-wise. Available aggregators include + sum: ``+``, product: ``*``, mean: ``mean``, min: ``min``, max: ``max``. + :param str or Callable reduction: Reduction to be used to reduce the aggregated result of the modules in `nets` to the desired output - dimension. See :py:obj:`pina.model.deeponet.MIONet._symbol_functions` for the available default - reductions. - :param bool | callable scale: Scaling the final output before returning the + dimension. Available reductions include + sum: ``+``, product: ``*``, mean: ``mean``, min: ``min``, max: ``max``. + :param bool or Callable scale: Scaling the final output before returning the forward pass, default True. - :param bool | callable translation: Translating the final output before + :param bool or Callable translation: Translating the final output before returning the forward pass, default True. .. warning:: In the forward pass we do not check if the input is instance of - :class:`LabelTensor` or :class:`torch.Tensor`. A general rule is - that for a :class:`LabelTensor` input both list of integers and + :py:obj:`pina.label_tensor.LabelTensor` or :class:`torch.Tensor`. 
A general rule is + that for a :py:obj:`pina.label_tensor.LabelTensor` input both list of integers and list of strings can be passed for ``input_indeces_branch_net`` and ``input_indeces_trunk_net``. Differently, for a :class:`torch.Tensor` only a list of integers can be passed for ``input_indeces_branch_net`` @@ -355,24 +361,38 @@ class DeepONet(MIONet): ) ) """ - networks = {branch_net : input_indeces_branch_net, - trunk_net : input_indeces_trunk_net} + networks = { + branch_net: input_indeces_branch_net, + trunk_net: input_indeces_trunk_net + } super().__init__(networks=networks, aggregator=aggregator, reduction=reduction, scale=scale, translation=translation) - @property - def branch_net(self): - """ - The branch net for DeepONet. - """ - return self.models[0] - - @property - def trunk_net(self): - """ - The trunk net for DeepONet. - """ - return self.models[1] \ No newline at end of file + + def forward(self, x): + """ + Defines the computation performed at every call. + + :param LabelTensor or torch.Tensor x: The input tensor for the forward call. + :return: The output computed by the DeepONet model. + :rtype: LabelTensor or torch.Tensor + """ + return super().forward(x) + + + @property + def branch_net(self): + """ + The branch net for DeepONet. + """ + return self.models[0] + + @property + def trunk_net(self): + """ + The trunk net for DeepONet. + """ + return self.models[1] diff --git a/pina/model/feed_forward.py b/pina/model/feed_forward.py index 3ad75d9..63d9bb2 100644 --- a/pina/model/feed_forward.py +++ b/pina/model/feed_forward.py @@ -11,11 +11,11 @@ class FeedForward(torch.nn.Module): perceptron. :param int input_dimensions: The number of input components of the model. - Expected tensor shape of the form (*, input_dimensions), where * - means any number of dimensions including none. + Expected tensor shape of the form :math:`(*, d)`, where * + means any number of dimensions including none, and :math:`d` the ``input_dimensions``. :param int output_dimensions: The number of output components of the model. - Expected tensor shape of the form (*, output_dimensions), where * - means any number of dimensions including none. + Expected tensor shape of the form :math:`(*, d)`, where * + means any number of dimensions including none, and :math:`d` the ``output_dimensions``. :param int inner_size: number of neurons in the hidden layer(s). Default is 20. :param int n_layers: number of hidden layers. Default is 2. @@ -23,18 +23,24 @@ class FeedForward(torch.nn.Module): :class:`torch.nn.Module` is passed, this is used as activation function after any layers, except the last one. If a list of Modules is passed, they are used as activation functions at any layers, in order. - :param iterable(int) layers: a list containing the number of neurons for - any hidden layers. If specified, the parameters `n_layers` e - `inner_size` are not considered. - :param bool bias: If `True` the MLP will consider some bias. + :param list(int) | tuple(int) layers: a list containing the number of neurons for + the hidden layers. If specified, the parameters ``n_layers`` and + ``inner_size`` are not considered. + :param bool bias: If ``True`` the MLP layers will include a bias term.
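+ A minimal usage sketch, assuming the class is exposed as ``pina.model.FeedForward``: + :Example: + >>> import torch + >>> from pina.model import FeedForward + >>> model = FeedForward(input_dimensions=2, output_dimensions=1, ... inner_size=10, n_layers=3) + >>> model(torch.rand(5, 2)).shape torch.Size([5, 1])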
""" - def __init__(self, input_dimensions, output_dimensions, inner_size=20, - n_layers=2, func=nn.Tanh, layers=None, bias=True): + + def __init__(self, + input_dimensions, + output_dimensions, + inner_size=20, + n_layers=2, + func=nn.Tanh, + layers=None, + bias=True): """ """ super().__init__() - if not isinstance(input_dimensions, int): raise ValueError('input_dimensions expected to be int.') self.input_dimension = input_dimensions @@ -52,8 +58,7 @@ class FeedForward(torch.nn.Module): self.layers = [] for i in range(len(tmp_layers) - 1): self.layers.append( - nn.Linear(tmp_layers[i], tmp_layers[i + 1], bias=bias) - ) + nn.Linear(tmp_layers[i], tmp_layers[i + 1], bias=bias)) if isinstance(func, list): self.functions = func @@ -76,10 +81,10 @@ class FeedForward(torch.nn.Module): """ Defines the computation performed at every call. - :param x: . - :type x: :class:`pina.LabelTensor` + :param x: The tensor to apply the forward pass. + :type x: torch.Tensor :return: the output computed by the model. - :rtype: LabelTensor + :rtype: torch.Tensor """ return self.model(x) @@ -93,18 +98,18 @@ class ResidualFeedForward(torch.nn.Module): .. seealso:: **Original reference**: Wang, Sifan, Yujun Teng, and Paris Perdikaris. - "Understanding and mitigating gradient flow pathologies in physics-informed - neural networks." SIAM Journal on Scientific Computing 43.5 (2021): A3055-A3081. + *Understanding and mitigating gradient flow pathologies in physics-informed + neural networks*. SIAM Journal on Scientific Computing 43.5 (2021): A3055-A3081. DOI: `10.1137/20M1318043 `_ :param int input_dimensions: The number of input components of the model. - Expected tensor shape of the form (*, input_dimensions), where * - means any number of dimensions including none. + Expected tensor shape of the form :math:`(*, d)`, where * + means any number of dimensions including none, and :math:`d` the ``input_dimensions``. :param int output_dimensions: The number of output components of the model. - Expected tensor shape of the form (*, output_dimensions), where * - means any number of dimensions including none. + Expected tensor shape of the form :math:`(*, d)`, where * + means any number of dimensions including none, and :math:`d` the ``output_dimensions``. :param int inner_size: number of neurons in the hidden layer(s). Default is 20. :param int n_layers: number of hidden layers. Default is 2. @@ -112,14 +117,21 @@ class ResidualFeedForward(torch.nn.Module): :class:`torch.nn.Module` is passed, this is used as activation function after any layers, except the last one. If a list of Modules is passed, they are used as activation functions at any layers, in order. - :param bool bias: If `True` the MLP will consider some bias. + :param bool bias: If ``True`` the MLP will consider some bias. :param list | tuple transformer_nets: a list or tuple containing the two torch.nn.Module which act as transformer network. The input dimension of the network must be the same as ``input_dimensions``, and the output dimension must be the same as ``inner_size``. 
""" - def __init__(self, input_dimensions, output_dimensions, inner_size=20, - n_layers=2, func=nn.Tanh, bias=True, transformer_nets=None): + + def __init__(self, + input_dimensions, + output_dimensions, + inner_size=20, + n_layers=2, + func=nn.Tanh, + bias=True, + transformer_nets=None): """ """ super().__init__() @@ -135,26 +147,37 @@ class ResidualFeedForward(torch.nn.Module): # check transformer nets if transformer_nets is None: transformer_nets = [ - EnhancedLinear(nn.Linear(in_features=input_dimensions, out_features=inner_size), - nn.Tanh()), - EnhancedLinear(nn.Linear(in_features=input_dimensions, out_features=inner_size), - nn.Tanh()) - ] + EnhancedLinear( + nn.Linear(in_features=input_dimensions, + out_features=inner_size), nn.Tanh()), + EnhancedLinear( + nn.Linear(in_features=input_dimensions, + out_features=inner_size), nn.Tanh()) + ] elif isinstance(transformer_nets, (list, tuple)): if len(transformer_nets) != 2: - raise ValueError('transformer_nets needs to be a list of len two.') + raise ValueError( + 'transformer_nets needs to be a list of len two.') for net in transformer_nets: if not isinstance(net, nn.Module): - raise ValueError('transformer_nets needs to be a list of torch.nn.Module.') + raise ValueError( + 'transformer_nets needs to be a list of torch.nn.Module.' + ) x = torch.rand(10, input_dimensions) try: out = net(x) except RuntimeError: - raise ValueError('transformer network input incompatible with input_dimensions.') + raise ValueError( + 'transformer network input incompatible with input_dimensions.' + ) if out.shape[-1] != inner_size: - raise ValueError('transformer network output incompatible with inner_size.') + raise ValueError( + 'transformer network output incompatible with inner_size.' + ) else: - RuntimeError('Runtime error for transformer nets, check official documentation.') + RuntimeError( + 'Runtime error for transformer nets, check official documentation.' + ) # assign variables self.input_dimension = input_dimensions @@ -170,9 +193,10 @@ class ResidualFeedForward(torch.nn.Module): self.layers = [] for i in range(len(tmp_layers) - 1): self.layers.append( - nn.Linear(tmp_layers[i], tmp_layers[i + 1], bias=bias) - ) - self.last_layer = nn.Linear(tmp_layers[len(tmp_layers) - 1], output_dimensions, bias=bias) + nn.Linear(tmp_layers[i], tmp_layers[i + 1], bias=bias)) + self.last_layer = nn.Linear(tmp_layers[len(tmp_layers) - 1], + output_dimensions, + bias=bias) if isinstance(func, list): self.functions = func() @@ -184,28 +208,27 @@ class ResidualFeedForward(torch.nn.Module): unique_list = [] for layer, func in zip(self.layers, self.functions): - unique_list.append(EnhancedLinear(layer=layer, - activation=func)) + unique_list.append(EnhancedLinear(layer=layer, activation=func)) self.inner_layers = torch.nn.Sequential(*unique_list) - + def forward(self, x): """ Defines the computation performed at every call. - :param x: . - :type x: :class:`pina.LabelTensor` + :param x: The tensor to apply the forward pass. + :type x: torch.Tensor :return: the output computed by the model. - :rtype: LabelTensor + :rtype: torch.Tensor """ # enhance the input with transformer input_ = [] for nets in self.transformer_nets: input_.append(nets(x)) - + # skip connections pass for layer in self.inner_layers.children(): x = layer(x) x = (1. 
- x) * input_[0] + x * input_[1] # last layer - return self.last_layer(x) \ No newline at end of file + return self.last_layer(x) diff --git a/pina/model/fno.py b/pina/model/fno.py index 9e70066..6384a65 100644 --- a/pina/model/fno.py +++ b/pina/model/fno.py @@ -17,22 +17,23 @@ class FNO(torch.nn.Module): .. seealso:: **Original reference**: Li, Z., Kovachki, N., Azizzadenesheli, K., Liu, B., - Bhattacharya, K., Stuart, A., & Anandkumar, A. (2020). "Fourier neural operator for - parametric partial differential equations". + Bhattacharya, K., Stuart, A., & Anandkumar, A. (2020). *Fourier neural operator for + parametric partial differential equations*. DOI: `arXiv preprint arXiv:2010.08895. `_ """ + def __init__(self, - lifting_net, + lifting_net, projecting_net, n_modes, - dimensions = 3, - padding = 8, - padding_type = "constant", - inner_size = 20, - n_layers = 2, - func = nn.Tanh, - layers = None): + dimensions=3, + padding=8, + padding_type="constant", + inner_size=20, + n_layers=2, + func=nn.Tanh, + layers=None): super().__init__() # check type consistency @@ -50,10 +51,11 @@ class FNO(torch.nn.Module): else: raise ValueError('layers must be tuple or list of int.') if not isinstance(n_modes, (list, tuple, int)): - raise ValueError('n_modes must be a int or list or tuple of valid modes.' - ' More information on the official documentation.') - - # assign variables + raise ValueError( + 'n_modes must be an int or a list or tuple of valid modes.' + ' More information on the official documentation.') + + # assign variables # TODO check input lifting net and input projecting net self._lifting_net = lifting_net self._projecting_net = projecting_net @@ -78,15 +80,18 @@ class FNO(torch.nn.Module): # 2. Assign activation functions for each FNO layer if isinstance(func, list): if len(layers) != len(func): - raise RuntimeError('Uncosistent number of layers and functions.') + raise RuntimeError( + 'Inconsistent number of layers and functions.') self._functions = func else: self._functions = [func for _ in range(len(layers))] # 3. Assign modes functions for each FNO layer if isinstance(n_modes, list): - if all(isinstance(i, list) for i in n_modes) and len(layers) != len(n_modes): - raise RuntimeError('Uncosistent number of layers and functions.') + if all(isinstance(i, list) + for i in n_modes) and len(layers) != len(n_modes): + raise RuntimeError( + 'Inconsistent number of layers and modes.') elif all(isinstance(i, int) for i in n_modes): n_modes = [n_modes] * len(layers) else: @@ -105,8 +110,7 @@ class FNO(torch.nn.Module): fourier_layer(input_numb_fields=tmp_layers[i], output_numb_fields=tmp_layers[i + 1], n_modes=n_modes[i], - activation=self._functions[i]) - ) + activation=self._functions[i])) self._layers = nn.Sequential(*self._layers) # 5. Padding values for spectral conv @@ -114,9 +118,9 @@ class FNO(torch.nn.Module): padding = [padding] * dimensions self._ipad = [-pad if pad > 0 else None for pad in padding[:dimensions]] self._padding_type = padding_type - self._pad = [val for pair in zip([0]*dimensions, padding) for val in pair] + self._pad = [ + val for pair in zip([0] * dimensions, padding) for val in pair + ] def forward(self, x): """ @@ -136,9 +140,9 @@ class FNO(torch.nn.Module): # lifting the input in higher dimensional space x = self._lifting_net(x) - + # permuting the input [batch, channels, x, y, ...]
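+ # e.g. a 2D problem lifted to shape [batch, x, y, channels] has x.ndim == 4, + # so the line below builds permutation_idx = [0, 3, 1, 2], moving the + # channel axis to position 1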
- permutation_idx = [0, x.ndim-1, *[i for i in range(1, x.ndim-1)]] + permutation_idx = [0, x.ndim - 1, *[i for i in range(1, x.ndim - 1)]] x = x.permute(permutation_idx) # padding the input @@ -148,7 +152,7 @@ class FNO(torch.nn.Module): x = self._layers(x) # remove padding - idxs = [slice(None), slice(None)] + [slice(pad) for pad in self._ipad] + idxs = [slice(None), slice(None)] + [slice(pad) for pad in self._ipad] x = x[idxs] # permuting back [batch, x, y, ..., channels] diff --git a/pina/model/layers/convolution.py b/pina/model/layers/convolution.py index 0539dcd..3b9fdcd 100644 --- a/pina/model/layers/convolution.py +++ b/pina/model/layers/convolution.py @@ -10,10 +10,16 @@ class BaseContinuousConv(torch.nn.Module, metaclass=ABCMeta): Abstract class """ - def __init__(self, input_numb_field, output_numb_field, - filter_dim, stride, model=None, optimize=False, + def __init__(self, + input_numb_field, + output_numb_field, + filter_dim, + stride, + model=None, + optimize=False, no_overlap=False): - """Base Class for Continuous Convolution. + """ + Base Class for Continuous Convolution. The algorithm expects input to be in the form: $$[B \times N_{in} \times N \times D]$$ @@ -50,7 +56,7 @@ class BaseContinuousConv(torch.nn.Module, metaclass=ABCMeta): :param stride: stride for the filter :type stride: dict :param model: neural network for inner parametrization, - defaults to None + defaults to None. :type model: torch.nn.Module, optional :param optimize: flag for performing optimization on the continuous filter, defaults to False. The flag `optimize=True` should be @@ -114,37 +120,37 @@ class BaseContinuousConv(torch.nn.Module, metaclass=ABCMeta): self.transpose = self.transpose_overlap class DefaultKernel(torch.nn.Module): + def __init__(self, input_dim, output_dim): super().__init__() assert isinstance(input_dim, int) assert isinstance(output_dim, int) - self._model = torch.nn.Sequential( - torch.nn.Linear(input_dim, 20), - torch.nn.ReLU(), - torch.nn.Linear(20, 20), - torch.nn.ReLU(), - torch.nn.Linear(20, output_dim) - ) + self._model = torch.nn.Sequential(torch.nn.Linear(input_dim, 20), + torch.nn.ReLU(), + torch.nn.Linear(20, 20), + torch.nn.ReLU(), + torch.nn.Linear(20, output_dim)) + def forward(self, x): return self._model(x) - @ property + @property def net(self): return self._net - @ property + @property def stride(self): return self._stride - @ property + @property def filter_dim(self): return self._dim - @ property + @property def input_numb_field(self): return self._input_numb_field - @ property + @property def output_numb_field(self): return self._output_numb_field diff --git a/pina/model/layers/convolution_2d.py b/pina/model/layers/convolution_2d.py index 86677d1..4668796 100644 --- a/pina/model/layers/convolution_2d.py +++ b/pina/model/layers/convolution_2d.py @@ -9,76 +9,66 @@ class ContinuousConvBlock(BaseContinuousConv): """ Implementation of Continuous Convolutional operator. + The algorithm expects input to be in the form: + :math:`[B, N_{in}, N, D]` + where :math:`B` is the batch_size, :math:`N_{in}` is the number of input + fields, :math:`N` the number of points in the mesh, :math:`D` the dimension + of the problem. In particular: + + * :math:`D` is the number of spatial variables + 1. The last column must + contain the field value. For example for 2D problems :math:`D=3` and + the tensor will be something like ``[first coordinate, second + coordinate, field value]``. + * :math:`N_{in}` represents the number of vectorial function presented. 
+ For example, a vectorial function :math:`f = [f_1, f_2]` will have + :math:`N_{in}=2`. + .. seealso:: - **Original reference**: Coscia, D., Meneghetti, L., Demo, N., - Stabile, G., & Rozza, G.. (2022). A Continuous Convolutional Trainable - Filter for Modelling Unstructured Data. - DOI: `10.48550/arXiv.2210.13416 - `_. + **Original reference**: Coscia, D., Meneghetti, L., Demo, N. et al. + *A continuous convolutional trainable filter for modelling unstructured data*. + Comput Mech 72, 253–265 (2023). DOI ``_ """ - def __init__(self, input_numb_field, output_numb_field, - filter_dim, stride, model=None, optimize=False, + def __init__(self, + input_numb_field, + output_numb_field, + filter_dim, + stride, + model=None, + optimize=False, no_overlap=False): """ - - :param input_numb_field: Number of fields N_in in the input. + :param input_numb_field: Number of fields :math:`N_{in}` in the input. :type input_numb_field: int - :param output_numb_field: Number of fields N_out in the output. + :param output_numb_field: Number of fields :math:`N_{out}` in the output. :type output_numb_field: int :param filter_dim: Dimension of the filter. - :type filter_dim: tuple/ list + :type filter_dim: tuple(int) | list(int) :param stride: Stride for the filter. :type stride: dict :param model: Neural network for inner parametrization, - defaults to None. If None, a default multilayer perceptron - is used, see BaseContinuousConv.DefaultKernel. - :type model: torch.nn.Module, optional + defaults to ``None``. If None, a default multilayer perceptron + with three linear layers of width twenty and ReLU activations is used. + :type model: torch.nn.Module :param optimize: Flag for performing optimization on the continuous filter, defaults to False. The flag `optimize=True` should be used only when the scatter datapoints are fixed through the - training. If torch model is in `.eval()` mode, the flag is + training. If torch model is in ``.eval()`` mode, the flag is automatically set to False. - :type optimize: bool, optional + :type optimize: bool :param no_overlap: Flag for performing optimization on the transpose continuous filter, defaults to False. The flag set to `True` should be used only when the filter positions do not overlap for different strides. A RuntimeError will be raised in case of non-compatible strides. - :type no_overlap: bool, optional + :type no_overlap: bool .. note:: Using `optimize=True` the filter can be used either in `forward` or in `transpose` mode, not both. If `optimize=False` the same filter can be used for both `transpose` and `forward` modes. - .. warning:: - The algorithm expects input to be in the form: [B x N_in x N x D] - where B is the batch_size, N_in is the number of input - fields, N the number of points in the mesh, D the dimension - of the problem. In particular: - - * D is the number of spatial variables + 1. The last column must - contain the field value. For example for 2D problems D=3 and - the tensor will be something like `[first coordinate, second - coordinate, field value]`. - - * N_in represents the number of vectorial function presented. - For example a vectorial function f = [f_1, f_2] will have - N_in=2. - - The algorithm returns a tensor of shape: [B x N_out x N x D] - where B is the batch_size, N_out is the number of output - fields, N' the number of points in the mesh, D the dimension - of the problem (coordinates + field value).
- - For example, a 2-dimensional vectorial function N_in=2 of - 3-dimensional input D=3+1=4 with 100 points input mesh and batch - size of 8 is represented as a tensor `[8, 2, 100, 4]`, where the - columns `[:, 0, :, -1]` and `[:, 1, :, -1]` represent the first and - second field value respectively. :Example: >>> class MLP(torch.nn.Module): def __init__(self) -> None: @@ -140,11 +130,12 @@ class ContinuousConvBlock(BaseContinuousConv): self._stride = self._stride._stride_discrete def _spawn_networks(self, model): - """Private method to create a collection of kernels + """ + Private method to create a collection of kernels - :param model: a torch.nn.Module model in form of Object class + :param model: A :class:`torch.nn.Module` model in form of Object class. :type model: torch.nn.Module - :return: list of torch.nn.Module models + :return: List of :class:`torch.nn.Module` models. :rtype: torch.nn.ModuleList """ @@ -169,12 +160,13 @@ class ContinuousConvBlock(BaseContinuousConv): return torch.nn.ModuleList(nets) def _extract_mapped_points(self, batch_idx, index, x): - """Priviate method to extract mapped points in the filter + """ + Private method to extract mapped points in the filter - :param x: input tensor [channel x N x dim] - :type x: torch.tensor - :return: mapped points and indeces for each channel - :rtype: tuple(torch.tensor, list) + :param x: Input tensor of shape ``[channel, N, dim]`` + :type x: torch.Tensor + :return: Mapped points and indices for each channel. + :rtype: torch.Tensor, list """ mapped_points = [] @@ -210,10 +202,11 @@ class ContinuousConvBlock(BaseContinuousConv): return stacked_input, indeces_channels def _find_index(self, X): - """Private method to extract indeces for convolution. + """ + Private method to extract indices for convolution. - :param X: input tensor, as in ContinuousConv2D docstring - :type X: torch.tensor + :param X: Input tensor, as in ContinuousConvBlock ``__init__``. + :type X: torch.Tensor """ # append the index for each stride @@ -227,10 +220,11 @@ class ContinuousConvBlock(BaseContinuousConv): self._index = index def _make_grid_forward(self, X): - """Private method to create forward convolution grid. + """ + Private method to create forward convolution grid. - :param X: input tensor, as in ContinuousConv2D docstring - :type X: torch.tensor + :param X: Input tensor, as in ContinuousConvBlock docstring. + :type X: torch.Tensor """ # filter dimension + number of points in output grid @@ -238,10 +232,8 @@ class ContinuousConvBlock(BaseContinuousConv): number_points = len(self._stride) # initialize the grid - grid = torch.zeros(size=(X.shape[0], - self._output_numb_field, - number_points, - filter_dim + 1), + grid = torch.zeros(size=(X.shape[0], self._output_numb_field, + number_points, filter_dim + 1), device=X.device, dtype=X.dtype) grid[..., :-1] = (self._stride + self._dim * 0.5) @@ -250,10 +242,12 @@ class ContinuousConvBlock(BaseContinuousConv): self._grid = grid.detach() def _make_grid_transpose(self, X): - """Private method to create transpose convolution grid. + """ + Private method to create transpose convolution grid. + + :param X: Input tensor, as in ContinuousConvBlock docstring. + :type X: torch.Tensor - :param X: input tensor, as in ContinuousConv2D docstring - :type X: torch.tensor """ # initialize to all zeros @@ -264,13 +258,14 @@ class ContinuousConvBlock(BaseContinuousConv): self._grid_transpose = tmp def _make_grid(self, X, type): - """Private method to create convolution grid.
+ """ + Private method to create convolution grid. - :param X: input tensor, as in ContinuousConv2D docstring - :type X: torch.tensor - :param type: type of convolution, ['forward', 'inverse'] the - possibilities - :type type: string + :param X: Input tensor, as in ContinuousConvBlock docstring. + :type X: torch.Tensor + :param type: Type of convolution, ``['forward', 'inverse']`` the + possibilities. + :type type: str """ # choose the type of convolution @@ -282,16 +277,16 @@ class ContinuousConvBlock(BaseContinuousConv): raise TypeError def _initialize_convolution(self, X, type='forward'): - """Private method to intialize the convolution. + """ + Private method to intialize the convolution. The convolution is initialized by setting a grid and calculate the index for finding the points inside the filter. - :param X: input tensor, as in ContinuousConv2D docstring - :type X: torch.tensor - :param type: type of convolution, ['forward', 'inverse'] the - possibilities - :type type: string + :param X: Input tensor, as in ContinuousConvBlock docstring. + :type X: torch.Tensor + :param str type: type of convolution, ``['forward', 'inverse'] ``the + possibilities. """ # variable for the convolution @@ -301,12 +296,13 @@ class ContinuousConvBlock(BaseContinuousConv): self._find_index(X) def forward(self, X): - """Forward pass in the layer + """ + Forward pass in the convolutional layer. - :param x: input data (input_numb_field x N x filter_dim) - :type x: torch.tensor - :return: feed forward convolution (output_numb_field x N x filter_dim) - :rtype: torch.tensor + :param x: Input data for the convolution :math:`[B, N_{in}, N, D]`. + :type x: torch.Tensor + :return: Convolution output :math:`[B, N_{out}, N, D]`. + :rtype: torch.Tensor """ # initialize convolution @@ -353,34 +349,35 @@ class ContinuousConvBlock(BaseContinuousConv): # sum filters (for each input fields) in groups # for different ouput fields - conv[batch_idx, ..., -1] = res_tmp.reshape(self._output_numb_field, - self._input_numb_field, - -1).sum(1) + conv[batch_idx, ..., + -1] = res_tmp.reshape(self._output_numb_field, + self._input_numb_field, -1).sum(1) return conv def transpose_no_overlap(self, integrals, X): - """Transpose pass in the layer for no-overlapping filters + """ + Transpose pass in the layer for no-overlapping filters :param integrals: Weights for the transpose convolution. Shape - [B x N_in x N] - where B is the batch_size, N_in is the number of input - fields, N the number of points in the mesh, D the dimension + :math:`[B, N_{in}, N]` + where B is the batch_size, :math`N_{in}` is the number of input + fields, :math:`N` the number of points in the mesh, D the dimension of the problem. :type integral: torch.tensor :param X: Input data. Expect tensor of shape - [B x N_in x M x D] where B is the batch_size, - N_in is the number of input fields, M the number of points - in the mesh, D the dimension of the problem. Note, last column - :type X: torch.tensor + :math:`[B, N_{in}, M, D]` where :math:`B` is the batch_size, + :math`N_{in}`is the number of input fields, :math:`M` the number of points + in the mesh, :math:`D` the dimension of the problem. + :type X: torch.Tensor :return: Feed forward transpose convolution. Tensor of shape - [B x N_out x N] where B is the batch_size, - N_out is the number of output fields, N the number of points - in the mesh, D the dimension of the problem. 
- :rtype: torch.tensor + :math:`[B, N_{out}, M, D]` where :math:`B` is the batch size, + :math:`N_{out}` is the number of output fields, :math:`M` the number of points + in the mesh, :math:`D` the dimension of the problem. + :rtype: torch.Tensor .. note:: - This function is automatically called when `.transpose()` - method is used and `no_overlap=True` + This function is automatically called when ``.transpose()`` + method is used and ``no_overlap=True`` """ # initialize convolution @@ -437,27 +434,28 @@ class ContinuousConvBlock(BaseContinuousConv): return conv_transposed def transpose_overlap(self, integrals, X): - """Transpose pass in the layer for overlapping filters + """ + Transpose pass in the layer for overlapping filters. :param integrals: Weights for the transpose convolution. Shape - [B x N_in x N] - where B is the batch_size, N_in is the number of input - fields, N the number of points in the mesh, D the dimension + :math:`[B, N_{in}, N]` + where :math:`B` is the batch size, :math:`N_{in}` is the number of input + fields, :math:`N` the number of points in the mesh, :math:`D` the dimension of the problem. :type integrals: torch.Tensor :param X: Input data. Expect tensor of shape - [B x N_in x M x D] where B is the batch_size, - N_in is the number of input fields, M the number of points - in the mesh, D the dimension of the problem. Note, last column - :type X: torch.tensor + :math:`[B, N_{in}, M, D]` where :math:`B` is the batch size, + :math:`N_{in}` is the number of input fields, :math:`M` the number of points + in the mesh, :math:`D` the dimension of the problem. + :type X: torch.Tensor :return: Feed forward transpose convolution. Tensor of shape - [B x N_out x N] where B is the batch_size, - N_out is the number of output fields, N the number of points - in the mesh, D the dimension of the problem. - :rtype: torch.tensor + :math:`[B, N_{out}, M, D]` where :math:`B` is the batch size, + :math:`N_{out}` is the number of output fields, :math:`M` the number of points + in the mesh, :math:`D` the dimension of the problem. + :rtype: torch.Tensor - .. note:: This function is automatically called when `.transpose()` - method is used and `no_overlap=False` + .. note:: This function is automatically called when ``.transpose()`` + method is used and ``no_overlap=False`` """ # initialize convolution @@ -473,8 +471,9 @@ class ContinuousConvBlock(BaseContinuousConv): # list to iterate for calculating nn output tmp = [i for i in range(self._output_numb_field)] - iterate_conv = [item for item in tmp for _ in range( - self._input_numb_field)] + iterate_conv = [ + item for item in tmp for _ in range(self._input_numb_field) + ] for batch_idx, x in enumerate(X): diff --git a/pina/model/layers/fourier.py b/pina/model/layers/fourier.py index 4333276..ef9e76a 100644 --- a/pina/model/layers/fourier.py +++ b/pina/model/layers/fourier.py @@ -13,14 +13,19 @@ class FourierBlock1D(nn.Module): .. seealso:: - **Original reference**: Li, Zongyi, et al. - "Fourier neural operator for parametric partial - differential equations." arXiv preprint - arXiv:2010.08895 (2020) - `_. + **Original reference**: Li, Z., Kovachki, N., Azizzadenesheli, K., Liu, B., + Bhattacharya, K., Stuart, A., & Anandkumar, A. (2020). *Fourier neural operator for + parametric partial differential equations*. + DOI: `arXiv preprint arXiv:2010.08895.
+ `_ """ - def __init__(self, input_numb_fields, output_numb_fields, n_modes, activation=torch.nn.Tanh): + + def __init__(self, + input_numb_fields, + output_numb_fields, + n_modes, + activation=torch.nn.Tanh): super().__init__() """ PINA implementation of Fourier block one dimension. The module computes @@ -43,12 +48,13 @@ class FourierBlock1D(nn.Module): check_consistency(activation(), nn.Module) # assign variables - self._spectral_conv = SpectralConvBlock1D(input_numb_fields=input_numb_fields, - output_numb_fields=output_numb_fields, - n_modes=n_modes) + self._spectral_conv = SpectralConvBlock1D( + input_numb_fields=input_numb_fields, + output_numb_fields=output_numb_fields, + n_modes=n_modes) self._activation = activation() self._linear = nn.Conv1d(input_numb_fields, output_numb_fields, 1) - + def forward(self, x): """ Forward computation for Fourier Block. It performs a spectral @@ -74,13 +80,18 @@ class FourierBlock2D(nn.Module): .. seealso:: **Original reference**: Li, Zongyi, et al. - "Fourier neural operator for parametric partial - differential equations." arXiv preprint + *Fourier neural operator for parametric partial + differential equations*. arXiv preprint arXiv:2010.08895 (2020) `_. """ - def __init__(self, input_numb_fields, output_numb_fields, n_modes, activation=torch.nn.Tanh): + + def __init__(self, + input_numb_fields, + output_numb_fields, + n_modes, + activation=torch.nn.Tanh): """ PINA implementation of Fourier block two dimensions. The module computes the spectral convolution of the input with a linear kernel in the @@ -104,12 +115,13 @@ class FourierBlock2D(nn.Module): check_consistency(activation(), nn.Module) # assign variables - self._spectral_conv = SpectralConvBlock2D(input_numb_fields=input_numb_fields, - output_numb_fields=output_numb_fields, - n_modes=n_modes) + self._spectral_conv = SpectralConvBlock2D( + input_numb_fields=input_numb_fields, + output_numb_fields=output_numb_fields, + n_modes=n_modes) self._activation = activation() self._linear = nn.Conv2d(input_numb_fields, output_numb_fields, 1) - + def forward(self, x): """ Forward computation for Fourier Block. It performs a spectral @@ -135,13 +147,18 @@ class FourierBlock3D(nn.Module): .. seealso:: **Original reference**: Li, Zongyi, et al. - "Fourier neural operator for parametric partial - differential equations." arXiv preprint + *Fourier neural operator for parametric partial + differential equations*. arXiv preprint arXiv:2010.08895 (2020) `_. """ - def __init__(self, input_numb_fields, output_numb_fields, n_modes, activation=torch.nn.Tanh): + + def __init__(self, + input_numb_fields, + output_numb_fields, + n_modes, + activation=torch.nn.Tanh): """ PINA implementation of Fourier block three dimensions. The module computes the spectral convolution of the input with a linear kernel in the @@ -166,12 +183,13 @@ class FourierBlock3D(nn.Module): check_consistency(activation(), nn.Module) # assign variables - self._spectral_conv = SpectralConvBlock3D(input_numb_fields=input_numb_fields, - output_numb_fields=output_numb_fields, - n_modes=n_modes) + self._spectral_conv = SpectralConvBlock3D( + input_numb_fields=input_numb_fields, + output_numb_fields=output_numb_fields, + n_modes=n_modes) self._activation = activation() self._linear = nn.Conv3d(input_numb_fields, output_numb_fields, 1) - + def forward(self, x): """ Forward computation for Fourier Block. 
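+ A minimal usage sketch for the one dimensional variant, assuming the blocks are importable from ``pina.model.layers``: + :Example: + >>> import torch + >>> from pina.model.layers import FourierBlock1D + >>> block = FourierBlock1D(input_numb_fields=3, output_numb_fields=5, ... n_modes=8) + >>> block(torch.rand(4, 3, 64)).shape torch.Size([4, 5, 64])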
It performs a spectral diff --git a/pina/model/layers/residual.py b/pina/model/layers/residual.py index ddbf730..6b3734c 100644 --- a/pina/model/layers/residual.py +++ b/pina/model/layers/residual.py @@ -9,17 +9,21 @@ class ResidualBlock(nn.Module): .. seealso:: **Original reference**: He, Kaiming, et al. - "Deep residual learning for image recognition." + *Deep residual learning for image recognition.* Proceedings of the IEEE conference on computer vision and pattern recognition. 2016. - `_. + DOI: ``_. """ - def __init__(self, input_dim, output_dim, - hidden_dim, spectral_norm=False, + def __init__(self, + input_dim, + output_dim, + hidden_dim, + spectral_norm=False, activation=torch.nn.ReLU()): - """Residual block constructor + """ + Initializes the ResidualBlock module. :param int input_dim: Dimension of the input to pass to the feedforward linear layer. @@ -48,9 +52,9 @@ class ResidualBlock(nn.Module): self._activation = activation # create layers - self.l1 = self._spect_norm(nn.Linear(input_dim, hidden_dim)) - self.l2 = self._spect_norm(nn.Linear(hidden_dim, output_dim)) - self.l3 = self._spect_norm(nn.Linear(input_dim, output_dim)) + self._l1 = self._spect_norm(nn.Linear(input_dim, hidden_dim)) + self._l2 = self._spect_norm(nn.Linear(hidden_dim, output_dim)) + self._l3 = self._spect_norm(nn.Linear(input_dim, output_dim)) def forward(self, x): """Forward pass for residual block layer. @@ -59,9 +63,9 @@ class ResidualBlock(nn.Module): :return: Output tensor for the residual layer. :rtype: torch.Tensor """ - y = self.activation(self.l1(x)) - y = self.l2(y) - x = self.l3(x) + y = self._activation(self._l1(x)) + y = self._l2(y) + x = self._l3(x) return y + x def _spect_norm(self, x): @@ -74,32 +78,40 @@ class ResidualBlock(nn.Module): """ return nn.utils.spectral_norm(x) if self._spectral_norm else x - @ property - def spectral_norm(self): - return self._spectral_norm - @ property - def input_dim(self): - return self._input_dim - - @ property - def output_dim(self): - return self._output_dim - - @ property - def hidden_dim(self): - return self._hidden_dim - - @ property - def activation(self): - return self._activation - class EnhancedLinear(torch.nn.Module): """ - TODO + A wrapper class for enhancing a linear layer with activation and/or dropout. + + :param layer: The linear layer to be enhanced. + :type layer: torch.nn.Module + :param activation: The activation function to be applied after the linear layer. + :type activation: torch.nn.Module + :param dropout: The dropout probability to be applied after the activation (if provided). + :type dropout: float + + :Example: + + >>> linear_layer = torch.nn.Linear(10, 20) + >>> activation = torch.nn.ReLU() + >>> dropout_prob = 0.5 + >>> enhanced_linear = EnhancedLinear(linear_layer, activation, dropout_prob) """ + def __init__(self, layer, activation=None, dropout=None): + """ + Initializes the EnhancedLinear module. + + :param layer: The linear layer to be enhanced. + :type layer: torch.nn.Module + :param activation: The activation function to be applied after the linear layer. + :type activation: torch.nn.Module + :param dropout: The dropout probability to be applied after the activation (if provided).
+ :type dropout: float + """ super().__init__() # check consistency @@ -108,23 +120,41 @@ class EnhancedLinear(torch.nn.Module): check_consistency(activation, nn.Module) if dropout is not None: check_consistency(dropout, float) - + # assign forward if (dropout is None) and (activation is None): self._model = torch.nn.Sequential(layer) elif (dropout is None) and (activation is not None): - self._model = torch.nn.Sequential(layer, - activation) - + self._model = torch.nn.Sequential(layer, activation) + elif (dropout is not None) and (activation is None): - self._model = torch.nn.Sequential(layer, - self._drop(dropout)) - + self._model = torch.nn.Sequential(layer, self._drop(dropout)) + elif (dropout is not None) and (activation is not None): - self._model = torch.nn.Sequential(layer, - activation, + self._model = torch.nn.Sequential(layer, activation, self._drop(dropout)) - + def forward(self, x): + """ + Forward pass through the enhanced linear module. + + :param x: Input tensor. + :type x: torch.Tensor + + :return: Output tensor after passing through the enhanced linear module. + :rtype: torch.Tensor + """ return self._model(x) + + def _drop(self, p): + """ + Applies dropout with probability p. + + :param p: Dropout probability. + :type p: float + + :return: Dropout layer with the specified probability. + :rtype: torch.nn.Dropout + """ + return torch.nn.Dropout(p) diff --git a/pina/model/layers/spectral.py b/pina/model/layers/spectral.py index e6a06f7..d86aa09 100644 --- a/pina/model/layers/spectral.py +++ b/pina/model/layers/spectral.py @@ -3,17 +3,17 @@ import torch.nn as nn from ...utils import check_consistency import warnings + ######## 1D Spectral Convolution ########### class SpectralConvBlock1D(nn.Module): """ - Implementation of Spectral Convolution Block for one - dimensional tensor. + PINA implementation of Spectral Convolution Block for one + dimensional tensors. """ def __init__(self, input_numb_fields, output_numb_fields, n_modes): """ - PINA implementation of spectral convolution. The module computes - the spectral convolution of the input with a linear kernel in the + The module computes the spectral convolution of the input with a linear kernel in the fourier space, and then it maps the input back to the physical space. @@ -37,22 +37,22 @@ class SpectralConvBlock1D(nn.Module): self._output_channels = output_numb_fields # scaling factor - scale = (1. / (self._input_channels * self._output_channels)) + scale = (1. / (self._input_channels * self._output_channels)) self._weights = nn.Parameter(scale * torch.rand(self._input_channels, self._output_channels, self._modes, dtype=torch.cfloat)) - + def _compute_mult1d(self, input, weights): """ Compute the matrix multiplication of the input with the linear kernel weights. :param input: The input tensor, expect of size - [batch, input_numb_fields, x]. + ``[batch, input_numb_fields, x]``. :type input: torch.Tensor :param weights: The kernel weights, expect of - size [input_numb_fields, output_numb_fields, x]. + size ``[input_numb_fields, output_numb_fields, x]``. :type weights: torch.Tensor :return: The matrix multiplication of the input with the linear kernel weights. @@ -65,10 +65,10 @@ class SpectralConvBlock1D(nn.Module): Forward computation for Spectral Convolution. :param x: The input tensor, expect of size - [batch, input_numb_fields, x]. + ``[batch, input_numb_fields, x]``. :type x: torch.Tensor :return: The output tensor obtained from the - spectral convolution of size [batch, output_numb_fields, x]. 
+ spectral convolution of size ``[batch, output_numb_fields, x]``. :rtype: torch.Tensor """ batch_size = x.shape[0] @@ -82,23 +82,23 @@ class SpectralConvBlock1D(nn.Module): x.size(-1) // 2 + 1, device=x.device, dtype=torch.cfloat) - out_ft[:, :, :self._modes] = self._compute_mult1d(x_ft[:, :, :self._modes], self._weights) + out_ft[:, :, :self._modes] = self._compute_mult1d( + x_ft[:, :, :self._modes], self._weights) # Return to physical space return torch.fft.irfft(out_ft, n=x.size(-1)) - + ######## 2D Spectral Convolution ########### class SpectralConvBlock2D(nn.Module): """ - Implementation of spectral convolution block for two - dimensional tensor. + PINA implementation of spectral convolution block for two + dimensional tensors. """ def __init__(self, input_numb_fields, output_numb_fields, n_modes): """ - PINA implementation of spectral convolution. The module computes - the spectral convolution of the input with a linear kernel in the + The module computes the spectral convolution of the input with a linear kernel in the fourier space, and then it maps the input back to the physical space. @@ -118,17 +118,18 @@ class SpectralConvBlock2D(nn.Module): check_consistency(n_modes, int) if isinstance(n_modes, (tuple, list)): if len(n_modes) != 2: - raise ValueError('Expected n_modes to be a list or tuple of len two, ' - 'with each entry corresponding to the number of modes ' - 'for each dimension ') + raise ValueError( + 'Expected n_modes to be a list or tuple of len two, ' + 'with each entry corresponding to the number of modes ' + 'for each dimension ') elif isinstance(n_modes, int): - n_modes = [n_modes]*2 + n_modes = [n_modes] * 2 else: - raise ValueError('Expected n_modes to be a list or tuple of len two, ' - 'with each entry corresponding to the number of modes ' - 'for each dimension; or an int value representing the ' - 'number of modes for all dimensions') - + raise ValueError( + 'Expected n_modes to be a list or tuple of len two, ' + 'with each entry corresponding to the number of modes ' + 'for each dimension; or an int value representing the ' + 'number of modes for all dimensions') # assign variables self._modes = n_modes @@ -136,7 +137,7 @@ class SpectralConvBlock2D(nn.Module): self._output_channels = output_numb_fields # scaling factor - scale = (1. / (self._input_channels * self._output_channels)) + scale = (1. / (self._input_channels * self._output_channels)) self._weights1 = nn.Parameter(scale * torch.rand(self._input_channels, self._output_channels, self._modes[0], @@ -147,17 +148,17 @@ class SpectralConvBlock2D(nn.Module): self._modes[0], self._modes[1], dtype=torch.cfloat)) - + def _compute_mult2d(self, input, weights): """ Compute the matrix multiplication of the input with the linear kernel weights. :param input: The input tensor, expect of size - [batch, input_numb_fields, x, y]. + ``[batch, input_numb_fields, x, y]``. :type input: torch.Tensor :param weights: The kernel weights, expect of - size [input_numb_fields, output_numb_fields, x, y]. + size ``[input_numb_fields, output_numb_fields, x, y]``. :type weights: torch.Tensor :return: The matrix multiplication of the input with the linear kernel weights. @@ -170,10 +171,10 @@ class SpectralConvBlock2D(nn.Module): Forward computation for Spectral Convolution. :param x: The input tensor, expect of size - [batch, input_numb_fields, x]. + ``[batch, input_numb_fields, x, y]``. :type x: torch.Tensor :return: The output tensor obtained from the - spectral convolution of size [batch, output_numb_fields, x]. 
+ spectral convolution of size ``[batch, output_numb_fields, x, y]``. :rtype: torch.Tensor """ @@ -186,29 +187,28 @@ class SpectralConvBlock2D(nn.Module): out_ft = torch.zeros(batch_size, self._output_channels, x.size(-2), - x.size(-1)//2 + 1, + x.size(-1) // 2 + 1, device=x.device, dtype=torch.cfloat) - out_ft[:, :, :self._modes[0], :self._modes[1]] = self._compute_mult2d(x_ft[:, :, :self._modes[0], :self._modes[1]], - self._weights1) - out_ft[:, :, -self._modes[0]:, :self._modes[1]:] = self._compute_mult2d(x_ft[:, :, -self._modes[0]:, :self._modes[1]], - self._weights2) + out_ft[:, :, :self._modes[0], :self._modes[1]] = self._compute_mult2d( + x_ft[:, :, :self._modes[0], :self._modes[1]], self._weights1) + out_ft[:, :, -self._modes[0]:, :self._modes[1]:] = self._compute_mult2d( + x_ft[:, :, -self._modes[0]:, :self._modes[1]], self._weights2) # Return to physical space return torch.fft.irfft2(out_ft, s=(x.size(-2), x.size(-1))) -######## 2D Spectral Convolution ########### +######## 3D Spectral Convolution ########### class SpectralConvBlock3D(nn.Module): """ - Implementation of spectral convolution block for two - dimensional tensor. + PINA implementation of spectral convolution block for three + dimensional tensors. """ def __init__(self, input_numb_fields, output_numb_fields, n_modes): """ - PINA implementation of spectral convolution. The module computes - the spectral convolution of the input with a linear kernel in the + The module computes the spectral convolution of the input with a linear kernel in the fourier space, and then it maps the input back to the physical space. @@ -229,16 +229,18 @@ class SpectralConvBlock3D(nn.Module): check_consistency(n_modes, int) if isinstance(n_modes, (tuple, list)): if len(n_modes) != 3: - raise ValueError('Expected n_modes to be a list or tuple of len three, ' - 'with each entry corresponding to the number of modes ' - 'for each dimension ') + raise ValueError( + 'Expected n_modes to be a list or tuple of len three, ' + 'with each entry corresponding to the number of modes ' + 'for each dimension ') elif isinstance(n_modes, int): - n_modes = [n_modes]*3 + n_modes = [n_modes] * 3 else: - raise ValueError('Expected n_modes to be a list or tuple of len three, ' - 'with each entry corresponding to the number of modes ' - 'for each dimension; or an int value representing the ' - 'number of modes for all dimensions') + raise ValueError( + 'Expected n_modes to be a list or tuple of len three, ' + 'with each entry corresponding to the number of modes ' + 'for each dimension; or an int value representing the ' + 'number of modes for all dimensions') # assign variables self._modes = n_modes @@ -246,7 +248,7 @@ class SpectralConvBlock3D(nn.Module): self._output_channels = output_numb_fields # scaling factor - scale = (1. / (self._input_channels * self._output_channels)) + scale = (1. / (self._input_channels * self._output_channels)) self._weights1 = nn.Parameter(scale * torch.rand(self._input_channels, self._output_channels, self._modes[0], @@ -271,17 +273,17 @@ class SpectralConvBlock3D(nn.Module): self._modes[1], self._modes[2], dtype=torch.cfloat)) - + def _compute_mult3d(self, input, weights): """ Compute the matrix multiplication of the input with the linear kernel weights. :param input: The input tensor, expect of size - [batch, input_numb_fields, x, y]. + ``[batch, input_numb_fields, x, y, z]``. :type input: torch.Tensor :param weights: The kernel weights, expect of - size [input_numb_fields, output_numb_fields, x, y]. 
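######## Example: why several weight tensors are needed ###########
# Sketch of the mode bookkeeping used by the 2D and 3D blocks above:
# rfftn halves only the last axis, so negative frequencies survive on
# every other axis and the low-frequency block must be gathered from
# 2**(ndim - 1) corners: two weight tensors in 2D, four in 3D (the
# slice0..slice3 tuples built in the 3D forward).
import torch
from itertools import product

def corner_slices(modes):
    for signs in product((+1, -1), repeat=len(modes) - 1):
        idx = [slice(None), slice(None)]  # batch and field axes
        for s, m in zip(signs, modes[:-1]):
            idx.append(slice(m) if s > 0 else slice(-m, None))
        idx.append(slice(modes[-1]))  # halved (rfft) axis keeps only :m
        yield tuple(idx)

for sl in corner_slices([4, 4, 4]):  # reproduces the four 3D corners
    print(sl)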
+ size ``[input_numb_fields, output_numb_fields, x, y, z]``. :type weights: torch.Tensor :return: The matrix multiplication of the input with the linear kernel weights. @@ -294,10 +296,10 @@ class SpectralConvBlock3D(nn.Module): Forward computation for Spectral Convolution. :param x: The input tensor, expect of size - [batch, input_numb_fields, x]. + ``[batch, input_numb_fields, x, y, z]``. :type x: torch.Tensor :return: The output tensor obtained from the - spectral convolution of size [batch, output_numb_fields, x]. + spectral convolution of size ``[batch, output_numb_fields, x, y, z]``. :rtype: torch.Tensor """ @@ -311,42 +313,45 @@ class SpectralConvBlock3D(nn.Module): self._output_channels, x.size(-3), x.size(-2), - x.size(-1)//2 + 1, + x.size(-1) // 2 + 1, device=x.device, dtype=torch.cfloat) - - slice0 = (slice(None), - slice(None), - slice(self._modes[0]), - slice(self._modes[1]), - slice(self._modes[2]), - ) + + slice0 = ( + slice(None), + slice(None), + slice(self._modes[0]), + slice(self._modes[1]), + slice(self._modes[2]), + ) out_ft[slice0] = self._compute_mult3d(x_ft[slice0], self._weights1) - slice1 = (slice(None), - slice(None), - slice(self._modes[0]), - slice(-self._modes[1], None), - slice(self._modes[2]), - ) + slice1 = ( + slice(None), + slice(None), + slice(self._modes[0]), + slice(-self._modes[1], None), + slice(self._modes[2]), + ) out_ft[slice1] = self._compute_mult3d(x_ft[slice1], self._weights2) - slice2 = (slice(None), - slice(None), - slice(-self._modes[0], None), - slice(self._modes[1]), - slice(self._modes[2]), - ) + slice2 = ( + slice(None), + slice(None), + slice(-self._modes[0], None), + slice(self._modes[1]), + slice(self._modes[2]), + ) out_ft[slice2] = self._compute_mult3d(x_ft[slice2], self._weights3) - slice3 = (slice(None), - slice(None), - slice(-self._modes[0], None), - slice(-self._modes[1], None), - slice(self._modes[2]), - ) + slice3 = ( + slice(None), + slice(None), + slice(-self._modes[0], None), + slice(-self._modes[1], None), + slice(self._modes[2]), + ) out_ft[slice3] = self._compute_mult3d(x_ft[slice3], self._weights4) # Return to physical space return torch.fft.irfftn(out_ft, s=(x.size(-3), x.size(-2), x.size(-1))) - diff --git a/pina/model/layers/stride.py b/pina/model/layers/stride.py index 02143e8..6facb01 100644 --- a/pina/model/layers/stride.py +++ b/pina/model/layers/stride.py @@ -68,11 +68,13 @@ class Stride(object): direction[i] = 1 # creating the stride grid - values_mesh = [torch.arange(0, i, step).float() - for i, step in zip(domain, jumps)] + values_mesh = [ + torch.arange(0, i, step).float() for i, step in zip(domain, jumps) + ] - values_mesh = [single * dim for single, - dim in zip(values_mesh, direction)] + values_mesh = [ + single * dim for single, dim in zip(values_mesh, direction) + ] mesh = torch.meshgrid(values_mesh) coordinates_mesh = [x.reshape(-1, 1) for x in mesh] diff --git a/pina/model/layers/utils_convolution.py b/pina/model/layers/utils_convolution.py index c1e6a4e..a2df5fe 100644 --- a/pina/model/layers/utils_convolution.py +++ b/pina/model/layers/utils_convolution.py @@ -3,8 +3,8 @@ import torch def check_point(x, current_stride, dim): max_stride = current_stride + dim - indeces = torch.logical_and(x[..., :-1] < max_stride, - x[..., :-1] >= current_stride).all(dim=-1) + indeces = torch.logical_and(x[..., :-1] < max_stride, x[..., :-1] + >= current_stride).all(dim=-1) return indeces diff --git a/pina/model/multi_feed_forward.py b/pina/model/multi_feed_forward.py index 2773696..20d0240 100644 --- 
a/pina/model/multi_feed_forward.py +++ b/pina/model/multi_feed_forward.py @@ -12,8 +12,9 @@ class MultiFeedForward(torch.nn.Module): together. The user has to define the `forward` method choosing how to combine the different FeedForward networks. - :param dict dff_dict: dictionary of FeedForward networks. + :param dict ffn_dict: dictionary of FeedForward networks. """ + def __init__(self, ffn_dict): super().__init__() diff --git a/pina/model/network.py b/pina/model/network.py index ff9dce5..f27d5b2 100644 --- a/pina/model/network.py +++ b/pina/model/network.py @@ -4,10 +4,19 @@ from ..utils import check_consistency class Network(torch.nn.Module): - """ Network class with starndard forward method - and possibility to pass extra features.""" def __init__(self, model, extra_features=None): + """ + Network class with standard forward method + and possibility to pass extra features. This + class is used internally in PINA to convert + any :class:`torch.nn.Module` s in a PINA module. + + :param model: The torch model to convert in a PINA model. + :type model: torch.nn.Module + :param extra_features: List of torch models to augment the input, defaults to None. + :type extra_features: list(torch.nn.Module) + """ super().__init__() # check model consistency @@ -27,12 +36,15 @@ class Network(torch.nn.Module): def forward(self, x): """ - Forward method for Network class. This class - implements the standard forward method, and + Forward method for Network class. This class + implements the standard forward method, and it adds the possibility to pass extra features. + All the PINA models ``forward`` s are overriden + by this class, to enable :class:`pina.label_tensor.LabelTensor` labels + extraction. - :param torch.tensor x: input of the network - :return torch.tensor: output of the network + :param torch.Tensor x: Input of the network. + :return torch.Tensor: Output of the network. """ # extract features and append for feature in self._extra_features: diff --git a/pina/operators.py b/pina/operators.py index cb5358e..563188c 100644 --- a/pina/operators.py +++ b/pina/operators.py @@ -1,4 +1,10 @@ -"""Module for operators vectorize implementation""" +""" +Module for operators vectorize implementation. Differential operators are used to write any differential problem. +These operators are implemented to work on different accellerators: CPU, GPU, TPU or MPS. +All operators take as input a tensor onto which computing the operator, a tensor with respect +to which computing the operator, the name of the output variables to calculate the operator +for (in case of multidimensional functions), and the variables name on which the operator is calculated. 
+""" import torch from pina.label_tensor import LabelTensor @@ -48,15 +54,15 @@ def grad(output_, input_, components=None, d=None): raise RuntimeError('derivative labels missing from input tensor') output_fieldname = output_.labels[0] - gradients = torch.autograd.grad( - output_, - input_, - grad_outputs=torch.ones(output_.size(), dtype=output_.dtype, - device=output_.device), - create_graph=True, - retain_graph=True, - allow_unused=True - )[0] + gradients = torch.autograd.grad(output_, + input_, + grad_outputs=torch.ones( + output_.size(), + dtype=output_.dtype, + device=output_.device), + create_graph=True, + retain_graph=True, + allow_unused=True)[0] gradients.labels = input_.labels gradients = gradients.extract(d) @@ -188,11 +194,13 @@ def laplacian(output_, input_, components=None, d=None, method='std'): result = torch.zeros(output_.shape[0], 1, device=output_.device) for i, label in enumerate(grad_output.labels): gg = grad(grad_output, input_, d=d, components=[label]) - result[:, 0] += super(torch.Tensor, gg.T).__getitem__(i) # TODO improve + result[:, 0] += super(torch.Tensor, + gg.T).__getitem__(i) # TODO improve labels = [f'dd{components[0]}'] else: - result = torch.empty(input_.shape[0], len(components), + result = torch.empty(input_.shape[0], + len(components), device=output_.device) labels = [None] * len(components) for idx, (ci, di) in enumerate(zip(components, d)): @@ -237,8 +245,8 @@ def advection(output_, input_, velocity_field, components=None, d=None): if components is None: components = output_.labels - tmp = grad(output_, input_, components, d - ).reshape(-1, len(components), len(d)).transpose(0, 1) + tmp = grad(output_, input_, components, d).reshape(-1, len(components), + len(d)).transpose(0, 1) tmp *= output_.extract(velocity_field) return tmp.sum(dim=2).T diff --git a/pina/plotter.py b/pina/plotter.py index 179a025..1c2c478 100644 --- a/pina/plotter.py +++ b/pina/plotter.py @@ -1,6 +1,5 @@ """ Module for plotting. """ - import matplotlib.pyplot as plt import torch from pina.callbacks import MetricTracker @@ -14,9 +13,9 @@ class Plotter: def plot_samples(self, problem, variables=None, **kwargs): """ - Plot the training grid samples. + Plot the training grid samples. - :param SolverInterface solver: The SolverInterface object. + :param SolverInterface solver: The ``SolverInterface`` object. :param list(str) variables: Variables to plot. If None, all variables are plotted. If 'spatial', only spatial variables are plotted. If 'temporal', only temporal variables are plotted. Defaults to None. @@ -44,11 +43,13 @@ class Plotter: proj = '3d' if len(variables) == 3 else None ax = fig.add_subplot(projection=proj) for location in problem.input_pts: - coords = problem.input_pts[location].extract( - variables).T.detach() + coords = problem.input_pts[location].extract(variables).T.detach() if coords.shape[0] == 1: # 1D samples - ax.plot(coords.flatten(), torch.zeros(coords.flatten().shape), '.', - label=location, **kwargs) + ax.plot(coords.flatten(), + torch.zeros(coords.flatten().shape), + '.', + label=location, + **kwargs) else: ax.plot(*coords, '.', label=location, **kwargs) @@ -92,13 +93,19 @@ class Plotter: plt.legend() plt.show() - def _2d_plot(self, pts, pred, v, res, method, truth_solution=None, + def _2d_plot(self, + pts, + pred, + v, + res, + method, + truth_solution=None, **kwargs): """Plot solution for two dimensional function :param pts: Points to plot the solution. :type pts: torch.Tensor - :param pred: SolverInterface solution evaluated at 'pts'. 
+ :param pred: ``SolverInterface`` solution evaluated at 'pts'. :type pred: torch.Tensor :param method: Matplotlib method to plot 2-dimensional data, see https://matplotlib.org/stable/api/axes_api.html for @@ -116,32 +123,39 @@ class Plotter: truth_output = truth_solution(pts).float().reshape(res, res) fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(16, 6)) - cb = getattr(ax[0], method)( - *grids, pred_output.cpu().detach(), **kwargs) + cb = getattr(ax[0], method)(*grids, pred_output.cpu().detach(), + **kwargs) fig.colorbar(cb, ax=ax[0]) ax[0].title.set_text('Neural Network prediction') - cb = getattr(ax[1], method)( - *grids, truth_output.cpu().detach(), **kwargs) + cb = getattr(ax[1], method)(*grids, truth_output.cpu().detach(), + **kwargs) fig.colorbar(cb, ax=ax[1]) ax[1].title.set_text('True solution') - cb = getattr(ax[2], method)(*grids, - (truth_output-pred_output).cpu().detach(), - **kwargs) + cb = getattr(ax[2], + method)(*grids, + (truth_output - pred_output).cpu().detach(), + **kwargs) fig.colorbar(cb, ax=ax[2]) ax[2].title.set_text('Residual') else: fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(8, 6)) - cb = getattr(ax, method)( - *grids, pred_output.cpu().detach(), **kwargs) + cb = getattr(ax, method)(*grids, pred_output.cpu().detach(), + **kwargs) fig.colorbar(cb, ax=ax) ax.title.set_text('Neural Network prediction') - def plot(self, solver, components=None, fixed_variables={}, method='contourf', - res=256, filename=None, **kwargs): + def plot(self, + solver, + components=None, + fixed_variables={}, + method='contourf', + res=256, + filename=None, + **kwargs): """ Plot sample of SolverInterface output. - :param SolverInterface solver: The SolverInterface object instance. + :param SolverInterface solver: The ``SolverInterface`` object instance. :param list(str) components: The output variable to plot. If None, all the output variables of the problem are selected. Default value is None. @@ -149,8 +163,9 @@ class Plotter: should be kept fixed during the plot. The keys of the dictionary are the variables name whereas the values are the corresponding values of the variables. Defaults is `dict()`. - :param {'contourf', 'pcolor'} method: The matplotlib method to use for - plotting the solution. Default is 'contourf'. + :param str method: The matplotlib method to use for + plotting the solution. Available methods are {'contourf', 'pcolor'}. + Default is 'contourf'. :param int res: The resolution, aka the number of points used for plotting in each axis. Default is 256. :param str filename: The file name to save the plot. If None, the plot @@ -184,8 +199,8 @@ class Plotter: self._1d_plot(pts, predicted_output, method, truth_solution, **kwargs) elif len(v) == 2: - self._2d_plot(pts, predicted_output, v, res, method, - truth_solution, **kwargs) + self._2d_plot(pts, predicted_output, v, res, method, truth_solution, + **kwargs) plt.tight_layout() if filename: @@ -193,12 +208,19 @@ class Plotter: else: plt.show() - def plot_loss(self, trainer, metrics=None, logy = False, logx=False, filename=None, **kwargs): + def plot_loss(self, + trainer, + metrics=None, + logy=False, + logx=False, + filename=None, + **kwargs): """ Plot the loss function values during traininig. - :param Trainer trainer: the PINA Trainer object instance. - :param str/list(str) metric: The metrics to use in the y axis. If None, the mean loss + :param trainer: the PINA Trainer object instance. + :type trainer: Trainer + :param str | list(str) metric: The metrics to use in the y axis. If None, the mean loss is plotted. 
:param bool logy: If True, the y axis is in log scale. Default is True. @@ -209,10 +231,14 @@ class Plotter: """ # check that MetricTracker has been used - list_ = [idx for idx, s in enumerate(trainer.callbacks) if isinstance(s, MetricTracker)] + list_ = [ + idx for idx, s in enumerate(trainer.callbacks) + if isinstance(s, MetricTracker) + ] if not bool(list_): - raise FileNotFoundError('MetricTracker should be used as a callback during training to' - ' use this method.') + raise FileNotFoundError( + 'MetricTracker should be used as a callback during training to' + ' use this method.') # extract trainer metrics trainer_metrics = trainer.callbacks[list_[0]].metrics @@ -220,11 +246,13 @@ class Plotter: metrics = ['mean_loss'] elif not isinstance(metrics, list): raise ValueError('metrics must be class list.') - + # loop over metrics to plot for metric in metrics: if metric not in trainer_metrics: - raise ValueError(f'{metric} not a valid metric. Available metrics are {list(trainer_metrics.keys())}.') + raise ValueError( + f'{metric} not a valid metric. Available metrics are {list(trainer_metrics.keys())}.' + ) loss = trainer_metrics[metric] epochs = range(len(loss)) plt.plot(epochs, loss, **kwargs) diff --git a/pina/problem/abstract_problem.py b/pina/problem/abstract_problem.py index 95fdb65..1bea870 100644 --- a/pina/problem/abstract_problem.py +++ b/pina/problem/abstract_problem.py @@ -27,7 +27,7 @@ class AbstractProblem(metaclass=ABCMeta): # put in self.input_pts all the points that we don't need to sample self._span_condition_points() - + @property def input_variables(self): """ @@ -55,10 +55,11 @@ class AbstractProblem(metaclass=ABCMeta): def domain(self): """ The domain(s) where the conditions of the AbstractProblem are valid. + If more than one domain type is passed, a list of Location is + retured. - :return: the domain(s) of self - :rtype: list (if more than one domain are defined), - `Span` domain (of only one domain is defined) + :return: the domain(s) of ``self`` + :rtype: list[Location] """ domains = [ getattr(self, f'{t}_domain') @@ -109,7 +110,11 @@ class AbstractProblem(metaclass=ABCMeta): self.input_pts[condition_name] = samples self._have_sampled_points[condition_name] = True - def discretise_domain(self, n, mode = 'random', variables = 'all', locations = 'all'): + def discretise_domain(self, + n, + mode='random', + variables='all', + locations='all'): """ Generate a set of points to span the `Location` of all the conditions of the problem. @@ -122,9 +127,9 @@ class AbstractProblem(metaclass=ABCMeta): latin hypercube sampling, ``latin`` or ``lh``; chebyshev sampling, ``chebyshev``; grid sampling ``grid``. :param variables: problem's variables to be sampled, defaults to 'all'. - :type variables: str or list[str], optional + :type variables: str | list[str] :param locations: problem's locations from where to sample, defaults to 'all'. - :type locations: str, optional + :type locations: str :Example: >>> pinn.discretise_domain(n=10, mode='grid') @@ -146,24 +151,24 @@ class AbstractProblem(metaclass=ABCMeta): check_consistency(mode, str) if mode not in ['random', 'grid', 'lh', 'chebyshev', 'latin']: raise TypeError(f'mode {mode} not valid.') - + # check consistency variables if variables == 'all': variables = self.input_variables else: check_consistency(variables, str) - - if sorted(variables) != sorted(self.input_variables): + + if sorted(variables) != sorted(self.input_variables): TypeError(f'Wrong variables for sampling. 
Variables ', f'should be in {self.input_variables}.') - + # check consistency location if locations == 'all': locations = [condition for condition in self.conditions] else: check_consistency(locations, str) - if sorted(locations) != sorted(self.conditions): + if sorted(locations) != sorted(self.conditions): TypeError(f'Wrong locations for sampling. Location ', f'should be in {self.conditions}.') @@ -174,7 +179,7 @@ class AbstractProblem(metaclass=ABCMeta): # we try to check if we have already sampled try: already_sampled = [self.input_pts[location]] - # if we have not sampled, a key error is thrown + # if we have not sampled, a key error is thrown except KeyError: already_sampled = [] @@ -187,16 +192,15 @@ class AbstractProblem(metaclass=ABCMeta): self._have_sampled_points[location] = False # build samples - samples = [condition.location.sample( - n=n, - mode=mode, - variables=variables) - ] + already_sampled + samples = [ + condition.location.sample(n=n, mode=mode, variables=variables) + ] + already_sampled pts = merge_tensors(samples) self.input_pts[location] = pts # the condition is sampled if input_pts contains all labels - if sorted(self.input_pts[location].labels) == sorted(self.input_variables): + if sorted(self.input_pts[location].labels) == sorted( + self.input_variables): self._have_sampled_points[location] = True def add_points(self, new_points): @@ -207,21 +211,22 @@ class AbstractProblem(metaclass=ABCMeta): and values the torch.Tensor points. """ - if sorted(new_points.keys()) != sorted(self.conditions): + if sorted(new_points.keys()) != sorted(self.conditions): TypeError(f'Wrong locations for new points. Location ', f'should be in {self.conditions}.') - + for location in new_points.keys(): # extract old and new points old_pts = self.input_pts[location] new_pts = new_points[location] # if they don't have the same variables error - if sorted(old_pts.labels) != sorted(new_pts.labels): + if sorted(old_pts.labels) != sorted(new_pts.labels): TypeError(f'Not matching variables for old and new points ' f'in condition {location}.') if old_pts.labels != new_pts.labels: - new_pts = torch.hstack([new_pts.extract([i]) for i in old_pts.labels]) + new_pts = torch.hstack( + [new_pts.extract([i]) for i in old_pts.labels]) new_pts.labels = old_pts.labels # merging @@ -233,13 +238,14 @@ class AbstractProblem(metaclass=ABCMeta): def have_sampled_points(self): """ Check if all points for - ``'Location'`` are sampled. - """ + ``Location`` are sampled. + """ return all(self._have_sampled_points.values()) - + @property def not_sampled_points(self): - """Check which points are + """ + Check which points are not sampled. 
""" # variables which are not sampled @@ -251,4 +257,3 @@ class AbstractProblem(metaclass=ABCMeta): if not is_sample: not_sampled.append(condition_name) return not_sampled - diff --git a/pina/problem/parametric_problem.py b/pina/problem/parametric_problem.py index f498379..17b8ed6 100644 --- a/pina/problem/parametric_problem.py +++ b/pina/problem/parametric_problem.py @@ -16,14 +16,17 @@ class ParametricProblem(AbstractProblem): :Example: >>> from pina.problem import SpatialProblem, ParametricProblem >>> from pina.operators import grad - >>> from pina import Condition, Span + >>> from pina.equations import Equation, FixedValue + >>> from pina import Condition + >>> from pina.geometry import CartesianDomain >>> import torch >>> + >>> >>> class ParametricODE(SpatialProblem, ParametricProblem): >>> >>> output_variables = ['u'] - >>> spatial_domain = Span({'x': [0, 1]}) - >>> parameter_domain = Span({'alpha': [1, 10]}) + >>> spatial_domain = CartesianDomain({'x': [0, 1]}) + >>> parameter_domain = CartesianDomain({'alpha': [1, 10]}) >>> >>> def ode_equation(input_, output_): >>> u_x = grad(output_, input_, components=['u'], d=['x']) @@ -31,14 +34,9 @@ class ParametricProblem(AbstractProblem): >>> alpha = input_.extract(['alpha']) >>> return alpha * u_x - u >>> - >>> def initial_condition(input_, output_): - >>> value = 1.0 - >>> u = output_.extract(['u']) - >>> return u - value - >>> >>> conditions = { - >>> 'x0': Condition(Span({'x': 0, 'alpha':[1, 10]}), initial_condition), - >>> 'D': Condition(Span({'x': [0, 1], 'alpha':[1, 10]}), ode_equation)} + >>> 'x0': Condition(CartesianDomain({'x': 0, 'alpha':[1, 10]}), FixedValue(1.)), + >>> 'D': Condition(CartesianDomain({'x': [0, 1], 'alpha':[1, 10]}), Equation(ode_equation))} """ @abstractmethod diff --git a/pina/problem/spatial_problem.py b/pina/problem/spatial_problem.py index 001373b..a0483e3 100644 --- a/pina/problem/spatial_problem.py +++ b/pina/problem/spatial_problem.py @@ -14,24 +14,25 @@ class SpatialProblem(AbstractProblem): :Example: >>> from pina.problem import SpatialProblem >>> from pina.operators import grad - >>> from pina import Condition, Span + >>> from pina.equations import Equation, FixedValue + >>> from pina import Condition + >>> from pina.geometry import CartesianDomain >>> import torch - >>> class SimpleODE(SpatialProblem): + >>> + >>> + >>> class SpatialODE(SpatialProblem: + >>> >>> output_variables = ['u'] - >>> spatial_domain = Span({'x': [0, 1]}) + >>> spatial_domain = CartesianDomain({'x': [0, 1]}) + >>> >>> def ode_equation(input_, output_): >>> u_x = grad(output_, input_, components=['u'], d=['x']) >>> u = output_.extract(['u']) >>> return u_x - u - >>> - >>> def initial_condition(input_, output_): - >>> value = 1.0 - >>> u = output_.extract(['u']) - >>> return u - value >>> >>> conditions = { - >>> 'x0': Condition(Span({'x': 0.}), initial_condition), - >>> 'D': Condition(Span({'x': [0, 1]}), ode_equation)} + >>> 'x0': Condition(CartesianDomain({'x': 0, 'alpha':[1, 10]}), FixedValue(1.)), + >>> 'D': Condition(CartesianDomain({'x': [0, 1], 'alpha':[1, 10]}), Equation(ode_equation))} """ diff --git a/pina/problem/timedep_problem.py b/pina/problem/timedep_problem.py index 7750414..2d0179e 100644 --- a/pina/problem/timedep_problem.py +++ b/pina/problem/timedep_problem.py @@ -14,14 +14,17 @@ class TimeDependentProblem(AbstractProblem): :Example: >>> from pina.problem import SpatialProblem, TimeDependentProblem >>> from pina.operators import grad, laplacian - >>> from pina import Condition, Span + >>> from pina.equations 
import Equation, FixedValue + >>> from pina import Condition + >>> from pina.geometry import CartesianDomain >>> import torch >>> + >>> >>> class Wave(TimeDependentSpatialProblem): >>> >>> output_variables = ['u'] - >>> spatial_domain = Span({'x': [0, 3]}) - >>> temporal_domain = Span({'t': [0, 1]}) + >>> spatial_domain = CartesianDomain({'x': [0, 3]}) + >>> temporal_domain = CartesianDomain({'t': [0, 1]}) >>> >>> def wave_equation(input_, output_): >>> u_t = grad(output_, input_, components=['u'], d=['t']) @@ -29,10 +32,6 @@ class TimeDependentProblem(AbstractProblem): >>> delta_u = laplacian(output_, input_, components=['u'], d=['x']) >>> return delta_u - u_tt >>> - >>> def nil_dirichlet(input_, output_): - >>> value = 0.0 - >>> return output_.extract(['u']) - value - >>> >>> def initial_condition(input_, output_): >>> u_expected = (-3*torch.sin(2*torch.pi*input_.extract(['x'])) >>> + 5*torch.sin(8/3*torch.pi*input_.extract(['x']))) @@ -40,10 +39,10 @@ class TimeDependentProblem(AbstractProblem): >>> return u - u_expected >>> >>> conditions = { - >>> 't0': Condition(Span({'x': [0, 3], 't':0}), initial_condition), - >>> 'gamma1': Condition(Span({'x':0, 't':[0, 1]}), nil_dirichlet), - >>> 'gamma2': Condition(Span({'x':3, 't':[0, 1]}), nil_dirichlet), - >>> 'D': Condition(Span({'x': [0, 3], 't':[0, 1]}), wave_equation)} + >>> 't0': Condition(CartesianDomain({'x': [0, 3], 't':0}), Equation(initial_condition)), + >>> 'gamma1': Condition(CartesianDomain({'x':0, 't':[0, 1]}), FixedValue(0.)), + >>> 'gamma2': Condition(CartesianDomain({'x':3, 't':[0, 1]}), FixedValue(0.)), + >>> 'D': Condition(CartesianDomain({'x': [0, 3], 't':[0, 1]}), Equation(wave_equation))} """ diff --git a/pina/solvers/__init__.py b/pina/solvers/__init__.py index 39fbc2e..fdb6219 100644 --- a/pina/solvers/__init__.py +++ b/pina/solvers/__init__.py @@ -1,8 +1,12 @@ __all__ = [ 'PINN', 'GAROM', + 'SupervisedSolver', + 'SolverInterface' + ] from .garom import GAROM from .pinn import PINN from .supervised import SupervisedSolver +from .solver import SolverInterface diff --git a/pina/solvers/garom.py b/pina/solvers/garom.py index ddc16a3..972f156 100644 --- a/pina/solvers/garom.py +++ b/pina/solvers/garom.py @@ -4,7 +4,7 @@ import torch try: from torch.optim.lr_scheduler import LRScheduler # torch >= 2.0 except ImportError: - from torch.optim.lr_scheduler import _LRScheduler as LRScheduler # torch < 2.0 + from torch.optim.lr_scheduler import _LRScheduler as LRScheduler # torch < 2.0 from torch.optim.lr_scheduler import ConstantLR from .solver import SolverInterface @@ -22,28 +22,36 @@ class GAROM(SolverInterface): .. seealso:: **Original reference**: Coscia, D., Demo, N., & Rozza, G. (2023). - Generative Adversarial Reduced Order Modelling. - arXiv preprint arXiv:2305.15881. + *Generative Adversarial Reduced Order Modelling*. + DOI: `arXiv preprint arXiv:2305.15881. `_. 
""" - def __init__(self, - problem, - generator, - discriminator, - extra_features=None, - loss = None, - optimizer_generator=torch.optim.Adam, - optimizer_generator_kwargs={'lr' : 0.001}, - optimizer_discriminator=torch.optim.Adam, - optimizer_discriminator_kwargs={'lr' : 0.001}, - scheduler_generator=ConstantLR, - scheduler_generator_kwargs={"factor": 1, "total_iters": 0}, - scheduler_discriminator=ConstantLR, - scheduler_discriminator_kwargs={"factor": 1, "total_iters": 0}, - gamma = 0.3, - lambda_k = 0.001, - regularizer = False, - ): + + def __init__( + self, + problem, + generator, + discriminator, + extra_features=None, + loss=None, + optimizer_generator=torch.optim.Adam, + optimizer_generator_kwargs={'lr': 0.001}, + optimizer_discriminator=torch.optim.Adam, + optimizer_discriminator_kwargs={'lr': 0.001}, + scheduler_generator=ConstantLR, + scheduler_generator_kwargs={ + "factor": 1, + "total_iters": 0 + }, + scheduler_discriminator=ConstantLR, + scheduler_discriminator_kwargs={ + "factor": 1, + "total_iters": 0 + }, + gamma=0.3, + lambda_k=0.001, + regularizer=False, + ): """ :param AbstractProblem problem: The formualation of the problem. :param torch.nn.Module generator: The neural network model to use @@ -77,11 +85,11 @@ class GAROM(SolverInterface): rate scheduler for the discriminator. :param dict scheduler_discriminator_kwargs: LR scheduler constructor keyword args. :param gamma: Ratio of expected loss for generator and discriminator, defaults to 0.3. - :type gamma: float, optional + :type gamma: float :param lambda_k: Learning rate for control theory optimization, defaults to 0.001. - :type lambda_k: float, optional + :type lambda_k: float :param regularizer: Regularization term in the GAROM loss, defaults to False. - :type regularizer: bool, optional + :type regularizer: bool .. warning:: The algorithm works only for data-driven model. Hence in the ``problem`` definition @@ -90,22 +98,27 @@ class GAROM(SolverInterface): """ if isinstance(extra_features, dict): - extra_features = [extra_features['generator'], extra_features['discriminator']] + extra_features = [ + extra_features['generator'], extra_features['discriminator'] + ] + + super().__init__( + models=[generator, discriminator], + problem=problem, + extra_features=extra_features, + optimizers=[optimizer_generator, optimizer_discriminator], + optimizers_kwargs=[ + optimizer_generator_kwargs, optimizer_discriminator_kwargs + ]) - super().__init__(models=[generator, discriminator], - problem=problem, - extra_features=extra_features, - optimizers=[optimizer_generator, optimizer_discriminator], - optimizers_kwargs=[optimizer_generator_kwargs, optimizer_discriminator_kwargs]) - # set automatic optimization for GANs self.automatic_optimization = False # set loss if loss is None: loss = PowerLoss(p=1) - - # check consistency + + # check consistency check_consistency(scheduler_generator, LRScheduler, subclass=True) check_consistency(scheduler_generator_kwargs, dict) check_consistency(scheduler_discriminator, LRScheduler, subclass=True) @@ -134,6 +147,20 @@ class GAROM(SolverInterface): self.regularizer = float(regularizer) def forward(self, x, mc_steps=20, variance=False): + """ + Forward step for GAROM solver + + :param x: The input tensor. + :type x: torch.Tensor + :param mc_steps: Number of montecarlo samples to approximate the + expected value, defaults to 20. + :type mc_steps: int + :param variance: Returining also the sample variance of the solution, defaults to False. 
+ :type variance: bool + :return: The expected value of the generator distribution. If ``variance=True`` also the + sample variance is returned. + :rtype: torch.Tensor | tuple(torch.Tensor, torch.Tensor) + """ # sampling field_sample = [self.sample(x) for _ in range(mc_steps)] @@ -147,10 +174,11 @@ class GAROM(SolverInterface): return mean, var return mean - + def configure_optimizers(self): - """Optimizer configuration for the GAROM - solver. + """ + Optimizer configuration for the GAROM + solver. :return: The optimizers and the schedulers :rtype: tuple(list, list) @@ -220,7 +248,7 @@ class GAROM(SolverInterface): return diff def training_step(self, batch, batch_idx): - """PINN solver training step. + """GAROM solver training step. :param batch: The batch element in the dataloader. :type batch: tuple @@ -265,27 +293,27 @@ class GAROM(SolverInterface): self.log('stability_metric', float(d_loss_real + torch.abs(diff)), prog_bar=True, logger=True, on_epoch=True, on_step=False) return - + @property def generator(self): return self.models[0] - + @property def discriminator(self): return self.models[1] - + @property def optimizer_generator(self): return self.optimizers[0] - + @property def optimizer_discriminator(self): return self.optimizers[1] - + @property def scheduler_generator(self): return self._schedulers[0] - + @property def scheduler_discriminator(self): return self._schedulers[1] diff --git a/pina/solvers/pinn.py b/pina/solvers/pinn.py index e986bb6..828b36d 100644 --- a/pina/solvers/pinn.py +++ b/pina/solvers/pinn.py @@ -3,7 +3,7 @@ import torch try: from torch.optim.lr_scheduler import LRScheduler # torch >= 2.0 except ImportError: - from torch.optim.lr_scheduler import _LRScheduler as LRScheduler # torch < 2.0 + from torch.optim.lr_scheduler import _LRScheduler as LRScheduler # torch < 2.0 from torch.optim.lr_scheduler import ConstantLR @@ -13,7 +13,6 @@ from ..utils import check_consistency from ..loss import LossInterface from torch.nn.modules.loss import _Loss - torch.pi = torch.acos(torch.zeros(1)).item() * 2 # which is 3.1415927410125732 @@ -30,27 +29,31 @@ class PINN(SolverInterface): Physics-informed machine learning. Nature Reviews Physics, 3(6), 422-440. `_. """ - def __init__(self, - problem, - model, - extra_features=None, - loss = torch.nn.MSELoss(), - optimizer=torch.optim.Adam, - optimizer_kwargs={'lr' : 0.001}, - scheduler=ConstantLR, - scheduler_kwargs={"factor": 1, "total_iters": 0}, - ): + + def __init__( + self, + problem, + model, + extra_features=None, + loss=torch.nn.MSELoss(), + optimizer=torch.optim.Adam, + optimizer_kwargs={'lr': 0.001}, + scheduler=ConstantLR, + scheduler_kwargs={ + "factor": 1, + "total_iters": 0 + }, + ): ''' :param AbstractProblem problem: The formualation of the problem. :param torch.nn.Module model: The neural network model to use. :param torch.nn.Module loss: The loss function used as minimizer, - default torch.nn.MSELoss(). + default :class:`torch.nn.MSELoss`. :param torch.nn.Module extra_features: The additional input features to use as augmented input. :param torch.optim.Optimizer optimizer: The neural network optimizer to - use; default is `torch.optim.Adam`. + use; default is :class:`torch.optim.Adam`. :param dict optimizer_kwargs: Optimizer constructor keyword args. - :param float lr: The learning rate; default is 0.001. :param torch.optim.LRScheduler scheduler: Learning rate scheduler. :param dict scheduler_kwargs: LR scheduler constructor keyword args. 
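######## Example: building a PINN solver ###########
# Usage sketch for the constructor documented above, assuming a
# `problem` instance such as the Poisson problem defined in the tests
# later in this diff.
import torch
from pina.solvers import PINN
from pina.model import FeedForward

model = FeedForward(len(problem.input_variables),
                    len(problem.output_variables))
solver = PINN(problem=problem,
              model=model,
              optimizer=torch.optim.Adam,
              optimizer_kwargs={'lr': 0.001})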
@@ -60,8 +63,8 @@ class PINN(SolverInterface): optimizers=[optimizer], optimizers_kwargs=[optimizer_kwargs], extra_features=extra_features) - - # check consistency + + # check consistency check_consistency(scheduler, LRScheduler, subclass=True) check_consistency(scheduler_kwargs, dict) check_consistency(loss, (LossInterface, _Loss), subclass=False) @@ -71,14 +74,14 @@ class PINN(SolverInterface): self._loss = loss self._neural_net = self.models[0] - def forward(self, x): - """Forward pass implementation for the PINN - solver. + """ + Forward pass implementation for the PINN + solver. - :param torch.tensor x: Input data. + :param torch.Tensor x: Input tensor. :return: PINN solution. - :rtype: torch.tensor + :rtype: torch.Tensor """ # extract labels x = x.extract(self.problem.input_variables) @@ -89,8 +92,9 @@ class PINN(SolverInterface): return output def configure_optimizers(self): - """Optimizer configuration for the PINN - solver. + """ + Optimizer configuration for the PINN + solver. :return: The optimizers and the schedulers :rtype: tuple(list, list) @@ -107,7 +111,8 @@ class PINN(SolverInterface): def training_step(self, batch, batch_idx): - """PINN solver training step. + """ + PINN solver training step. :param batch: The batch element in the dataloader. :type batch: tuple @@ -159,17 +164,17 @@ class PINN(SolverInterface): Scheduler for the PINN training. """ return self._scheduler - + @property def neural_net(self): """ Neural network for the PINN training. """ return self._neural_net - + @property def loss(self): """ Loss for the PINN training. """ - return self._loss \ No newline at end of file + return self._loss diff --git a/pina/solvers/solver.py b/pina/solvers/solver.py index c205840..698e088 100644 --- a/pina/solvers/solver.py +++ b/pina/solvers/solver.py @@ -2,30 +2,38 @@ from abc import ABCMeta, abstractmethod from ..model.network import Network -import pytorch_lightning as pl +import pytorch_lightning from ..utils import check_consistency from ..problem import AbstractProblem import torch -class SolverInterface(pl.LightningModule, metaclass=ABCMeta): - """ Solver base class. """ +class SolverInterface(pytorch_lightning.LightningModule, metaclass=ABCMeta): + """ + Solver base class. This class inherits is a wrapper of + LightningModule class, inheriting all the + LightningModule methods. + """ + def __init__(self, models, problem, - optimizers, - optimizers_kwargs, + optimizers, + optimizers_kwargs, extra_features=None): """ :param models: A torch neural network model instance. :type models: torch.nn.Module :param problem: A problem definition instance. :type problem: AbstractProblem - :param list(torch.nn.Module) extra_features: the additional input - features to use as augmented input. If ``None`` no extra features - are passed. If it is a list of ``torch.nn.Module``, the extra feature - list is passed to all models. If it is a list of extra features' lists, - each single list of extra feature is passed to a model. + :param list(torch.optim.Optimizer) optimizer: A list of neural network optimizers to + use. + :param list(dict) optimizer_kwargs: A list of optimizer constructor keyword args. + :param list(torch.nn.Module) extra_features: The additional input + features to use as augmented input. If ``None`` no extra features + are passed. If it is a list of :class:`torch.nn.Module`, the extra feature + list is passed to all models. If it is a list of extra features' lists, + each single list of extra feature is passed to a model. 
""" super().__init__() @@ -52,37 +60,40 @@ class SolverInterface(pl.LightningModule, metaclass=ABCMeta): raise ValueError('You must define one optimizer for each model.' f'Got {len_model} models, and {len_optimizer}' ' optimizers.') - + # check length consistency optimizers kwargs if len_optimizer_kwargs != len_optimizer: raise ValueError('You must define one dictionary of keyword' ' arguments for each optimizers.' f'Got {len_optimizer} optimizers, and' f' {len_optimizer_kwargs} dicitionaries') - + # extra features handling - if extra_features is None: + if extra_features is None: extra_features = [None] * len_model else: # if we only have a list of extra features if not isinstance(extra_features[0], (tuple, list)): extra_features = [extra_features] * len_model - else: # if we have a list of list extra features + else: # if we have a list of list extra features if len(extra_features) != len_model: - raise ValueError('You passed a list of extrafeatures list with len' - f'different of models len. Expected {len_model} ' - f'got {len(extra_features)}. If you want to use' - 'the same list of extra features for all models, ' - 'just pass a list of extrafeatures and not a list ' - 'of list of extra features.') - + raise ValueError( + 'You passed a list of extrafeatures list with len' + f'different of models len. Expected {len_model} ' + f'got {len(extra_features)}. If you want to use' + 'the same list of extra features for all models, ' + 'just pass a list of extrafeatures and not a list ' + 'of list of extra features.') + # assigning model and optimizers self._pina_models = [] self._pina_optimizers = [] for idx in range(len_model): - model_ = Network(model=models[idx], extra_features=extra_features[idx]) - optim_ = optimizers[idx](model_.parameters(), **optimizers_kwargs[idx]) + model_ = Network(model=models[idx], + extra_features=extra_features[idx]) + optim_ = optimizers[idx](model_.parameters(), + **optimizers_kwargs[idx]) self._pina_models.append(model_) self._pina_optimizers.append(optim_) @@ -90,9 +101,9 @@ class SolverInterface(pl.LightningModule, metaclass=ABCMeta): self._pina_problem = problem @abstractmethod - def forward(self): + def forward(self, *args, **kwargs): pass - + @abstractmethod def training_step(self): pass @@ -131,4 +142,4 @@ class SolverInterface(pl.LightningModule, metaclass=ABCMeta): # """ # Set the problem formulation.""" # check_consistency(problem, AbstractProblem, 'pina problem') - # self._problem = problem \ No newline at end of file + # self._problem = problem diff --git a/pina/solvers/supervised.py b/pina/solvers/supervised.py index be86b6e..2b33405 100644 --- a/pina/solvers/supervised.py +++ b/pina/solvers/supervised.py @@ -3,7 +3,7 @@ import torch try: from torch.optim.lr_scheduler import LRScheduler # torch >= 2.0 except ImportError: - from torch.optim.lr_scheduler import _LRScheduler as LRScheduler # torch < 2.0 + from torch.optim.lr_scheduler import _LRScheduler as LRScheduler # torch < 2.0 from torch.optim.lr_scheduler import ConstantLR @@ -19,25 +19,30 @@ class SupervisedSolver(SolverInterface): SupervisedSolver solver class. This class implements a SupervisedSolver, using a user specified ``model`` to solve a specific ``problem``. 
""" - def __init__(self, - problem, - model, - extra_features=None, - loss = torch.nn.MSELoss(), - optimizer=torch.optim.Adam, - optimizer_kwargs={'lr' : 0.001}, - scheduler=ConstantLR, - scheduler_kwargs={"factor": 1, "total_iters": 0}, - ): + + def __init__( + self, + problem, + model, + extra_features=None, + loss=torch.nn.MSELoss(), + optimizer=torch.optim.Adam, + optimizer_kwargs={'lr': 0.001}, + scheduler=ConstantLR, + scheduler_kwargs={ + "factor": 1, + "total_iters": 0 + }, + ): ''' :param AbstractProblem problem: The formualation of the problem. :param torch.nn.Module model: The neural network model to use. :param torch.nn.Module loss: The loss function used as minimizer, - default torch.nn.MSELoss(). + default :class:`torch.nn.MSELoss`. :param torch.nn.Module extra_features: The additional input features to use as augmented input. :param torch.optim.Optimizer optimizer: The neural network optimizer to - use; default is `torch.optim.Adam`. + use; default is :class:`torch.optim.Adam`. :param dict optimizer_kwargs: Optimizer constructor keyword args. :param float lr: The learning rate; default is 0.001. :param torch.optim.LRScheduler scheduler: Learning @@ -49,8 +54,8 @@ class SupervisedSolver(SolverInterface): optimizers=[optimizer], optimizers_kwargs=[optimizer_kwargs], extra_features=extra_features) - - # check consistency + + # check consistency check_consistency(scheduler, LRScheduler, subclass=True) check_consistency(scheduler_kwargs, dict) check_consistency(loss, (LossInterface, _Loss), subclass=False) @@ -60,13 +65,12 @@ class SupervisedSolver(SolverInterface): self._loss = loss self._neural_net = self.models[0] - def forward(self, x): """Forward pass implementation for the solver. - :param torch.tensor x: Input data. + :param torch.Tensor x: Input tensor. :return: Solver solution. - :rtype: torch.tensor + :rtype: torch.Tensor """ # extract labels x = x.extract(self.problem.input_variables) @@ -83,7 +87,7 @@ class SupervisedSolver(SolverInterface): :rtype: tuple(list, list) """ return self.optimizers, [self.scheduler] - + def training_step(self, batch, batch_idx): """Solver training step. @@ -105,9 +109,11 @@ class SupervisedSolver(SolverInterface): # data loss if hasattr(condition, 'output_points'): input_pts, output_pts = samples - loss = self.loss(self.forward(input_pts), output_pts) * condition.data_weight + loss = self.loss(self.forward(input_pts), + output_pts) * condition.data_weight else: - raise RuntimeError('Supervised solver works only in data-driven mode.') + raise RuntimeError( + 'Supervised solver works only in data-driven mode.') self.log('mean_loss', float(loss), prog_bar=True, logger=True) return loss @@ -118,17 +124,17 @@ class SupervisedSolver(SolverInterface): Scheduler for training. """ return self._scheduler - + @property def neural_net(self): """ Neural network for training. """ return self._neural_net - + @property def loss(self): """ Loss for training. """ - return self._loss \ No newline at end of file + return self._loss diff --git a/pina/trainer.py b/pina/trainer.py index fc2a938..bef8b80 100644 --- a/pina/trainer.py +++ b/pina/trainer.py @@ -1,17 +1,35 @@ -""" Solver module. """ +""" Trainer module. 
""" -from pytorch_lightning import Trainer +import pytorch_lightning from .utils import check_consistency from .dataset import SamplePointDataset, SamplePointLoader, DataPointDataset from .solvers.solver import SolverInterface -class Trainer(Trainer): +class Trainer(pytorch_lightning.Trainer): def __init__(self, solver, batch_size=None, **kwargs): + """ + PINA Trainer class for costumizing every aspect of training via flags. + + :param solver: A pina:class:`SolverInterface` solver for the differential problem. + :type solver: SolverInterface + :param batch_size: How many samples per batch to load. If ``batch_size=None`` all + samples are loaded and data are not batched, defaults to None. + :type batch_size: int | None + + :Keyword Arguments: + The additional keyword arguments specify the training setup + and can be choosen from the `pytorch-lightning + Trainer API `_ + """ + super().__init__(**kwargs) - # check inheritance consistency for solver + # check inheritance consistency for solver and batch size check_consistency(solver, SolverInterface) + if batch_size is not None: + check_consistency(batch_size, int) + self._model = solver self.batch_size = batch_size @@ -45,7 +63,7 @@ class Trainer(Trainer): def train(self, **kwargs): """ - Train the solver. + Train the solver method. """ return super().fit(self._model, train_dataloaders=self._loader, **kwargs) @@ -54,4 +72,4 @@ class Trainer(Trainer): """ Returning trainer solver. """ - return self._model \ No newline at end of file + return self._model diff --git a/pina/utils.py b/pina/utils.py index bff9d7b..b21499b 100644 --- a/pina/utils.py +++ b/pina/utils.py @@ -39,7 +39,9 @@ def check_consistency(object, object_instance, subclass=False): raise ValueError(f"{type(obj).__name__} must be {object_instance}.") -def number_parameters(model, aggregate=True, only_trainable=True): # TODO: check +def number_parameters(model, + aggregate=True, + only_trainable=True): # TODO: check """ Return the number of parameters of a given `model`. @@ -140,6 +142,7 @@ def chebyshev_roots(n): nodes = torch.sort(torch.cos(pi * (k + 0.5) / n))[0] return nodes + # class PinaDataset(): # def __init__(self, pinn) -> None: @@ -187,7 +190,6 @@ def chebyshev_roots(n): # return self._len - class LabelTensorDataLoader(DataLoader): def collate_fn(self, data): diff --git a/pina/writer.py b/pina/writer.py index a556c03..36d1999 100644 --- a/pina/writer.py +++ b/pina/writer.py @@ -11,10 +11,7 @@ class Writer: Implementation of a writer class, for textual output. """ - def __init__(self, - frequency_print=10, - header='any') -> None: - + def __init__(self, frequency_print=10, header='any') -> None: """ The constructor of the class. @@ -25,7 +22,6 @@ class Writer: self._frequency_print = frequency_print self._header = header - def header(self, trainer): """ The method for printing the header. @@ -42,7 +38,6 @@ class Writer: """ pass - def write_loss_in_loop(self, trainer, loss): """ The method for writing the output within the training loop. 
diff --git a/tests/test_callbacks/test_adaptive_refinment_callbacks.py b/tests/test_callbacks/test_adaptive_refinment_callbacks.py index 1144025..8964e46 100644 --- a/tests/test_callbacks/test_adaptive_refinment_callbacks.py +++ b/tests/test_callbacks/test_adaptive_refinment_callbacks.py @@ -1,5 +1,3 @@ - - from pina.callbacks import R3Refinement import torch import pytest @@ -7,7 +5,8 @@ import pytest from pina.problem import SpatialProblem from pina.operators import laplacian from pina.geometry import CartesianDomain -from pina import Condition, LabelTensor, PINN +from pina import Condition, LabelTensor +from pina.solvers import PINN from pina.trainer import Trainer from pina.model import FeedForward from pina.equation.equation import Equation @@ -15,15 +14,17 @@ from pina.equation.equation_factory import FixedValue def laplace_equation(input_, output_): - force_term = (torch.sin(input_.extract(['x'])*torch.pi) * - torch.sin(input_.extract(['y'])*torch.pi)) + force_term = (torch.sin(input_.extract(['x']) * torch.pi) * + torch.sin(input_.extract(['y']) * torch.pi)) delta_u = laplacian(output_.extract(['u']), input_) return delta_u - force_term + my_laplace = Equation(laplace_equation) in_ = LabelTensor(torch.tensor([[0., 1.]]), ['x', 'y']) out_ = LabelTensor(torch.tensor([[0.]]), ['u']) + class Poisson(SpatialProblem): output_variables = ['u'] spatial_domain = CartesianDomain({'x': [0, 1], 'y': [0, 1]}) @@ -55,7 +56,8 @@ poisson_problem = Poisson() boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4'] n = 10 poisson_problem.discretise_domain(n, 'grid', locations=boundaries) -model = FeedForward(len(poisson_problem.input_variables),len(poisson_problem.output_variables)) +model = FeedForward(len(poisson_problem.input_variables), + len(poisson_problem.output_variables)) # make the solver solver = PINN(problem=poisson_problem, model=model) @@ -64,8 +66,10 @@ solver = PINN(problem=poisson_problem, model=model) def test_r3constructor(): R3Refinement(sample_every=10) + def test_r3refinment_routine(): # make the trainer - trainer = Trainer(solver=solver, callbacks=[R3Refinement(sample_every=1)], max_epochs=5) + trainer = Trainer(solver=solver, + callbacks=[R3Refinement(sample_every=1)], + max_epochs=5) trainer.train() - diff --git a/tests/test_callbacks/test_optimizer_callbacks.py b/tests/test_callbacks/test_optimizer_callbacks.py index 9250ae1..6c167b6 100644 --- a/tests/test_callbacks/test_optimizer_callbacks.py +++ b/tests/test_callbacks/test_optimizer_callbacks.py @@ -1,5 +1,3 @@ - - from pina.callbacks import SwitchOptimizer import torch import pytest @@ -7,7 +5,8 @@ import pytest from pina.problem import SpatialProblem from pina.operators import laplacian from pina.geometry import CartesianDomain -from pina import Condition, LabelTensor, PINN +from pina import Condition, LabelTensor +from pina.solvers import PINN from pina.trainer import Trainer from pina.model import FeedForward from pina.equation.equation import Equation @@ -15,15 +14,17 @@ from pina.equation.equation_factory import FixedValue def laplace_equation(input_, output_): - force_term = (torch.sin(input_.extract(['x'])*torch.pi) * - torch.sin(input_.extract(['y'])*torch.pi)) + force_term = (torch.sin(input_.extract(['x']) * torch.pi) * + torch.sin(input_.extract(['y']) * torch.pi)) delta_u = laplacian(output_.extract(['u']), input_) return delta_u - force_term + my_laplace = Equation(laplace_equation) in_ = LabelTensor(torch.tensor([[0., 1.]]), ['x', 'y']) out_ = LabelTensor(torch.tensor([[0.]]), ['u']) + class 
Poisson(SpatialProblem): output_variables = ['u'] spatial_domain = CartesianDomain({'x': [0, 1], 'y': [0, 1]}) @@ -55,7 +56,8 @@ poisson_problem = Poisson() boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4'] n = 10 poisson_problem.discretise_domain(n, 'grid', locations=boundaries) -model = FeedForward(len(poisson_problem.input_variables),len(poisson_problem.output_variables)) +model = FeedForward(len(poisson_problem.input_variables), + len(poisson_problem.output_variables)) # make the solver solver = PINN(problem=poisson_problem, model=model) @@ -63,19 +65,24 @@ solver = PINN(problem=poisson_problem, model=model) def test_switch_optimizer_constructor(): SwitchOptimizer(new_optimizers=torch.optim.Adam, - new_optimizers_kwargs={'lr':0.01}, + new_optimizers_kwargs={'lr': 0.01}, epoch_switch=10) - + with pytest.raises(ValueError): SwitchOptimizer(new_optimizers=[torch.optim.Adam, torch.optim.Adam], - new_optimizers_kwargs=[{'lr':0.01}], + new_optimizers_kwargs=[{ + 'lr': 0.01 + }], epoch_switch=10) def test_switch_optimizer_routine(): # make the trainer - trainer = Trainer(solver=solver, callbacks=[SwitchOptimizer(new_optimizers=torch.optim.LBFGS, - new_optimizers_kwargs={'lr':0.01}, - epoch_switch=3)], max_epochs=5) + trainer = Trainer(solver=solver, + callbacks=[ + SwitchOptimizer(new_optimizers=torch.optim.LBFGS, + new_optimizers_kwargs={'lr': 0.01}, + epoch_switch=3) + ], + max_epochs=5) trainer.train() - diff --git a/tests/test_condition.py b/tests/test_condition.py index cb82ab6..23c9d12 100644 --- a/tests/test_condition.py +++ b/tests/test_condition.py @@ -1,17 +1,19 @@ import torch import pytest -from pina import LabelTensor, Condition, CartesianDomain, PINN +from pina import LabelTensor, Condition +from pina.solvers import PINN +from pina.geometry import CartesianDomain from pina.problem import SpatialProblem from pina.model import FeedForward from pina.operators import laplacian from pina.equation.equation_factory import FixedValue - example_domain = CartesianDomain({'x': [0, 1], 'y': [0, 1]}) example_input_pts = LabelTensor(torch.tensor([[0, 0, 0]]), ['x', 'y', 'z']) example_output_pts = LabelTensor(torch.tensor([[1, 2]]), ['a', 'b']) + def test_init_inputoutput(): Condition(input_points=example_input_pts, output_points=example_output_pts) with pytest.raises(ValueError): @@ -21,8 +23,9 @@ def test_init_inputoutput(): with pytest.raises(TypeError): Condition(input_points=example_domain, output_points=example_domain) + def test_init_locfunc(): - Condition(location=example_domain, equation=FixedValue(0.0)) + Condition(location=example_domain, equation=FixedValue(0.0)) with pytest.raises(ValueError): Condition(example_domain, FixedValue(0.0)) with pytest.raises(TypeError): @@ -30,6 +33,7 @@ def test_init_locfunc(): with pytest.raises(TypeError): Condition(location=example_input_pts, equation=example_output_pts) + def test_init_inputfunc(): Condition(input_points=example_input_pts, equation=FixedValue(0.0)) with pytest.raises(ValueError): diff --git a/tests/test_equations/test_equation.py b/tests/test_equations/test_equation.py index 924513e..aed4b09 100644 --- a/tests/test_equations/test_equation.py +++ b/tests/test_equations/test_equation.py @@ -4,21 +4,25 @@ from pina import LabelTensor import torch import pytest + def eq1(input_, output_): u_grad = grad(output_, input_) u1_xx = grad(u_grad, input_, components=['du1dx'], d=['x']) u2_xy = grad(u_grad, input_, components=['du2dx'], d=['y']) - return torch.hstack([u1_xx , u2_xy]) + return torch.hstack([u1_xx, u2_xy]) + def eq2(input_, 
output_): - force_term = (torch.sin(input_.extract(['x'])*torch.pi) * - torch.sin(input_.extract(['y'])*torch.pi)) + force_term = (torch.sin(input_.extract(['x']) * torch.pi) * + torch.sin(input_.extract(['y']) * torch.pi)) delta_u = laplacian(output_.extract(['u1']), input_) return delta_u - force_term + def foo(): pass + def test_constructor(): Equation(eq1) Equation(eq2) @@ -27,6 +31,7 @@ def test_constructor(): with pytest.raises(ValueError): Equation(foo()) + def test_residual(): eq_1 = Equation(eq1) eq_2 = Equation(eq2) diff --git a/tests/test_equations/test_systemequation.py b/tests/test_equations/test_systemequation.py index 406a479..ae6825b 100644 --- a/tests/test_equations/test_systemequation.py +++ b/tests/test_equations/test_systemequation.py @@ -4,21 +4,25 @@ from pina import LabelTensor import torch import pytest + def eq1(input_, output_): u_grad = grad(output_, input_) u1_xx = grad(u_grad, input_, components=['du1dx'], d=['x']) u2_xy = grad(u_grad, input_, components=['du2dx'], d=['y']) - return torch.hstack([u1_xx , u2_xy]) + return torch.hstack([u1_xx, u2_xy]) + def eq2(input_, output_): - force_term = (torch.sin(input_.extract(['x'])*torch.pi) * - torch.sin(input_.extract(['y'])*torch.pi)) + force_term = (torch.sin(input_.extract(['x']) * torch.pi) * + torch.sin(input_.extract(['y']) * torch.pi)) delta_u = laplacian(output_.extract(['u1']), input_) return delta_u - force_term + def foo(): pass + def test_constructor(): SystemEquation([eq1, eq2]) SystemEquation([eq1, eq2], reduction='sum') @@ -27,6 +31,7 @@ def test_constructor(): with pytest.raises(ValueError): SystemEquation(foo) + def test_residual(): pts = LabelTensor(torch.rand(10, 2), labels=['x', 'y']) diff --git a/tests/test_geometry/test_cartesian.py b/tests/test_geometry/test_cartesian.py index 405c50d..3e7a8c9 100644 --- a/tests/test_geometry/test_cartesian.py +++ b/tests/test_geometry/test_cartesian.py @@ -1,12 +1,7 @@ import torch -import pytest - -from pina import LabelTensor, Condition, CartesianDomain, PINN -from pina.problem import SpatialProblem -from pina.model import FeedForward -from pina.operators import laplacian - +from pina import LabelTensor +from pina.geometry import CartesianDomain def test_constructor(): CartesianDomain({'x': [0, 1], 'y': [0, 1]}) @@ -20,6 +15,7 @@ def test_is_inside_check_border(): for pt, exp_result in zip([pt_1, pt_2, pt_3], [True, True, False]): assert domain.is_inside(pt, check_border=True) == exp_result + def test_is_inside_not_check_border(): pt_1 = LabelTensor(torch.tensor([[0.5, 0.5]]), ['x', 'y']) pt_2 = LabelTensor(torch.tensor([[1.0, 0.5]]), ['x', 'y']) @@ -28,6 +24,7 @@ def test_is_inside_not_check_border(): for pt, exp_result in zip([pt_1, pt_2, pt_3], [True, False, False]): assert domain.is_inside(pt, check_border=False) == exp_result + def test_is_inside_fixed_variables(): pt_1 = LabelTensor(torch.tensor([[0.5, 0.5]]), ['x', 'y']) pt_2 = LabelTensor(torch.tensor([[1.0, 0.5]]), ['x', 'y']) diff --git a/tests/test_geometry/test_difference.py b/tests/test_geometry/test_difference.py index e174521..b165fa7 100644 --- a/tests/test_geometry/test_difference.py +++ b/tests/test_geometry/test_difference.py @@ -5,25 +5,63 @@ from pina.geometry import Difference, EllipsoidDomain, CartesianDomain def test_constructor_two_CartesianDomains(): - Difference([CartesianDomain({'x': [0, 2], 'y': [0, 2]}), - CartesianDomain({'x': [1, 3], 'y': [1, 3]})]) + Difference([ + CartesianDomain({ + 'x': [0, 2], + 'y': [0, 2] + }), + CartesianDomain({ + 'x': [1, 3], + 'y': [1, 3] + }) + ]) 
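######## Example: set-operation semantics ###########
# Usage sketch for the Difference geometry exercised by these tests: it
# keeps points of the first domain that do not fall in the following
# ones, so (0.5, 0.5) is inside while (1.5, 1.5), which lies in the
# overlap, is not.
import torch
from pina import LabelTensor
from pina.geometry import Difference, CartesianDomain

square = CartesianDomain({'x': [0, 2], 'y': [0, 2]})
hole = CartesianDomain({'x': [1, 3], 'y': [1, 3]})
punched = Difference([square, hole])
print(punched.is_inside(LabelTensor(torch.tensor([[0.5, 0.5]]), ['x', 'y'])))  # True
print(punched.is_inside(LabelTensor(torch.tensor([[1.5, 1.5]]), ['x', 'y'])))  # False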
def test_constructor_two_3DCartesianDomain(): - Difference([CartesianDomain({'x': [0, 2], 'y': [0, 2], 'z': [0, 2]}), - CartesianDomain({'x': [1, 3], 'y': [1, 3], 'z': [1, 3]})]) + Difference([ + CartesianDomain({ + 'x': [0, 2], + 'y': [0, 2], + 'z': [0, 2] + }), + CartesianDomain({ + 'x': [1, 3], + 'y': [1, 3], + 'z': [1, 3] + }) + ]) def test_constructor_three_CartesianDomains(): - Difference([CartesianDomain({'x': [0, 2], 'y': [0, 2]}), CartesianDomain( - {'x': [1, 3], 'y': [1, 3]}), CartesianDomain({'x': [2, 4], 'y': [2, 4]})]) + Difference([ + CartesianDomain({ + 'x': [0, 2], + 'y': [0, 2] + }), + CartesianDomain({ + 'x': [1, 3], + 'y': [1, 3] + }), + CartesianDomain({ + 'x': [2, 4], + 'y': [2, 4] + }) + ]) def test_is_inside_two_CartesianDomains(): pt_1 = LabelTensor(torch.tensor([[0.5, 0.5]]), ['x', 'y']) pt_2 = LabelTensor(torch.tensor([[-1, -0.5]]), ['x', 'y']) - domain = Difference([CartesianDomain({'x': [0, 2], 'y': [0, 2]}), - CartesianDomain({'x': [1, 3], 'y': [1, 3]})]) + domain = Difference([ + CartesianDomain({ + 'x': [0, 2], + 'y': [0, 2] + }), + CartesianDomain({ + 'x': [1, 3], + 'y': [1, 3] + }) + ]) assert domain.is_inside(pt_1) == True assert domain.is_inside(pt_2) == False @@ -31,16 +69,34 @@ def test_is_inside_two_CartesianDomains(): def test_is_inside_two_3DCartesianDomain(): pt_1 = LabelTensor(torch.tensor([[0.5, 0.5, 0.5]]), ['x', 'y', 'z']) pt_2 = LabelTensor(torch.tensor([[-1, -0.5, -0.5]]), ['x', 'y', 'z']) - domain = Difference([CartesianDomain({'x': [0, 2], 'y': [0, 2], 'z': [ - 0, 2]}), CartesianDomain({'x': [1, 3], 'y': [1, 3], 'z': [1, 3]})]) + domain = Difference([ + CartesianDomain({ + 'x': [0, 2], + 'y': [0, 2], + 'z': [0, 2] + }), + CartesianDomain({ + 'x': [1, 3], + 'y': [1, 3], + 'z': [1, 3] + }) + ]) assert domain.is_inside(pt_1) == True assert domain.is_inside(pt_2) == False def test_sample(): n = 100 - domain = Difference([EllipsoidDomain( - {'x': [-1, 1], 'y': [-1, 1]}), CartesianDomain({'x': [-0.5, 0.5], 'y': [-0.5, 0.5]})]) + domain = Difference([ + EllipsoidDomain({ + 'x': [-1, 1], + 'y': [-1, 1] + }), + CartesianDomain({ + 'x': [-0.5, 0.5], + 'y': [-0.5, 0.5] + }) + ]) pts = domain.sample(n) assert isinstance(pts, LabelTensor) assert pts.shape[0] == n diff --git a/tests/test_geometry/test_ellipsoid.py b/tests/test_geometry/test_ellipsoid.py index 293cfd9..9ab0989 100644 --- a/tests/test_geometry/test_ellipsoid.py +++ b/tests/test_geometry/test_ellipsoid.py @@ -5,14 +5,13 @@ from pina import LabelTensor from pina.geometry import EllipsoidDomain - def test_constructor(): EllipsoidDomain({'x': [0, 1], 'y': [0, 1]}) - EllipsoidDomain({'x': [0, 1], 'y':[0, 1]}, sample_surface=True) + EllipsoidDomain({'x': [0, 1], 'y': [0, 1]}, sample_surface=True) def test_is_inside_sample_surface_false(): - domain = EllipsoidDomain({'x': [0, 1], 'y':[0, 1]}, sample_surface=False) + domain = EllipsoidDomain({'x': [0, 1], 'y': [0, 1]}, sample_surface=False) pt_1 = LabelTensor(torch.tensor([[0.5, 0.5]]), ['x', 'y']) pt_2 = LabelTensor(torch.tensor([[1.0, 0.5]]), ['x', 'y']) pt_3 = LabelTensor(torch.tensor([[1.5, 0.5]]), ['x', 'y']) @@ -21,10 +20,11 @@ def test_is_inside_sample_surface_false(): for pt, exp_result in zip([pt_1, pt_2, pt_3], [True, True, False]): assert domain.is_inside(pt, check_border=True) == exp_result + def test_is_inside_sample_surface_true(): - domain = EllipsoidDomain({'x': [0, 1], 'y':[0, 1]}, sample_surface=True) + domain = EllipsoidDomain({'x': [0, 1], 'y': [0, 1]}, sample_surface=True) pt_1 = LabelTensor(torch.tensor([[0.5, 0.5]]), ['x', 
'y']) pt_2 = LabelTensor(torch.tensor([[1.0, 0.5]]), ['x', 'y']) pt_3 = LabelTensor(torch.tensor([[1.5, 0.5]]), ['x', 'y']) for pt, exp_result in zip([pt_1, pt_2, pt_3], [False, True, False]): - assert domain.is_inside(pt) == exp_result \ No newline at end of file + assert domain.is_inside(pt) == exp_result diff --git a/tests/test_geometry/test_exclusion.py b/tests/test_geometry/test_exclusion.py index 2ae53f3..b6400cd 100644 --- a/tests/test_geometry/test_exclusion.py +++ b/tests/test_geometry/test_exclusion.py @@ -5,25 +5,63 @@ from pina.geometry import Exclusion, EllipsoidDomain, CartesianDomain def test_constructor_two_CartesianDomains(): - Exclusion([CartesianDomain({'x': [0, 2], 'y': [0, 2]}), - CartesianDomain({'x': [1, 3], 'y': [1, 3]})]) + Exclusion([ + CartesianDomain({ + 'x': [0, 2], + 'y': [0, 2] + }), + CartesianDomain({ + 'x': [1, 3], + 'y': [1, 3] + }) + ]) def test_constructor_two_3DCartesianDomain(): - Exclusion([CartesianDomain({'x': [0, 2], 'y': [0, 2], 'z': [0, 2]}), - CartesianDomain({'x': [1, 3], 'y': [1, 3], 'z': [1, 3]})]) + Exclusion([ + CartesianDomain({ + 'x': [0, 2], + 'y': [0, 2], + 'z': [0, 2] + }), + CartesianDomain({ + 'x': [1, 3], + 'y': [1, 3], + 'z': [1, 3] + }) + ]) def test_constructor_three_CartesianDomains(): - Exclusion([CartesianDomain({'x': [0, 2], 'y': [0, 2]}), CartesianDomain( - {'x': [1, 3], 'y': [1, 3]}), CartesianDomain({'x': [2, 4], 'y': [2, 4]})]) + Exclusion([ + CartesianDomain({ + 'x': [0, 2], + 'y': [0, 2] + }), + CartesianDomain({ + 'x': [1, 3], + 'y': [1, 3] + }), + CartesianDomain({ + 'x': [2, 4], + 'y': [2, 4] + }) + ]) def test_is_inside_two_CartesianDomains(): pt_1 = LabelTensor(torch.tensor([[0.5, 0.5]]), ['x', 'y']) pt_2 = LabelTensor(torch.tensor([[-1, -0.5]]), ['x', 'y']) - domain = Exclusion([CartesianDomain({'x': [0, 2], 'y': [0, 2]}), - CartesianDomain({'x': [1, 3], 'y': [1, 3]})]) + domain = Exclusion([ + CartesianDomain({ + 'x': [0, 2], + 'y': [0, 2] + }), + CartesianDomain({ + 'x': [1, 3], + 'y': [1, 3] + }) + ]) assert domain.is_inside(pt_1) == True assert domain.is_inside(pt_2) == False @@ -31,16 +69,34 @@ def test_is_inside_two_CartesianDomains(): def test_is_inside_two_3DCartesianDomain(): pt_1 = LabelTensor(torch.tensor([[0.5, 0.5, 0.5]]), ['x', 'y', 'z']) pt_2 = LabelTensor(torch.tensor([[-1, -0.5, -0.5]]), ['x', 'y', 'z']) - domain = Exclusion([CartesianDomain({'x': [0, 2], 'y': [0, 2], 'z': [ - 0, 2]}), CartesianDomain({'x': [1, 3], 'y': [1, 3], 'z': [1, 3]})]) + domain = Exclusion([ + CartesianDomain({ + 'x': [0, 2], + 'y': [0, 2], + 'z': [0, 2] + }), + CartesianDomain({ + 'x': [1, 3], + 'y': [1, 3], + 'z': [1, 3] + }) + ]) assert domain.is_inside(pt_1) == True assert domain.is_inside(pt_2) == False def test_sample(): n = 100 - domain = Exclusion([EllipsoidDomain( - {'x': [-1, 1], 'y': [-1, 1]}), CartesianDomain({'x': [0.3, 1.5], 'y': [0.3, 1.5]})]) + domain = Exclusion([ + EllipsoidDomain({ + 'x': [-1, 1], + 'y': [-1, 1] + }), + CartesianDomain({ + 'x': [0.3, 1.5], + 'y': [0.3, 1.5] + }) + ]) pts = domain.sample(n) assert isinstance(pts, LabelTensor) assert pts.shape[0] == n diff --git a/tests/test_geometry/test_intersection.py b/tests/test_geometry/test_intersection.py index 1c5cd81..6106107 100644 --- a/tests/test_geometry/test_intersection.py +++ b/tests/test_geometry/test_intersection.py @@ -5,18 +5,48 @@ from pina.geometry import Intersection, EllipsoidDomain, CartesianDomain def test_constructor_two_CartesianDomains(): - Intersection([CartesianDomain({'x': [0, 2], 'y': [0, 2]}), - CartesianDomain({'x': [1, 
3], 'y': [1, 3]})]) + Intersection([ + CartesianDomain({ + 'x': [0, 2], + 'y': [0, 2] + }), + CartesianDomain({ + 'x': [1, 3], + 'y': [1, 3] + }) + ]) def test_constructor_two_3DCartesianDomain(): - Intersection([CartesianDomain({'x': [0, 2], 'y': [0, 2], 'z': [0, 2]}), - CartesianDomain({'x': [1, 3], 'y': [1, 3], 'z': [1, 3]})]) + Intersection([ + CartesianDomain({ + 'x': [0, 2], + 'y': [0, 2], + 'z': [0, 2] + }), + CartesianDomain({ + 'x': [1, 3], + 'y': [1, 3], + 'z': [1, 3] + }) + ]) def test_constructor_three_CartesianDomains(): - Intersection([CartesianDomain({'x': [0, 2], 'y': [0, 2]}), CartesianDomain( - {'x': [1, 3], 'y': [1, 3]}), CartesianDomain({'x': [2, 4], 'y': [2, 4]})]) + Intersection([ + CartesianDomain({ + 'x': [0, 2], + 'y': [0, 2] + }), + CartesianDomain({ + 'x': [1, 3], + 'y': [1, 3] + }), + CartesianDomain({ + 'x': [2, 4], + 'y': [2, 4] + }) + ]) def test_is_inside_two_CartesianDomains(): @@ -24,8 +54,16 @@ def test_is_inside_two_CartesianDomains(): pt_2 = LabelTensor(torch.tensor([[-1, -0.5]]), ['x', 'y']) pt_3 = LabelTensor(torch.tensor([[1.5, 1.5]]), ['x', 'y']) - domain = Intersection([CartesianDomain({'x': [0, 2], 'y': [0, 2]}), - CartesianDomain({'x': [1, 3], 'y': [1, 3]})]) + domain = Intersection([ + CartesianDomain({ + 'x': [0, 2], + 'y': [0, 2] + }), + CartesianDomain({ + 'x': [1, 3], + 'y': [1, 3] + }) + ]) assert domain.is_inside(pt_1) == False assert domain.is_inside(pt_2) == False assert domain.is_inside(pt_3) == True @@ -35,8 +73,18 @@ def test_is_inside_two_3DCartesianDomain(): pt_1 = LabelTensor(torch.tensor([[0.5, 0.5, 0.5]]), ['x', 'y', 'z']) pt_2 = LabelTensor(torch.tensor([[-1, -0.5, -0.5]]), ['x', 'y', 'z']) pt_3 = LabelTensor(torch.tensor([[1.5, 1.5, 1.5]]), ['x', 'y', 'z']) - domain = Intersection([CartesianDomain({'x': [0, 2], 'y': [0, 2], 'z': [ - 0, 2]}), CartesianDomain({'x': [1, 3], 'y': [1, 3], 'z': [1, 3]})]) + domain = Intersection([ + CartesianDomain({ + 'x': [0, 2], + 'y': [0, 2], + 'z': [0, 2] + }), + CartesianDomain({ + 'x': [1, 3], + 'y': [1, 3], + 'z': [1, 3] + }) + ]) assert domain.is_inside(pt_1) == False assert domain.is_inside(pt_2) == False assert domain.is_inside(pt_3) == True @@ -44,8 +92,16 @@ def test_is_inside_two_3DCartesianDomain(): def test_sample(): n = 100 - domain = Intersection([EllipsoidDomain( - {'x': [-1, 1], 'y': [-1, 1]}), CartesianDomain({'x': [-0.5, 0.5], 'y': [-0.5, 0.5]})]) + domain = Intersection([ + EllipsoidDomain({ + 'x': [-1, 1], + 'y': [-1, 1] + }), + CartesianDomain({ + 'x': [-0.5, 0.5], + 'y': [-0.5, 0.5] + }) + ]) pts = domain.sample(n) assert isinstance(pts, LabelTensor) assert pts.shape[0] == n diff --git a/tests/test_geometry/test_simplex.py b/tests/test_geometry/test_simplex.py index 8c4d864..1f59585 100644 --- a/tests/test_geometry/test_simplex.py +++ b/tests/test_geometry/test_simplex.py @@ -6,13 +6,11 @@ from pina.geometry import SimplexDomain def test_constructor(): - SimplexDomain( - [ - LabelTensor(torch.tensor([[0, 0]]), labels=["x", "y"]), - LabelTensor(torch.tensor([[1, 1]]), labels=["x", "y"]), - LabelTensor(torch.tensor([[0, 2]]), labels=["x", "y"]), - ] - ) + SimplexDomain([ + LabelTensor(torch.tensor([[0, 0]]), labels=["x", "y"]), + LabelTensor(torch.tensor([[1, 1]]), labels=["x", "y"]), + LabelTensor(torch.tensor([[0, 2]]), labels=["x", "y"]), + ]) SimplexDomain( [ LabelTensor(torch.tensor([[0, 0]]), labels=["x", "y"]), @@ -23,40 +21,33 @@ def test_constructor(): ) with pytest.raises(ValueError): # different labels - SimplexDomain( - [ - LabelTensor(torch.tensor([[0, 0]]), 
labels=["x", "y"]), - LabelTensor(torch.tensor([[1, 1]]), labels=["x", "z"]), - LabelTensor(torch.tensor([[0, 2]]), labels=["x", "a"]), - ] - ) + SimplexDomain([ + LabelTensor(torch.tensor([[0, 0]]), labels=["x", "y"]), + LabelTensor(torch.tensor([[1, 1]]), labels=["x", "z"]), + LabelTensor(torch.tensor([[0, 2]]), labels=["x", "a"]), + ]) # not LabelTensor - SimplexDomain( - [ - LabelTensor(torch.tensor([[0, 0]]), labels=["x", "y"]), - [1, 1], - LabelTensor(torch.tensor([[0, 2]]), labels=["x", "y"]), - ] - ) + SimplexDomain([ + LabelTensor(torch.tensor([[0, 0]]), labels=["x", "y"]), + [1, 1], + LabelTensor(torch.tensor([[0, 2]]), labels=["x", "y"]), + ]) # different number of vertices - SimplexDomain( - [ - LabelTensor(torch.tensor([[ 0., -2.]]), labels=["x", "y"]), - LabelTensor(torch.tensor([[-.5, -.5]]), labels=["x", "y"]), - LabelTensor(torch.tensor([[-2., 0.]]), labels=["x", "y"]), - LabelTensor(torch.tensor([[-.5, .5]]), labels=["x", "y"]), - ] - ) + SimplexDomain([ + LabelTensor(torch.tensor([[0., -2.]]), labels=["x", "y"]), + LabelTensor(torch.tensor([[-.5, -.5]]), labels=["x", "y"]), + LabelTensor(torch.tensor([[-2., 0.]]), labels=["x", "y"]), + LabelTensor(torch.tensor([[-.5, .5]]), labels=["x", "y"]), + ]) + def test_sample(): # sampling inside - simplex = SimplexDomain( - [ - LabelTensor(torch.tensor([[0, 0]]), labels=["x", "y"]), - LabelTensor(torch.tensor([[1, 1]]), labels=["x", "y"]), - LabelTensor(torch.tensor([[0, 2]]), labels=["x", "y"]), - ] - ) + simplex = SimplexDomain([ + LabelTensor(torch.tensor([[0, 0]]), labels=["x", "y"]), + LabelTensor(torch.tensor([[1, 1]]), labels=["x", "y"]), + LabelTensor(torch.tensor([[0, 2]]), labels=["x", "y"]), + ]) pts = simplex.sample(10) assert isinstance(pts, LabelTensor) assert pts.size() == torch.Size([10, 2]) @@ -127,7 +118,8 @@ def test_is_inside_2D_check_border_false(): pt6 = LabelTensor(torch.tensor([[2.5, 1]]), ["x", "y"]) pt7 = LabelTensor(torch.tensor([[100, 100]]), ["x", "y"]) pts = [pt1, pt2, pt3, pt4, pt5, pt6, pt7] - for pt, exp_result in zip(pts, [False, False, False, False, True, True, False]): + for pt, exp_result in zip(pts, + [False, False, False, False, True, True, False]): assert domain.is_inside(point=pt, check_border=False) == exp_result @@ -152,8 +144,7 @@ def test_is_inside_3D_check_border_true(): pt9 = LabelTensor(torch.tensor([[2, 1, 1]]), ["x", "y", "z"]) pts = [pt1, pt2, pt3, pt4, pt5, pt6, pt7, pt8, pt9] for pt, exp_result in zip( - pts, [True, True, True, True, True, False, True, True, False] - ): + pts, [True, True, True, True, True, False, True, True, False]): assert domain.is_inside(point=pt, check_border=True) == exp_result @@ -175,5 +166,6 @@ def test_is_inside_3D_check_border_false(): pt6 = LabelTensor(torch.tensor([[0, 0, 20]]), ["x", "y", "z"]) pt7 = LabelTensor(torch.tensor([[2, 1, 1]]), ["x", "y", "z"]) pts = [pt1, pt2, pt3, pt4, pt5, pt6, pt7] - for pt, exp_result in zip(pts, [False, False, False, False, False, False, True]): + for pt, exp_result in zip(pts, + [False, False, False, False, False, False, True]): assert domain.is_inside(point=pt, check_border=False) == exp_result diff --git a/tests/test_geometry/test_union.py b/tests/test_geometry/test_union.py index 07787a3..16f8bca 100644 --- a/tests/test_geometry/test_union.py +++ b/tests/test_geometry/test_union.py @@ -5,25 +5,59 @@ from pina.geometry import Union, EllipsoidDomain, CartesianDomain def test_constructor_two_CartesianDomains(): - Union([CartesianDomain({'x': [0, 1], 'y': [0, 1]}), - CartesianDomain({'x': [0.5, 2], 'y': [-1, 
0.1]})]) + Union([ + CartesianDomain({ + 'x': [0, 1], + 'y': [0, 1] + }), + CartesianDomain({ + 'x': [0.5, 2], + 'y': [-1, 0.1] + }) + ]) def test_constructor_two_EllipsoidDomains(): - Union([EllipsoidDomain({'x': [-1, 1], 'y': [-1, 1], 'z': [-1, 1]}), - EllipsoidDomain({'x': [-0.5, 0.5], 'y': [-0.5, 0.5], 'z': [-0.5, 0.5]})]) + Union([ + EllipsoidDomain({ + 'x': [-1, 1], + 'y': [-1, 1], + 'z': [-1, 1] + }), + EllipsoidDomain({ + 'x': [-0.5, 0.5], + 'y': [-0.5, 0.5], + 'z': [-0.5, 0.5] + }) + ]) def test_constructor_EllipsoidDomain_CartesianDomain(): - Union([EllipsoidDomain({'x': [-1, 1], 'y': [-1, 1]}), - CartesianDomain({'x': [-0.5, 0.5], 'y': [-0.5, 0.5]})]) + Union([ + EllipsoidDomain({ + 'x': [-1, 1], + 'y': [-1, 1] + }), + CartesianDomain({ + 'x': [-0.5, 0.5], + 'y': [-0.5, 0.5] + }) + ]) def test_is_inside_two_CartesianDomains(): pt_1 = LabelTensor(torch.tensor([[0.5, 0.5]]), ['x', 'y']) pt_2 = LabelTensor(torch.tensor([[-1, -1]]), ['x', 'y']) - domain = Union([CartesianDomain({'x': [0, 1], 'y': [0, 1]}), - CartesianDomain({'x': [0.5, 2], 'y': [-1, 0.1]})]) + domain = Union([ + CartesianDomain({ + 'x': [0, 1], + 'y': [0, 1] + }), + CartesianDomain({ + 'x': [0.5, 2], + 'y': [-1, 0.1] + }) + ]) assert domain.is_inside(pt_1) == True assert domain.is_inside(pt_2) == False @@ -31,8 +65,18 @@ def test_is_inside_two_CartesianDomains(): def test_is_inside_two_EllipsoidDomains(): pt_1 = LabelTensor(torch.tensor([[0.5, 0.5, 0.5]]), ['x', 'y', 'z']) pt_2 = LabelTensor(torch.tensor([[-1, -1, -1]]), ['x', 'y', 'z']) - domain = Union([EllipsoidDomain({'x': [-1, 1], 'y': [-1, 1], 'z': [-1, 1]}), - EllipsoidDomain({'x': [-0.5, 0.5], 'y': [-0.5, 0.5], 'z': [-0.5, 0.5]})]) + domain = Union([ + EllipsoidDomain({ + 'x': [-1, 1], + 'y': [-1, 1], + 'z': [-1, 1] + }), + EllipsoidDomain({ + 'x': [-0.5, 0.5], + 'y': [-0.5, 0.5], + 'z': [-0.5, 0.5] + }) + ]) assert domain.is_inside(pt_1) == True assert domain.is_inside(pt_2) == False @@ -40,16 +84,32 @@ def test_is_inside_two_EllipsoidDomains(): def test_is_inside_EllipsoidDomain_CartesianDomain(): pt_1 = LabelTensor(torch.tensor([[0.5, 0.5]]), ['x', 'y']) pt_2 = LabelTensor(torch.tensor([[-1, -1]]), ['x', 'y']) - domain = Union([EllipsoidDomain({'x': [-1, 1], 'y': [-1, 1], }), - CartesianDomain({'x': [0.6, 1.5], 'y': [-2, 0]})]) + domain = Union([ + EllipsoidDomain({ + 'x': [-1, 1], + 'y': [-1, 1], + }), + CartesianDomain({ + 'x': [0.6, 1.5], + 'y': [-2, 0] + }) + ]) assert domain.is_inside(pt_1) == True assert domain.is_inside(pt_2) == False def test_sample(): n = 100 - domain = Union([EllipsoidDomain({'x': [-1, 1], 'y': [-1, 1]}), - CartesianDomain({'x': [-0.5, 0.5], 'y': [-0.5, 0.5]})]) + domain = Union([ + EllipsoidDomain({ + 'x': [-1, 1], + 'y': [-1, 1] + }), + CartesianDomain({ + 'x': [-0.5, 0.5], + 'y': [-0.5, 0.5] + }) + ]) pts = domain.sample(n) assert isinstance(pts, LabelTensor) assert pts.shape[0] == n diff --git a/tests/test_label_tensor.py b/tests/test_label_tensor.py index 1365aec..05dace5 100644 --- a/tests/test_label_tensor.py +++ b/tests/test_label_tensor.py @@ -71,6 +71,7 @@ def test_merge(): tensor_bc = tensor_b.append(tensor_c) assert torch.allclose(tensor_bc, tensor.extract(['b', 'c'])) + def test_merge2(): tensor = LabelTensor(data, labels) tensor_b = tensor.extract('b') diff --git a/tests/test_layers/test_conv.py b/tests/test_layers/test_conv.py index f0ba2af..f8ef437 100644 --- a/tests/test_layers/test_conv.py +++ b/tests/test_layers/test_conv.py @@ -10,6 +10,7 @@ def prod(iterable): def make_grid(x): + def _transform_image(image): 
# extracting image info @@ -17,11 +18,13 @@ def make_grid(x): # initializing transformed image coordinates = torch.zeros( - [channels, prod(dimension), len(dimension) + 1]).to(image.device) + [channels, prod(dimension), + len(dimension) + 1]).to(image.device) # creating the n dimensional mesh grid - values_mesh = [torch.arange(0, dim).float().to( - image.device) for dim in dimension] + values_mesh = [ + torch.arange(0, dim).float().to(image.device) for dim in dimension + ] mesh = torch.meshgrid(values_mesh) coordinates_mesh = [x.reshape(-1, 1) for x in mesh] coordinates_mesh.append(0) @@ -40,11 +43,9 @@ class MLP(torch.nn.Module): def __init__(self) -> None: super().__init__() - self. model = torch.nn.Sequential(torch.nn.Linear(2, 8), - torch.nn.ReLU(), - torch.nn.Linear(8, 8), - torch.nn.ReLU(), - torch.nn.Linear(8, 1)) + self.model = torch.nn.Sequential(torch.nn.Linear(2, 8), torch.nn.ReLU(), + torch.nn.Linear(8, 8), torch.nn.ReLU(), + torch.nn.Linear(8, 1)) def forward(self, x): return self.model(x) @@ -56,10 +57,12 @@ channel_output = 6 batch = 2 N = 10 dim = [3, 3] -stride = {"domain": [10, 10], - "start": [0, 0], - "jumps": [3, 3], - "direction": [1, 1.]} +stride = { + "domain": [10, 10], + "start": [0, 0], + "jumps": [3, 3], + "direction": [1, 1.] +} dim_filter = len(dim) dim_input = (batch, channel_input, 10, dim_filter) dim_output = (batch, channel_output, 4, dim_filter) @@ -71,15 +74,15 @@ def test_constructor(): model = MLP conv = ContinuousConvBlock(channel_input, - channel_output, - dim, - stride, - model=model) + channel_output, + dim, + stride, + model=model) conv = ContinuousConvBlock(channel_input, - channel_output, - dim, - stride, - model=None) + channel_output, + dim, + stride, + model=None) def test_forward(): @@ -87,19 +90,19 @@ def test_forward(): # simple forward conv = ContinuousConvBlock(channel_input, - channel_output, - dim, - stride, - model=model) + channel_output, + dim, + stride, + model=model) conv(x) # simple forward with optimization conv = ContinuousConvBlock(channel_input, - channel_output, - dim, - stride, - model=model, - optimize=True) + channel_output, + dim, + stride, + model=model, + optimize=True) conv(x) @@ -108,16 +111,16 @@ def test_transpose(): # simple transpose conv = ContinuousConvBlock(channel_input, - channel_output, - dim, - stride, - model=model) + channel_output, + dim, + stride, + model=model) conv2 = ContinuousConvBlock(channel_output, - channel_input, - dim, - stride, - model=model) + channel_input, + dim, + stride, + model=model) integrals = conv(x) conv2.transpose(integrals[..., -1], x) @@ -137,4 +140,4 @@ def test_transpose(): # no_overlap=True) # integrals = conv(x) - # conv.transpose(integrals[..., -1], x) \ No newline at end of file diff --git a/tests/test_layers/test_fourier.py b/tests/test_layers/test_fourier.py index af9425a..826c445 100644 --- a/tests/test_layers/test_fourier.py +++ b/tests/test_layers/test_fourier.py @@ -5,39 +5,44 @@ input_numb_fields = 3 output_numb_fields = 4 batch = 5 + def test_constructor_1d(): FourierBlock1D(input_numb_fields=input_numb_fields, - output_numb_fields=output_numb_fields, - n_modes=5) - + output_numb_fields=output_numb_fields, + n_modes=5) + + def test_forward_1d(): sconv = FourierBlock1D(input_numb_fields=input_numb_fields, - output_numb_fields=output_numb_fields, - n_modes=4) + output_numb_fields=output_numb_fields, + n_modes=4) x = torch.rand(batch, input_numb_fields, 10) sconv(x) def test_constructor_2d():
FourierBlock2D(input_numb_fields=input_numb_fields, - output_numb_fields=output_numb_fields, - n_modes=[5, 4]) - + output_numb_fields=output_numb_fields, + n_modes=[5, 4]) + + def test_forward_2d(): sconv = FourierBlock2D(input_numb_fields=input_numb_fields, - output_numb_fields=output_numb_fields, - n_modes=[5, 4]) + output_numb_fields=output_numb_fields, + n_modes=[5, 4]) x = torch.rand(batch, input_numb_fields, 10, 10) sconv(x) + def test_constructor_3d(): FourierBlock3D(input_numb_fields=input_numb_fields, - output_numb_fields=output_numb_fields, - n_modes=[5, 4, 4]) - + output_numb_fields=output_numb_fields, + n_modes=[5, 4, 4]) + + def test_forward_3d(): sconv = FourierBlock3D(input_numb_fields=input_numb_fields, - output_numb_fields=output_numb_fields, - n_modes=[5, 4, 4]) + output_numb_fields=output_numb_fields, + n_modes=[5, 4, 4]) x = torch.rand(batch, input_numb_fields, 10, 10, 10) sconv(x) diff --git a/tests/test_layers/test_residual.py b/tests/test_layers/test_residual.py index 2d2135f..fc61b7f 100644 --- a/tests/test_layers/test_residual.py +++ b/tests/test_layers/test_residual.py @@ -1,26 +1,69 @@ -from pina.model.layers import ResidualBlock +from pina.model.layers import ResidualBlock, EnhancedLinear import torch +import torch.nn as nn -def test_constructor(): +def test_constructor_residual_block(): + + res_block = ResidualBlock(input_dim=10, output_dim=3, hidden_dim=4) - res_block = ResidualBlock(input_dim=10, - output_dim=3, - hidden_dim=4) - res_block = ResidualBlock(input_dim=10, output_dim=3, hidden_dim=4, spectral_norm=True) -def test_forward(): +def test_forward_residual_block(): + + res_block = ResidualBlock(input_dim=10, output_dim=3, hidden_dim=4) - res_block = ResidualBlock(input_dim=10, - output_dim=3, - hidden_dim=4) - x = torch.rand(size=(80, 10)) y = res_block(x) - assert y.shape[1]==3 - assert y.shape[0]==x.shape[0] + assert y.shape[1] == 3 + assert y.shape[0] == x.shape[0] + +def test_constructor_no_activation_no_dropout(): + linear_layer = nn.Linear(10, 20) + enhanced_linear = EnhancedLinear(linear_layer) + + assert len(list(enhanced_linear.parameters())) == len(list(linear_layer.parameters())) + +def test_constructor_with_activation_no_dropout(): + linear_layer = nn.Linear(10, 20) + activation = nn.ReLU() + enhanced_linear = EnhancedLinear(linear_layer, activation) + + assert len(list(enhanced_linear.parameters())) == len(list(linear_layer.parameters())) + len(list(activation.parameters())) + +def test_constructor_no_activation_with_dropout(): + linear_layer = nn.Linear(10, 20) + dropout_prob = 0.5 + enhanced_linear = EnhancedLinear(linear_layer, dropout=dropout_prob) + + assert len(list(enhanced_linear.parameters())) == len(list(linear_layer.parameters())) + +def test_constructor_with_activation_with_dropout(): + linear_layer = nn.Linear(10, 20) + activation = nn.ReLU() + dropout_prob = 0.5 + enhanced_linear = EnhancedLinear(linear_layer, activation, dropout_prob) + + assert len(list(enhanced_linear.parameters())) == len(list(linear_layer.parameters())) + len(list(activation.parameters())) + +def test_forward_enhanced_linear_no_dropout(): + + enhanced_linear = EnhancedLinear(nn.Linear(10, 3)) + + x = torch.rand(size=(80, 10)) + y = enhanced_linear(x) + assert y.shape[1] == 3 + assert y.shape[0] == x.shape[0] + +def test_forward_enhanced_linear_dropout(): + + enhanced_linear = EnhancedLinear(nn.Linear(10, 3), dropout=0.5) + + x = torch.rand(size=(80, 10)) + y = enhanced_linear(x) + assert y.shape[1] == 3 + assert y.shape[0] == x.shape[0] \ No newline at 
end of file diff --git a/tests/test_layers/test_spectral_conv.py b/tests/test_layers/test_spectral_conv.py index db9f399..129d38b 100644 --- a/tests/test_layers/test_spectral_conv.py +++ b/tests/test_layers/test_spectral_conv.py @@ -5,11 +5,13 @@ input_numb_fields = 3 output_numb_fields = 4 batch = 5 + def test_constructor_1d(): SpectralConvBlock1D(input_numb_fields=input_numb_fields, output_numb_fields=output_numb_fields, n_modes=5) - + + def test_forward_1d(): sconv = SpectralConvBlock1D(input_numb_fields=input_numb_fields, output_numb_fields=output_numb_fields, @@ -22,7 +24,8 @@ def test_constructor_2d(): SpectralConvBlock2D(input_numb_fields=input_numb_fields, output_numb_fields=output_numb_fields, n_modes=[5, 4]) - + + def test_forward_2d(): sconv = SpectralConvBlock2D(input_numb_fields=input_numb_fields, output_numb_fields=output_numb_fields, @@ -30,11 +33,13 @@ def test_forward_2d(): x = torch.rand(batch, input_numb_fields, 10, 10) sconv(x) + def test_constructor_3d(): SpectralConvBlock3D(input_numb_fields=input_numb_fields, output_numb_fields=output_numb_fields, n_modes=[5, 4, 4]) - + + def test_forward_3d(): sconv = SpectralConvBlock3D(input_numb_fields=input_numb_fields, output_numb_fields=output_numb_fields, diff --git a/tests/test_loss/test_lploss.py b/tests/test_loss/test_lploss.py index 24db012..3743970 100644 --- a/tests/test_loss/test_lploss.py +++ b/tests/test_loss/test_lploss.py @@ -16,34 +16,33 @@ def test_LpLoss_constructor(): for p in [float('inf'), -float('inf'), 1, 10, -8]: LpLoss(p=p) + def test_LpLoss_forward(): # l2 loss loss = LpLoss(p=2, reduction='mean') - l2_loss = torch.mean(torch.sqrt((input-target).pow(2))) + l2_loss = torch.mean(torch.sqrt((input - target).pow(2))) assert loss(input, target) == l2_loss # l1 loss loss = LpLoss(p=1, reduction='sum') - l1_loss = torch.sum(torch.abs(input-target)) + l1_loss = torch.sum(torch.abs(input - target)) assert loss(input, target) == l1_loss + def test_LpRelativeLoss_constructor(): # test reduction for reduction in available_reductions: LpLoss(reduction=reduction, relative=True) # test p for p in [float('inf'), -float('inf'), 1, 10, -8]: - LpLoss(p=p,relative=True) + LpLoss(p=p, relative=True) + def test_LpRelativeLoss_forward(): # l2 relative loss - loss = LpLoss(p=2, reduction='mean',relative=True) - l2_loss = torch.sqrt((input-target).pow(2))/torch.sqrt(input.pow(2)) + loss = LpLoss(p=2, reduction='mean', relative=True) + l2_loss = torch.sqrt((input - target).pow(2)) / torch.sqrt(input.pow(2)) assert loss(input, target) == torch.mean(l2_loss) # l1 relative loss - loss = LpLoss(p=1, reduction='sum',relative=True) - l1_loss = torch.abs(input-target)/torch.abs(input) + loss = LpLoss(p=1, reduction='sum', relative=True) + l1_loss = torch.abs(input - target) / torch.abs(input) assert loss(input, target) == torch.sum(l1_loss) - - - - diff --git a/tests/test_loss/test_powerloss.py b/tests/test_loss/test_powerloss.py index 5f72718..7ea2675 100644 --- a/tests/test_loss/test_powerloss.py +++ b/tests/test_loss/test_powerloss.py @@ -16,34 +16,33 @@ def test_PowerLoss_constructor(): for p in [float('inf'), -float('inf'), 1, 10, -8]: PowerLoss(p=p) + def test_PowerLoss_forward(): # l2 loss loss = PowerLoss(p=2, reduction='mean') - l2_loss = torch.mean((input-target).pow(2)) + l2_loss = torch.mean((input - target).pow(2)) assert loss(input, target) == l2_loss # l1 loss loss = PowerLoss(p=1, reduction='sum') - l1_loss = torch.sum(torch.abs(input-target)) + l1_loss = torch.sum(torch.abs(input - target)) assert loss(input, target) == 
l1_loss + def test_LpRelativeLoss_constructor(): # test reduction for reduction in available_reductions: PowerLoss(reduction=reduction, relative=True) # test p for p in [float('inf'), -float('inf'), 1, 10, -8]: - PowerLoss(p=p,relative=True) + PowerLoss(p=p, relative=True) + def test_LpRelativeLoss_forward(): # l2 relative loss - loss = PowerLoss(p=2, reduction='mean',relative=True) - l2_loss = (input-target).pow(2)/input.pow(2) + loss = PowerLoss(p=2, reduction='mean', relative=True) + l2_loss = (input - target).pow(2) / input.pow(2) assert loss(input, target) == torch.mean(l2_loss) # l1 relative loss - loss = PowerLoss(p=1, reduction='sum',relative=True) - l1_loss = torch.abs(input-target)/torch.abs(input) + loss = PowerLoss(p=1, reduction='sum', relative=True) + l1_loss = torch.abs(input - target) / torch.abs(input) assert loss(input, target) == torch.sum(l1_loss) - - - - diff --git a/tests/test_model/test_deeponet.py b/tests/test_model/test_deeponet.py index 348175b..cfba614 100644 --- a/tests/test_model/test_deeponet.py +++ b/tests/test_model/test_deeponet.py @@ -32,36 +32,39 @@ def test_constructor_fails_when_invalid_inner_layer_size(): reduction='+', aggregator='*') + def test_forward_extract_str(): branch_net = FeedForward(input_dimensions=1, output_dimensions=10) trunk_net = FeedForward(input_dimensions=2, output_dimensions=10) model = DeepONet(branch_net=branch_net, - trunk_net=trunk_net, - input_indeces_branch_net=['a'], - input_indeces_trunk_net=['b', 'c'], - reduction='+', - aggregator='*') + trunk_net=trunk_net, + input_indeces_branch_net=['a'], + input_indeces_trunk_net=['b', 'c'], + reduction='+', + aggregator='*') model(input_) + def test_forward_extract_int(): branch_net = FeedForward(input_dimensions=1, output_dimensions=10) trunk_net = FeedForward(input_dimensions=2, output_dimensions=10) model = DeepONet(branch_net=branch_net, - trunk_net=trunk_net, - input_indeces_branch_net=[0], - input_indeces_trunk_net=[1, 2], - reduction='+', - aggregator='*') + trunk_net=trunk_net, + input_indeces_branch_net=[0], + input_indeces_trunk_net=[1, 2], + reduction='+', + aggregator='*') model(data) + def test_forward_extract_str_wrong(): branch_net = FeedForward(input_dimensions=1, output_dimensions=10) trunk_net = FeedForward(input_dimensions=2, output_dimensions=10) model = DeepONet(branch_net=branch_net, - trunk_net=trunk_net, - input_indeces_branch_net=['a'], - input_indeces_trunk_net=['b', 'c'], - reduction='+', - aggregator='*') + trunk_net=trunk_net, + input_indeces_branch_net=['a'], + input_indeces_trunk_net=['b', 'c'], + reduction='+', + aggregator='*') with pytest.raises(RuntimeError): model(data) diff --git a/tests/test_model/test_fnn.py b/tests/test_model/test_fnn.py index 20f2d8c..bdd38fe 100644 --- a/tests/test_model/test_fnn.py +++ b/tests/test_model/test_fnn.py @@ -3,7 +3,6 @@ import pytest from pina.model import FeedForward - data = torch.rand((20, 3)) input_vars = 3 output_vars = 4 @@ -13,19 +12,24 @@ def test_constructor(): FeedForward(input_vars, output_vars) FeedForward(input_vars, output_vars, inner_size=10, n_layers=20) FeedForward(input_vars, output_vars, layers=[10, 20, 5, 2]) - FeedForward(input_vars, output_vars, layers=[10, 20, 5, 2], + FeedForward(input_vars, + output_vars, + layers=[10, 20, 5, 2], func=torch.nn.ReLU) - FeedForward(input_vars, output_vars, layers=[10, 20, 5, 2], + FeedForward(input_vars, + output_vars, + layers=[10, 20, 5, 2], func=[torch.nn.ReLU, torch.nn.ReLU, None, torch.nn.Tanh]) def test_constructor_wrong(): with 
pytest.raises(RuntimeError): - FeedForward(input_vars, output_vars, layers=[10, 20, 5, 2], + FeedForward(input_vars, + output_vars, + layers=[10, 20, 5, 2], func=[torch.nn.ReLU, torch.nn.ReLU]) - def test_forward(): dim_in, dim_out = 3, 2 fnn = FeedForward(dim_in, dim_out) diff --git a/tests/test_model/test_fno.py b/tests/test_model/test_fno.py index 7c2613c..322ce1f 100644 --- a/tests/test_model/test_fno.py +++ b/tests/test_model/test_fno.py @@ -1,7 +1,6 @@ import torch from pina.model import FNO - output_channels = 5 batch_size = 15 resolution = [30, 40, 50] @@ -11,7 +10,7 @@ lifting_dim = 128 def test_constructor(): input_channels = 3 lifting_net = torch.nn.Linear(input_channels, lifting_dim) - projecting_net = torch.nn.Linear(60, output_channels) + projecting_net = torch.nn.Linear(60, output_channels) # simple constructor FNO(lifting_net=lifting_net, @@ -20,7 +19,7 @@ def test_constructor(): dimensions=3, inner_size=60, n_layers=5) - + # simple constructor with n_modes list FNO(lifting_net=lifting_net, projecting_net=projecting_net, @@ -36,53 +35,61 @@ def test_constructor(): dimensions=3, inner_size=60, n_layers=2) - + # simple constructor with n_modes list of list - projecting_net = torch.nn.Linear(50, output_channels) + projecting_net = torch.nn.Linear(50, output_channels) FNO(lifting_net=lifting_net, projecting_net=projecting_net, n_modes=5, dimensions=3, layers=[50, 50]) - + + def test_1d_forward(): input_channels = 1 input_ = torch.rand(batch_size, resolution[0], input_channels) lifting_net = torch.nn.Linear(input_channels, lifting_dim) projecting_net = torch.nn.Linear(60, output_channels) fno = FNO(lifting_net=lifting_net, - projecting_net=projecting_net, - n_modes=5, - dimensions=1, - inner_size=60, - n_layers=2) + projecting_net=projecting_net, + n_modes=5, + dimensions=1, + inner_size=60, + n_layers=2) out = fno(input_) assert out.shape == torch.Size([batch_size, resolution[0], output_channels]) + def test_2d_forward(): input_channels = 2 - input_ = torch.rand(batch_size, resolution[0], resolution[1], input_channels) + input_ = torch.rand(batch_size, resolution[0], resolution[1], + input_channels) lifting_net = torch.nn.Linear(input_channels, lifting_dim) projecting_net = torch.nn.Linear(60, output_channels) fno = FNO(lifting_net=lifting_net, - projecting_net=projecting_net, - n_modes=5, - dimensions=2, - inner_size=60, - n_layers=2) + projecting_net=projecting_net, + n_modes=5, + dimensions=2, + inner_size=60, + n_layers=2) out = fno(input_) - assert out.shape == torch.Size([batch_size, resolution[0], resolution[1], output_channels]) + assert out.shape == torch.Size( + [batch_size, resolution[0], resolution[1], output_channels]) + def test_3d_forward(): input_channels = 3 - input_ = torch.rand(batch_size, resolution[0], resolution[1], resolution[2], input_channels) + input_ = torch.rand(batch_size, resolution[0], resolution[1], resolution[2], + input_channels) lifting_net = torch.nn.Linear(input_channels, lifting_dim) projecting_net = torch.nn.Linear(60, output_channels) fno = FNO(lifting_net=lifting_net, - projecting_net=projecting_net, - n_modes=5, - dimensions=3, - inner_size=60, - n_layers=2) + projecting_net=projecting_net, + n_modes=5, + dimensions=3, + inner_size=60, + n_layers=2) out = fno(input_) - assert out.shape == torch.Size([batch_size, resolution[0], resolution[1], resolution[2], output_channels]) \ No newline at end of file + assert out.shape == torch.Size([ + batch_size, resolution[0], resolution[1], resolution[2], output_channels + ]) diff --git 
a/tests/test_model/test_mionet.py b/tests/test_model/test_mionet.py index 1983429..4e9c03c 100644 --- a/tests/test_model/test_mionet.py +++ b/tests/test_model/test_mionet.py @@ -14,59 +14,42 @@ def test_constructor(): branch_net1 = FeedForward(input_dimensions=1, output_dimensions=10) branch_net2 = FeedForward(input_dimensions=2, output_dimensions=10) trunk_net = FeedForward(input_dimensions=1, output_dimensions=10) - networks = {branch_net1 : ['x'], - branch_net2 : ['x', 'y'], - trunk_net : ['z']} - MIONet(networks=networks, - reduction='+', - aggregator='*') + networks = {branch_net1: ['x'], branch_net2: ['x', 'y'], trunk_net: ['z']} + MIONet(networks=networks, reduction='+', aggregator='*') def test_constructor_fails_when_invalid_inner_layer_size(): branch_net1 = FeedForward(input_dimensions=1, output_dimensions=10) branch_net2 = FeedForward(input_dimensions=2, output_dimensions=10) trunk_net = FeedForward(input_dimensions=1, output_dimensions=12) - networks = {branch_net1 : ['x'], - branch_net2 : ['x', 'y'], - trunk_net : ['z']} + networks = {branch_net1: ['x'], branch_net2: ['x', 'y'], trunk_net: ['z']} with pytest.raises(ValueError): - MIONet(networks=networks, - reduction='+', - aggregator='*') + MIONet(networks=networks, reduction='+', aggregator='*') + def test_forward_extract_str(): branch_net1 = FeedForward(input_dimensions=1, output_dimensions=10) branch_net2 = FeedForward(input_dimensions=1, output_dimensions=10) trunk_net = FeedForward(input_dimensions=1, output_dimensions=10) - networks = {branch_net1 : ['a'], - branch_net2 : ['b'], - trunk_net : ['c']} - model = MIONet(networks=networks, - reduction='+', - aggregator='*') + networks = {branch_net1: ['a'], branch_net2: ['b'], trunk_net: ['c']} + model = MIONet(networks=networks, reduction='+', aggregator='*') model(input_) + def test_forward_extract_int(): branch_net1 = FeedForward(input_dimensions=1, output_dimensions=10) branch_net2 = FeedForward(input_dimensions=1, output_dimensions=10) trunk_net = FeedForward(input_dimensions=1, output_dimensions=10) - networks = {branch_net1 : [0], - branch_net2 : [1], - trunk_net : [2]} - model = MIONet(networks=networks, - reduction='+', - aggregator='*') + networks = {branch_net1: [0], branch_net2: [1], trunk_net: [2]} + model = MIONet(networks=networks, reduction='+', aggregator='*') model(data) + def test_forward_extract_str_wrong(): branch_net1 = FeedForward(input_dimensions=1, output_dimensions=10) branch_net2 = FeedForward(input_dimensions=1, output_dimensions=10) trunk_net = FeedForward(input_dimensions=1, output_dimensions=10) - networks = {branch_net1 : ['a'], - branch_net2 : ['b'], - trunk_net : ['c']} - model = MIONet(networks=networks, - reduction='+', - aggregator='*') + networks = {branch_net1: ['a'], branch_net2: ['b'], trunk_net: ['c']} + model = MIONet(networks=networks, reduction='+', aggregator='*') with pytest.raises(RuntimeError): model(data) diff --git a/tests/test_model/test_residualfnn.py b/tests/test_model/test_residualfnn.py index d8c07aa..eef05b1 100644 --- a/tests/test_model/test_residualfnn.py +++ b/tests/test_model/test_residualfnn.py @@ -2,21 +2,25 @@ import torch import pytest from pina.model import ResidualFeedForward + def test_constructor(): # simple constructor ResidualFeedForward(input_dimensions=2, output_dimensions=1) # wrong transformer nets (not 2) with pytest.raises(ValueError): - ResidualFeedForward(input_dimensions=2, output_dimensions=1, transformer_nets=[torch.nn.Linear(2, 20)]) + ResidualFeedForward(input_dimensions=2, + 
output_dimensions=1, + transformer_nets=[torch.nn.Linear(2, 20)]) # wrong transformer nets (not nn.Module) with pytest.raises(ValueError): - ResidualFeedForward(input_dimensions=2, output_dimensions=1, transformer_nets=[2, 2]) + ResidualFeedForward(input_dimensions=2, + output_dimensions=1, + transformer_nets=[2, 2]) + def test_forward(): x = torch.rand(10, 2) model = ResidualFeedForward(input_dimensions=2, output_dimensions=1) model(x) - - diff --git a/tests/test_operators.py b/tests/test_operators.py index f577c70..aa9ea9d 100644 --- a/tests/test_operators.py +++ b/tests/test_operators.py @@ -4,9 +4,11 @@ import pytest from pina import LabelTensor from pina.operators import grad, div, laplacian + def func_vec(x): return x**2 + def func_scalar(x): print('X') x_ = x.extract(['x']) @@ -14,6 +16,7 @@ def func_scalar(x): mu_ = x.extract(['mu']) return x_**2 + y_**2 + mu_**3 + data = torch.rand((20, 3), requires_grad=True) inp = LabelTensor(data, ['x', 'y', 'mu']) labels = ['a', 'b', 'c'] @@ -24,10 +27,15 @@ tensor_s = LabelTensor(func_scalar(inp).reshape(-1, 1), labels[0]) def test_grad_scalar_output(): grad_tensor_s = grad(tensor_s, inp) assert grad_tensor_s.shape == inp.shape - assert grad_tensor_s.labels == [f'd{tensor_s.labels[0]}d{i}' for i in inp.labels] + assert grad_tensor_s.labels == [ + f'd{tensor_s.labels[0]}d{i}' for i in inp.labels + ] grad_tensor_s = grad(tensor_s, inp, d=['x', 'y']) assert grad_tensor_s.shape == (inp.shape[0], 2) - assert grad_tensor_s.labels == [f'd{tensor_s.labels[0]}d{i}' for i in ['x', 'y']] + assert grad_tensor_s.labels == [ + f'd{tensor_s.labels[0]}d{i}' for i in ['x', 'y'] + ] + def test_grad_vector_output(): grad_tensor_v = grad(tensor_v, inp) @@ -35,19 +43,24 @@ def test_grad_vector_output(): grad_tensor_v = grad(tensor_v, inp, d=['x', 'mu']) assert grad_tensor_v.shape == (inp.shape[0], 6) + def test_div_vector_output(): grad_tensor_v = div(tensor_v, inp) assert grad_tensor_v.shape == (20, 1) grad_tensor_v = div(tensor_v, inp, components=['a', 'b'], d=['x', 'mu']) assert grad_tensor_v.shape == (inp.shape[0], 1) + def test_laplacian_scalar_output(): laplace_tensor_v = laplacian(tensor_s, inp, components=['a'], d=['x', 'y']) assert laplace_tensor_v.shape == tensor_s.shape + def test_laplacian_vector_output(): laplace_tensor_v = laplacian(tensor_v, inp) assert laplace_tensor_v.shape == tensor_v.shape - laplace_tensor_v = laplacian(tensor_v, inp, components=['a', 'b'], d=['x', 'y']) + laplace_tensor_v = laplacian(tensor_v, + inp, + components=['a', 'b'], + d=['x', 'y']) assert laplace_tensor_v.shape == tensor_v.extract(['a', 'b']).shape - diff --git a/tests/test_problem.py b/tests/test_problem.py index 09b4fc0..8dcd499 100644 --- a/tests/test_problem.py +++ b/tests/test_problem.py @@ -10,45 +10,59 @@ from pina.equation.equation_factory import FixedValue def laplace_equation(input_, output_): - force_term = (torch.sin(input_.extract(['x'])*torch.pi) * - torch.sin(input_.extract(['y'])*torch.pi)) + force_term = (torch.sin(input_.extract(['x']) * torch.pi) * + torch.sin(input_.extract(['y']) * torch.pi)) delta_u = laplacian(output_.extract(['u']), input_) return delta_u - force_term + my_laplace = Equation(laplace_equation) in_ = LabelTensor(torch.tensor([[0., 1.]], requires_grad=True), ['x', 'y']) out_ = LabelTensor(torch.tensor([[0.]], requires_grad=True), ['u']) + class Poisson(SpatialProblem): output_variables = ['u'] spatial_domain = CartesianDomain({'x': [0, 1], 'y': [0, 1]}) conditions = { - 'gamma1': Condition( - location=CartesianDomain({'x': [0, 1], 
'y': 1}), - equation=FixedValue(0.0)), - 'gamma2': Condition( - location=CartesianDomain({'x': [0, 1], 'y': 0}), - equation=FixedValue(0.0)), - 'gamma3': Condition( - location=CartesianDomain({'x': 1, 'y': [0, 1]}), - equation=FixedValue(0.0)), - 'gamma4': Condition( - location=CartesianDomain({'x': 0, 'y': [0, 1]}), - equation=FixedValue(0.0)), - 'D': Condition( - location=CartesianDomain({'x': [0, 1], 'y': [0, 1]}), - equation=my_laplace), - 'data': Condition( - input_points=in_, - output_points=out_) + 'gamma1': + Condition(location=CartesianDomain({ + 'x': [0, 1], + 'y': 1 + }), + equation=FixedValue(0.0)), + 'gamma2': + Condition(location=CartesianDomain({ + 'x': [0, 1], + 'y': 0 + }), + equation=FixedValue(0.0)), + 'gamma3': + Condition(location=CartesianDomain({ + 'x': 1, + 'y': [0, 1] + }), + equation=FixedValue(0.0)), + 'gamma4': + Condition(location=CartesianDomain({ + 'x': 0, + 'y': [0, 1] + }), + equation=FixedValue(0.0)), + 'D': + Condition(location=CartesianDomain({ + 'x': [0, 1], + 'y': [0, 1] + }), + equation=my_laplace), + 'data': + Condition(input_points=in_, output_points=out_) } def poisson_sol(self, pts): - return -( - torch.sin(pts.extract(['x'])*torch.pi) * - torch.sin(pts.extract(['y'])*torch.pi) - )/(2*torch.pi**2) + return -(torch.sin(pts.extract(['x']) * torch.pi) * + torch.sin(pts.extract(['y']) * torch.pi)) / (2 * torch.pi**2) truth_solution = poisson_sol @@ -78,11 +92,16 @@ def test_discretise_domain(): poisson_problem.discretise_domain(n, 'lh', locations=['D']) assert poisson_problem.input_pts['D'].shape[0] == n + def test_sampling_few_variables(): n = 10 - poisson_problem.discretise_domain(n, 'grid', locations=['D'], variables=['x']) + poisson_problem.discretise_domain(n, + 'grid', + locations=['D'], + variables=['x']) assert poisson_problem.input_pts['D'].shape[1] == 1 - assert poisson_problem._have_sampled_points['D'] is False + assert poisson_problem._have_sampled_points['D'] is False + # def test_sampling_all_args(): # n = 10 diff --git a/tests/test_solvers/test_garom.py b/tests/test_solvers/test_garom.py index 3087d7d..4ff4e1c 100644 --- a/tests/test_solvers/test_garom.py +++ b/tests/test_solvers/test_garom.py @@ -15,6 +15,7 @@ def func(x, mu1, mu2): norm = x[:, 0]**2 + x[:, 1]**2 return torch.exp(-(x_m1 + x_m2)) + class ParametricGaussian(AbstractProblem): output_variables = [f'u_{i}' for i in range(900)] @@ -24,7 +25,7 @@ class ParametricGaussian(AbstractProblem): params = LabelTensor(torch.cartesian_prod(xx, yy), labels=['mu1', 'mu2']) # define domain - x = torch.linspace(-1, 1, 30) + x = torch.linspace(-1, 1, 30) domain = torch.cartesian_prod(x, x) triang = tri.Triangulation(domain[:, 0], domain[:, 1]) sol = [] @@ -34,15 +35,18 @@ class ParametricGaussian(AbstractProblem): # define conditions conditions = { - 'data': Condition( - input_points=params, - output_points=snapshots) + 'data': Condition(input_points=params, output_points=snapshots) } + # simple Generator Network class Generator(nn.Module): - def __init__(self, input_dimension, parameters_dimension, - noise_dimension, activation=torch.nn.SiLU): + + def __init__(self, + input_dimension, + parameters_dimension, + noise_dimension, + activation=torch.nn.SiLU): super().__init__() self._noise_dimension = noise_dimension @@ -53,13 +57,12 @@ class Generator(nn.Module): self._activation(), torch.nn.Linear(input_dimension // 6, input_dimension // 3), self._activation(), - torch.nn.Linear(input_dimension // 3, input_dimension) - ) + torch.nn.Linear(input_dimension // 3, input_dimension)) 
self.condition = torch.nn.Sequential( torch.nn.Linear(parameters_dimension, 2 * self._noise_dimension), self._activation(), - torch.nn.Linear(2 * self._noise_dimension, 5 * self._noise_dimension) - ) + torch.nn.Linear(2 * self._noise_dimension, + 5 * self._noise_dimension)) def forward(self, param): # uniform sampling in [-1, 1] @@ -78,8 +81,12 @@ class Generator(nn.Module): # Simple Discriminator Network class Discriminator(nn.Module): - def __init__(self, input_dimension, parameter_dimension, - hidden_dimension, activation=torch.nn.ReLU): + + def __init__(self, + input_dimension, + parameter_dimension, + hidden_dimension, + activation=torch.nn.ReLU): super().__init__() self._activation = activation @@ -88,10 +95,9 @@ class Discriminator(nn.Module): self._activation(), torch.nn.Linear(input_dimension // 3, input_dimension // 6), self._activation(), - torch.nn.Linear(input_dimension // 6, hidden_dimension) - ) + torch.nn.Linear(input_dimension // 6, hidden_dimension)) self.decoding = torch.nn.Sequential( - torch.nn.Linear(2*hidden_dimension, input_dimension // 6), + torch.nn.Linear(2 * hidden_dimension, input_dimension // 6), self._activation(), torch.nn.Linear(input_dimension // 6, input_dimension // 3), self._activation(), @@ -101,9 +107,8 @@ class Discriminator(nn.Module): self.condition = torch.nn.Sequential( torch.nn.Linear(parameter_dimension, hidden_dimension // 2), self._activation(), - torch.nn.Linear(hidden_dimension // 2, hidden_dimension) - ) - + torch.nn.Linear(hidden_dimension // 2, hidden_dimension)) + def forward(self, data): x, condition = data encoding = self.encoding(x) @@ -114,49 +119,49 @@ class Discriminator(nn.Module): problem = ParametricGaussian() + def test_constructor(): - GAROM(problem = problem, - generator = Generator(input_dimension=900, - parameters_dimension=2, - noise_dimension=12), - discriminator = Discriminator(input_dimension=900, - parameter_dimension=2, - hidden_dimension=64) - ) + GAROM(problem=problem, + generator=Generator(input_dimension=900, + parameters_dimension=2, + noise_dimension=12), + discriminator=Discriminator(input_dimension=900, + parameter_dimension=2, + hidden_dimension=64)) + def test_train_cpu(): - solver = GAROM(problem = problem, - generator = Generator(input_dimension=900, - parameters_dimension=2, - noise_dimension=12), - discriminator = Discriminator(input_dimension=900, - parameter_dimension=2, - hidden_dimension=64) - ) + solver = GAROM(problem=problem, + generator=Generator(input_dimension=900, + parameters_dimension=2, + noise_dimension=12), + discriminator=Discriminator(input_dimension=900, + parameter_dimension=2, + hidden_dimension=64)) trainer = Trainer(solver=solver, max_epochs=4, accelerator='cpu', batch_size=20) trainer.train() + def test_sample(): - solver = GAROM(problem = problem, - generator = Generator(input_dimension=900, - parameters_dimension=2, - noise_dimension=12), - discriminator = Discriminator(input_dimension=900, - parameter_dimension=2, - hidden_dimension=64) - ) + solver = GAROM(problem=problem, + generator=Generator(input_dimension=900, + parameters_dimension=2, + noise_dimension=12), + discriminator=Discriminator(input_dimension=900, + parameter_dimension=2, + hidden_dimension=64)) solver.sample(problem.params) assert solver.sample(problem.params).shape == problem.snapshots.shape + def test_forward(): - solver = GAROM(problem = problem, - generator = Generator(input_dimension=900, - parameters_dimension=2, - noise_dimension=12), - discriminator = Discriminator(input_dimension=900, - 
parameter_dimension=2, - hidden_dimension=64) - ) + solver = GAROM(problem=problem, + generator=Generator(input_dimension=900, + parameters_dimension=2, + noise_dimension=12), + discriminator=Discriminator(input_dimension=900, + parameter_dimension=2, + hidden_dimension=64)) solver(problem.params, mc_steps=100, variance=True) - assert solver(problem.params).shape == problem.snapshots.shape \ No newline at end of file + assert solver(problem.params).shape == problem.snapshots.shape diff --git a/tests/test_solvers/test_pinn.py b/tests/test_solvers/test_pinn.py index 47570ac..0a42410 100644 --- a/tests/test_solvers/test_pinn.py +++ b/tests/test_solvers/test_pinn.py @@ -1,30 +1,31 @@ import torch -import pytest from pina.problem import SpatialProblem from pina.operators import laplacian from pina.geometry import CartesianDomain -from pina import Condition, LabelTensor, PINN +from pina import Condition, LabelTensor +from pina.solvers import PINN from pina.trainer import Trainer from pina.model import FeedForward from pina.equation.equation import Equation from pina.equation.equation_factory import FixedValue -from pina.plotter import Plotter from pina.loss import LpLoss def laplace_equation(input_, output_): - force_term = (torch.sin(input_.extract(['x'])*torch.pi) * - torch.sin(input_.extract(['y'])*torch.pi)) + force_term = (torch.sin(input_.extract(['x']) * torch.pi) * + torch.sin(input_.extract(['y']) * torch.pi)) delta_u = laplacian(output_.extract(['u']), input_) return delta_u - force_term + my_laplace = Equation(laplace_equation) in_ = LabelTensor(torch.tensor([[0., 1.]]), ['x', 'y']) out_ = LabelTensor(torch.tensor([[0.]]), ['u']) in2_ = LabelTensor(torch.rand(60, 2), ['x', 'y']) out2_ = LabelTensor(torch.rand(60, 1), ['u']) + class Poisson(SpatialProblem): output_variables = ['u'] spatial_domain = CartesianDomain({'x': [0, 1], 'y': [0, 1]}) @@ -54,42 +55,48 @@ class Poisson(SpatialProblem): } def poisson_sol(self, pts): - return -( - torch.sin(pts.extract(['x'])*torch.pi) * - torch.sin(pts.extract(['y'])*torch.pi) - )/(2*torch.pi**2) + return -(torch.sin(pts.extract(['x']) * torch.pi) * + torch.sin(pts.extract(['y']) * torch.pi)) / (2 * torch.pi**2) truth_solution = poisson_sol + class myFeature(torch.nn.Module): """ Feature: sin(x) """ - def __init__(self): super(myFeature, self).__init__() def forward(self, x): - t = (torch.sin(x.extract(['x'])*torch.pi) * - torch.sin(x.extract(['y'])*torch.pi)) + t = (torch.sin(x.extract(['x']) * torch.pi) * + torch.sin(x.extract(['y']) * torch.pi)) return LabelTensor(t, ['sin(x)sin(y)']) # make the problem poisson_problem = Poisson() -model = FeedForward(len(poisson_problem.input_variables),len(poisson_problem.output_variables)) -model_extra_feats = FeedForward(len(poisson_problem.input_variables)+1,len(poisson_problem.output_variables)) +model = FeedForward(len(poisson_problem.input_variables), + len(poisson_problem.output_variables)) +model_extra_feats = FeedForward( + len(poisson_problem.input_variables) + 1, + len(poisson_problem.output_variables)) extra_feats = [myFeature()] def test_constructor(): - PINN(problem = poisson_problem, model=model, extra_features=None) + PINN(problem=poisson_problem, model=model, extra_features=None) def test_constructor_extra_feats(): - model_extra_feats = FeedForward(len(poisson_problem.input_variables)+1,len(poisson_problem.output_variables)) - PINN(problem = poisson_problem, model=model_extra_feats, extra_features=extra_feats) + model_extra_feats = FeedForward( + len(poisson_problem.input_variables) + 1, + 
len(poisson_problem.output_variables)) + PINN(problem=poisson_problem, + model=model_extra_feats, + extra_features=extra_feats) + def test_train_cpu(): poisson_problem = Poisson() @@ -100,14 +107,21 @@ def test_train_cpu(): trainer = Trainer(solver=pinn, max_epochs=1, accelerator='cpu', batch_size=20) trainer.train() + def test_train_restore(): tmpdir = "tests/tmp_restore" poisson_problem = Poisson() boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4'] n = 10 poisson_problem.discretise_domain(n, 'grid', locations=boundaries) - pinn = PINN(problem = poisson_problem, model=model, extra_features=None, loss=LpLoss()) - trainer = Trainer(solver=pinn, max_epochs=5, accelerator='cpu', default_root_dir=tmpdir) + pinn = PINN(problem=poisson_problem, + model=model, + extra_features=None, + loss=LpLoss()) + trainer = Trainer(solver=pinn, + max_epochs=5, + accelerator='cpu', + default_root_dir=tmpdir) trainer.train() ntrainer = Trainer(solver=pinn, max_epochs=15, accelerator='cpu') t = ntrainer.train( @@ -115,31 +129,40 @@ def test_train_restore(): import shutil shutil.rmtree(tmpdir) + def test_train_load(): tmpdir = "tests/tmp_load" poisson_problem = Poisson() boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4'] n = 10 poisson_problem.discretise_domain(n, 'grid', locations=boundaries) - pinn = PINN(problem = poisson_problem, model=model, extra_features=None, loss=LpLoss()) - trainer = Trainer(solver=pinn, max_epochs=15, accelerator='cpu', - default_root_dir=tmpdir) + pinn = PINN(problem=poisson_problem, + model=model, + extra_features=None, + loss=LpLoss()) + trainer = Trainer(solver=pinn, + max_epochs=15, + accelerator='cpu', + default_root_dir=tmpdir) trainer.train() new_pinn = PINN.load_from_checkpoint( f'{tmpdir}/lightning_logs/version_0/checkpoints/epoch=14-step=30.ckpt', problem = poisson_problem, model=model) test_pts = CartesianDomain({'x': [0, 1], 'y': [0, 1]}).sample(10) assert new_pinn.forward(test_pts).extract(['u']).shape == (10, 1) - assert new_pinn.forward(test_pts).extract(['u']).shape == pinn.forward(test_pts).extract(['u']).shape - torch.testing.assert_close(new_pinn.forward(test_pts).extract(['u']), pinn.forward(test_pts).extract(['u'])) + assert new_pinn.forward(test_pts).extract( + ['u']).shape == pinn.forward(test_pts).extract(['u']).shape + torch.testing.assert_close( + new_pinn.forward(test_pts).extract(['u']), + pinn.forward(test_pts).extract(['u'])) import shutil shutil.rmtree(tmpdir) - + # # TODO fix asap. Basically sampling few variables # # works only if both variables are in a range. # # if one is fixed and the other not, this will -# # not work. This test also needs to be fixed and +# # not work. This test also needs to be fixed and # # insert in test problem not in test pinn. 
# def test_train_cpu_sampling_few_vars(): # poisson_problem = Poisson() @@ -158,12 +181,15 @@ def test_train_extra_feats_cpu(): boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4'] n = 10 poisson_problem.discretise_domain(n, 'grid', locations=boundaries) - pinn = PINN(problem = poisson_problem, model=model_extra_feats, extra_features=extra_feats) + pinn = PINN(problem=poisson_problem, + model=model_extra_feats, + extra_features=extra_feats) trainer = Trainer(solver=pinn, max_epochs=5, accelerator='cpu') trainer.train() + # TODO, fix GitHub actions to run also on GPU -# def test_train_gpu(): +# def test_train_gpu(): # poisson_problem = Poisson() # boundaries = ['gamma1', 'gamma2', 'gamma3', 'gamma4'] # n = 10 @@ -171,7 +197,6 @@ def test_train_extra_feats_cpu(): # pinn = PINN(problem = poisson_problem, model=model, extra_features=None, loss=LpLoss()) # trainer = Trainer(solver=pinn, kwargs={'max_epochs' : 5, 'accelerator':'gpu'}) # trainer.train() - """ def test_train_gpu(): #TODO fix ASAP poisson_problem = Poisson() diff --git a/tests/test_utils.py b/tests/test_utils.py index 94895e5..46305f6 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -15,9 +15,9 @@ def test_merge_tensors(): tensor3 = LabelTensor(torch.ones((30, 3)), ['g', 'h', 'i']) merged_tensor = merge_tensors((tensor1, tensor2, tensor3)) - assert tuple(merged_tensor.labels) == ( - 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i') - assert merged_tensor.shape == (20*20*30, 9) + assert tuple(merged_tensor.labels) == ('a', 'b', 'c', 'd', 'e', 'f', 'g', + 'h', 'i') + assert merged_tensor.shape == (20 * 20 * 30, 9) assert torch.all(merged_tensor.extract(('d', 'e', 'f')) == 0) assert torch.all(merged_tensor.extract(('g', 'h', 'i')) == 1) @@ -25,7 +25,7 @@ def test_merge_tensors(): def test_check_consistency_correct(): ellipsoid1 = EllipsoidDomain({'x': [1, 2], 'y': [-2, 1]}) example_input_pts = LabelTensor(torch.tensor([[0, 0, 0]]), ['x', 'y', 'z']) - + check_consistency(example_input_pts, torch.Tensor) check_consistency(CartesianDomain, Location, subclass=True) check_consistency(ellipsoid1, Location) @@ -34,7 +34,7 @@ def test_check_consistency_correct(): def test_check_consistency_incorrect(): ellipsoid1 = EllipsoidDomain({'x': [1, 2], 'y': [-2, 1]}) example_input_pts = LabelTensor(torch.tensor([[0, 0, 0]]), ['x', 'y', 'z']) - + with pytest.raises(ValueError): check_consistency(example_input_pts, Location) with pytest.raises(ValueError):
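To close the test-suite changes, a compact sketch of the two utilities the final hunks touch, merge_tensors and check_consistency. The behavior mirrors the assertions above; the import path pina.utils and the Location import are assumptions taken from how the test module uses these names.

.. code:: python

    import torch
    from pina import LabelTensor
    from pina.geometry import CartesianDomain, Location
    from pina.utils import merge_tensors, check_consistency  # assumed import path

    t1 = LabelTensor(torch.rand(20, 3), ['a', 'b', 'c'])
    t2 = LabelTensor(torch.zeros(30, 3), ['d', 'e', 'f'])

    # merge_tensors takes the cartesian product of the rows and concatenates labels
    merged = merge_tensors((t1, t2))
    assert merged.shape == (20 * 30, 6)
    assert tuple(merged.labels) == ('a', 'b', 'c', 'd', 'e', 'f')

    # check_consistency validates an instance against a type,
    # or a class against a base class when subclass=True
    check_consistency(merged, torch.Tensor)
    check_consistency(CartesianDomain, Location, subclass=True)
    try:
        check_consistency(merged, Location)  # wrong type
    except ValueError:
        pass  # inconsistent types are reported as ValueError, as the tests assert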